fast-agent-mcp 0.2.49__py3-none-any.whl → 0.2.51__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (40)
  1. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/METADATA +4 -4
  2. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/RECORD +38 -21
  3. mcp_agent/cli/commands/quickstart.py +107 -0
  4. mcp_agent/event_progress.py +18 -0
  5. mcp_agent/llm/model_database.py +39 -1
  6. mcp_agent/llm/model_factory.py +5 -3
  7. mcp_agent/llm/providers/augmented_llm_aliyun.py +7 -8
  8. mcp_agent/llm/providers/augmented_llm_deepseek.py +7 -8
  9. mcp_agent/llm/providers/augmented_llm_groq.py +80 -7
  10. mcp_agent/llm/providers/augmented_llm_openai.py +18 -7
  11. mcp_agent/llm/providers/augmented_llm_openrouter.py +10 -15
  12. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py +127 -0
  13. mcp_agent/llm/providers/augmented_llm_xai.py +8 -8
  14. mcp_agent/llm/providers/google_converter.py +4 -0
  15. mcp_agent/logging/rich_progress.py +30 -7
  16. mcp_agent/mcp/helpers/content_helpers.py +29 -0
  17. mcp_agent/mcp/mcp_aggregator.py +32 -1
  18. mcp_agent/resources/examples/tensorzero/.env.sample +2 -0
  19. mcp_agent/resources/examples/tensorzero/Makefile +31 -0
  20. mcp_agent/resources/examples/tensorzero/README.md +55 -0
  21. mcp_agent/resources/examples/tensorzero/agent.py +35 -0
  22. mcp_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  23. mcp_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
  24. mcp_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  25. mcp_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
  26. mcp_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
  27. mcp_agent/resources/examples/tensorzero/image_demo.py +67 -0
  28. mcp_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
  29. mcp_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
  30. mcp_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
  31. mcp_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
  32. mcp_agent/resources/examples/tensorzero/simple_agent.py +25 -0
  33. mcp_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
  34. mcp_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
  35. mcp_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
  36. mcp_agent/llm/providers/augmented_llm_tensorzero.py +0 -441
  37. mcp_agent/llm/providers/multipart_converter_tensorzero.py +0 -201
  38. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/WHEEL +0 -0
  39. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/entry_points.txt +0 -0
  40. {fast_agent_mcp-0.2.49.dist-info → fast_agent_mcp-0.2.51.dist-info}/licenses/LICENSE +0 -0
mcp_agent/llm/providers/augmented_llm_openai.py
@@ -307,6 +307,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         request_params = self.get_request_params(request_params=request_params)

         responses: List[ContentBlock] = []
+        model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL

         # TODO -- move this in to agent context management / agent group handling
         messages: List[ChatCompletionMessageParam] = []
@@ -347,7 +348,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             stream = await self._openai_client().chat.completions.create(**arguments)
             # Process the stream
             response = await self._process_stream(stream, self.default_request_params.model)
-
             # Track usage if response is valid and has usage data
             if (
                 hasattr(response, "usage")
@@ -391,6 +391,14 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             # Convert to dict and remove None values
             message_dict = message.model_dump()
             message_dict = {k: v for k, v in message_dict.items() if v is not None}
+            if model_name in (
+                "deepseek-r1-distill-llama-70b",
+                "openai/gpt-oss-120b",
+                "openai/gpt-oss-20b",
+            ):
+                message_dict.pop("reasoning", None)
+                message_dict.pop("channel", None)
+
             messages.append(message_dict)

             message_text = message.content
@@ -412,9 +420,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                 )

                 tool_results = []
-
+
                 for tool_call in message.tool_calls:
-
                     self.show_tool_call(
                         available_tools,
                         tool_call.function.name,
@@ -430,7 +437,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                             else from_json(tool_call.function.arguments, allow_partial=True),
                         ),
                     )
-
+
                     try:
                         result = await self.call_tool(tool_call_request, tool_call.id)
                         self.show_tool_result(result)
@@ -439,10 +446,14 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                     except Exception as e:
                         self.logger.error(f"Tool call {tool_call.id} failed with error: {e}")
                         # Still add the tool_call_id with an error result to prevent missing responses
-                        error_result = CallToolResult(content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")])
+                        error_result = CallToolResult(
+                            content=[TextContent(type="text", text=f"Tool call failed: {str(e)}")]
+                        )
                         tool_results.append((tool_call.id, error_result))
-
-                converted_messages = OpenAIConverter.convert_function_results_to_openai(tool_results)
+
+                converted_messages = OpenAIConverter.convert_function_results_to_openai(
+                    tool_results
+                )
                 messages.extend(converted_messages)

                 self.logger.debug(
mcp_agent/llm/providers/augmented_llm_openrouter.py
@@ -17,24 +17,19 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):

     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize OpenRouter-specific default parameters."""
+        # Get base defaults from parent (includes ModelDatabase lookup)
+        base_params = super()._initialize_default_params(kwargs)
+
+        # Override with OpenRouter-specific settings
         # OpenRouter model names include the provider, e.g., "google/gemini-flash-1.5"
         # The model should be passed in the 'model' kwarg during factory creation.
         chosen_model = kwargs.get("model", DEFAULT_OPENROUTER_MODEL)
-        if not chosen_model:
-            # Unlike Deepseek, OpenRouter *requires* a model path in the identifier.
-            # The factory should extract this before calling the constructor.
-            # We rely on the model being passed correctly via kwargs.
-            # If it's still None here, it indicates an issue upstream (factory or user input).
-            # However, the base class _get_model handles the error if model is None.
-            pass
-
-        return RequestParams(
-            model=chosen_model,  # Will be validated by base class
-            systemPrompt=self.instruction,
-            parallel_tool_calls=True,  # Default based on OpenAI provider
-            max_iterations=20,  # Default based on OpenAI provider
-            use_history=True,  # Default based on OpenAI provider
-        )
+        if chosen_model:
+            base_params.model = chosen_model
+        # If it's still None here, it indicates an issue upstream (factory or user input).
+        # However, the base class _get_model handles the error if model is None.
+
+        return base_params

     def _base_url(self) -> str:
         """Retrieve the OpenRouter base URL from config or use the default."""
mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py (new file)
@@ -0,0 +1,127 @@
+from typing import Any, Dict, List, Optional
+
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam
+
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+
+class TensorZeroOpenAIAugmentedLLM(OpenAIAugmentedLLM):
+    """
+    An LLM augmentation that interacts with TensorZero's OpenAI-compatible inference endpoint.
+    This class extends the base OpenAIAugmentedLLM to handle TensorZero-specific
+    features, such as system template variables and custom parameters.
+    """
+
+    def __init__(self, *args, **kwargs) -> None:
+        """
+        Initializes the TensorZeroOpenAIAugmentedLLM.
+
+        Args:
+            *args: Variable length argument list.
+            **kwargs: Arbitrary keyword arguments.
+        """
+        self._t0_episode_id = kwargs.pop("episode_id", None)
+        self._t0_function_name = kwargs.get("model", "")
+
+        super().__init__(*args, provider=Provider.TENSORZERO, **kwargs)
+        self.logger.info("TensorZeroOpenAIAugmentedLLM initialized.")
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """
+        Initializes TensorZero-specific default parameters. Ensures the model name
+        is correctly prefixed for the TensorZero API.
+        """
+        model = kwargs.get("model", "")
+        if not model.startswith("tensorzero::"):
+            model = f"tensorzero::function_name::{model}"
+
+        self.logger.debug(f"Initializing with TensorZero model: {model}")
+
+        return RequestParams(
+            model=model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    def _base_url(self) -> str:
+        """
+        Constructs the TensorZero OpenAI-compatible endpoint URL.
+        """
+        default_url = "http://localhost:3000/openai/v1"
+        if self.context and self.context.config and hasattr(self.context.config, "tensorzero"):
+            base_url = getattr(self.context.config.tensorzero, "base_url", default_url)
+            # Ensure the path is correctly appended
+            if not base_url.endswith('/openai/v1'):
+                base_url = f"{base_url.rstrip('/')}/openai/v1"
+            self.logger.debug(f"Using TensorZero base URL from config: {base_url}")
+            return base_url
+        self.logger.debug(f"Using default TensorZero base URL: {default_url}")
+        return default_url
+
+    def _prepare_api_request(
+        self,
+        messages: List[ChatCompletionMessageParam],
+        tools: Optional[List[Any]],
+        request_params: RequestParams
+    ) -> Dict[str, Any]:
+        """
+        Prepares the API request for the TensorZero OpenAI-compatible endpoint.
+        This method injects system template variables and other TensorZero-specific
+        parameters into the request. It also handles multimodal inputs.
+        """
+        self.logger.debug("Preparing API request for TensorZero OpenAI endpoint.")
+
+        # Start with the base arguments from the parent class
+        arguments = super()._prepare_api_request(messages, tools, request_params)
+
+        # Handle system template variables
+        if request_params.template_vars:
+            self.logger.debug(f"Injecting template variables: {request_params.template_vars}")
+            system_message_found = False
+            for i, msg in enumerate(messages):
+                if msg.get("role") == "system":
+                    # If content is a string, convert it to the TensorZero format
+                    if isinstance(msg.get("content"), str):
+                        messages[i] = ChatCompletionSystemMessageParam(
+                            role="system",
+                            content=[request_params.template_vars]
+                        )
+                    elif isinstance(msg.get("content"), list):
+                        # If content is already a list, merge the template vars
+                        msg["content"][0].update(request_params.template_vars)
+                    system_message_found = True
+                    break
+
+            if not system_message_found:
+                # If no system message exists, create one
+                messages.insert(0, ChatCompletionSystemMessageParam(
+                    role="system",
+                    content=[request_params.template_vars]
+                ))
+
+        # Add TensorZero-specific extra body parameters
+        extra_body = arguments.get("extra_body", {})
+
+        if self._t0_episode_id:
+            extra_body["tensorzero::episode_id"] = str(self._t0_episode_id)
+            self.logger.debug(f"Added tensorzero::episode_id: {self._t0_episode_id}")
+
+        # Merge metadata arguments
+        if request_params.metadata and isinstance(request_params.metadata, dict):
+            t0_args = request_params.metadata.get("tensorzero_arguments")
+            if t0_args:
+                self.logger.debug(f"Merging tensorzero_arguments from metadata: {t0_args}")
+                for msg in messages:
+                    if msg.get("role") == "system" and isinstance(msg.get("content"), list):
+                        msg["content"][0].update(t0_args)
+                        break
+
+        if extra_body:
+            arguments["extra_body"] = extra_body
+
+        self.logger.debug(f"Final API request arguments: {arguments}")
+        return arguments
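
For orientation, here is a minimal sketch of the request this class ultimately produces, issued directly against the gateway's OpenAI-compatible endpoint with the `openai` client. It assumes a gateway on `localhost:3000` (the class's default base URL) and a TensorZero function named `test_chat` (the name this example's Makefile uses); the commented `extra_body` line shows how an episode id would be threaded through, as `_prepare_api_request` does:

```python
# Sketch only: mirrors what TensorZeroOpenAIAugmentedLLM sends to the gateway.
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI(
        base_url="http://localhost:3000/openai/v1",  # same default as _base_url()
        api_key="not-needed",  # provider credentials live in the gateway, not the client
    )
    response = await client.chat.completions.create(
        # Prefix applied by _initialize_default_params() when missing
        model="tensorzero::function_name::test_chat",
        messages=[{"role": "user", "content": "Hi."}],
        # extra_body={"tensorzero::episode_id": str(episode_id)},  # optional episode threading
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```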
mcp_agent/llm/providers/augmented_llm_xai.py
@@ -16,15 +16,15 @@ class XAIAugmentedLLM(OpenAIAugmentedLLM):

     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize xAI parameters"""
+        # Get base defaults from parent (includes ModelDatabase lookup)
+        base_params = super()._initialize_default_params(kwargs)
+
+        # Override with xAI-specific settings
         chosen_model = kwargs.get("model", DEFAULT_XAI_MODEL)
-
-        return RequestParams(
-            model=chosen_model,
-            systemPrompt=self.instruction,
-            parallel_tool_calls=False,
-            max_iterations=20,
-            use_history=True,
-        )
+        base_params.model = chosen_model
+        base_params.parallel_tool_calls = False
+
+        return base_params

     def _base_url(self) -> str:
         base_url = os.getenv("XAI_BASE_URL", XAI_BASE_URL)
mcp_agent/llm/providers/google_converter.py
@@ -336,6 +336,10 @@ class GoogleConverter:
         """
         Converts a single google.genai types.Content to a fast-agent PromptMessageMultipart.
         """
+        # Official fix for GitHub issue #207: Handle None content or content.parts
+        if content is None or not hasattr(content, "parts") or content.parts is None:
+            return PromptMessageMultipart(role="assistant", content=[])
+
         if content.role == "model" and any(part.function_call for part in content.parts):
             return PromptMessageMultipart(role="assistant", content=[])
mcp_agent/logging/rich_progress.py
@@ -82,6 +82,7 @@ class RichProgressDisplay:
             ProgressAction.PLANNING: "bold blue",
             ProgressAction.READY: "dim green",
             ProgressAction.CALLING_TOOL: "bold magenta",
+            ProgressAction.TOOL_PROGRESS: "bold magenta",
             ProgressAction.FINISHED: "black on green",
             ProgressAction.SHUTDOWN: "black on red",
             ProgressAction.AGGREGATOR_INITIALIZED: "bold green",
@@ -118,16 +119,38 @@ class RichProgressDisplay:
             # Add special formatting for calling tool with dimmed arrow
             formatted_text = f"▎[dim]◀[/dim] {event.action.value}".ljust(17 + 11)
             description = f"[{self._get_action_style(event.action)}]{formatted_text}"
+        elif event.action == ProgressAction.TOOL_PROGRESS:
+            # Format similar to streaming - show progress numbers
+            if event.progress is not None:
+                if event.total is not None:
+                    progress_display = f"{int(event.progress)}/{int(event.total)}"
+                else:
+                    progress_display = str(int(event.progress))
+            else:
+                progress_display = "Processing"
+            formatted_text = f"▎[dim]▶[/dim] {progress_display}".ljust(17 + 11)
+            description = f"[{self._get_action_style(event.action)}]{formatted_text}"
         else:
             description = f"[{self._get_action_style(event.action)}]▎ {event.action.value:<15}"

-        self._progress.update(
-            task_id,
-            description=description,
-            target=event.target or task_name,  # Use task_name as fallback for target
-            details=event.details or "",
-            task_name=task_name,
-        )
+        # Update basic task information
+        update_kwargs = {
+            "description": description,
+            "target": event.target or task_name,  # Use task_name as fallback for target
+            "details": event.details or "",
+            "task_name": task_name,
+        }
+
+        # For TOOL_PROGRESS events, update progress if available
+        if event.action == ProgressAction.TOOL_PROGRESS and event.progress is not None:
+            if event.total is not None:
+                update_kwargs["completed"] = event.progress
+                update_kwargs["total"] = event.total
+            else:
+                # If no total, just show as indeterminate progress
+                self._progress.reset(task_id)
+
+        self._progress.update(task_id, **update_kwargs)

         if (
             event.action == ProgressAction.INITIALIZED
mcp_agent/mcp/helpers/content_helpers.py
@@ -156,3 +156,32 @@ def get_resource_text(result: ReadResourceResult, index: int = 0) -> Optional[st
             return content.text

     return None
+
+
+def split_thinking_content(message: str) -> tuple[Optional[str], str]:
+    """
+    Split a message into thinking and content parts.
+
+    Extracts content between <think> tags and returns it along with the remaining content.
+
+    Args:
+        message: A string that may contain a <think>...</think> block followed by content
+
+    Returns:
+        A tuple of (thinking_content, main_content) where:
+        - thinking_content: The content inside <think> tags, or None if not found/parsing fails
+        - main_content: The content after the thinking block, or the entire message if no thinking block
+    """
+    import re
+
+    # Pattern to match <think>...</think> at the start of the message
+    pattern = r"^<think>(.*?)</think>\s*(.*)$"
+    match = re.match(pattern, message, re.DOTALL)
+
+    if match:
+        thinking_content = match.group(1).strip()
+        main_content = match.group(2).strip()
+        return (thinking_content, main_content)
+    else:
+        # No thinking block found or parsing failed
+        return (None, message)
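
A short usage sketch of the new helper; note the regex anchors at the start of the string, so only a leading `<think>` block is extracted:

```python
# Assumes the helper is imported from the module this diff adds it to.
from mcp_agent.mcp.helpers.content_helpers import split_thinking_content

thinking, content = split_thinking_content("<think>User wants a greeting.</think>Bonjour!")
assert thinking == "User wants a greeting."
assert content == "Bonjour!"

# Messages without a leading <think> block pass through unchanged.
thinking, content = split_thinking_content("Bonjour!")
assert thinking is None and content == "Bonjour!"
```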
mcp_agent/mcp/mcp_aggregator.py
@@ -12,6 +12,7 @@ from typing import (

 from mcp import GetPromptResult, ReadResourceResult
 from mcp.client.session import ClientSession
+from mcp.shared.session import ProgressFnT
 from mcp.types import (
     CallToolResult,
     ListToolsResult,
@@ -136,6 +137,24 @@ class MCPAggregator(ContextDependent):
         # Lock for refreshing tools from a server
         self._refresh_lock = Lock()

+    def _create_progress_callback(self, server_name: str, tool_name: str) -> "ProgressFnT":
+        """Create a progress callback function for tool execution."""
+        async def progress_callback(progress: float, total: float | None, message: str | None) -> None:
+            """Handle progress notifications from MCP tool execution."""
+            logger.info(
+                "Tool progress update",
+                data={
+                    "progress_action": ProgressAction.TOOL_PROGRESS,
+                    "tool_name": tool_name,
+                    "server_name": server_name,
+                    "agent_name": self.agent_name,
+                    "progress": progress,
+                    "total": total,
+                    "details": message or "",  # Put the message in details column
+                },
+            )
+        return progress_callback
+
     async def close(self) -> None:
         """
         Close all persistent connections when the aggregator is deleted.
@@ -468,6 +487,7 @@ class MCPAggregator(ContextDependent):
         method_name: str,
         method_args: Dict[str, Any] = None,
         error_factory: Callable[[str], R] = None,
+        progress_callback: ProgressFnT | None = None,
     ) -> R:
         """
         Generic method to execute operations on a specific server.
@@ -479,6 +499,7 @@ class MCPAggregator(ContextDependent):
             method_name: Name of the method to call on the client session
             method_args: Arguments to pass to the method
             error_factory: Function to create an error return value if the operation fails
+            progress_callback: Optional progress callback for operations that support it

         Returns:
             Result from the operation or an error result
@@ -487,7 +508,12 @@ class MCPAggregator(ContextDependent):
         async def try_execute(client: ClientSession):
             try:
                 method = getattr(client, method_name)
-                return await method(**method_args)
+                # For call_tool method, check if we need to add progress_callback
+                if method_name == "call_tool" and progress_callback:
+                    # The call_tool method signature includes progress_callback parameter
+                    return await method(**method_args, progress_callback=progress_callback)
+                else:
+                    return await method(**method_args)
             except Exception as e:
                 error_msg = (
                     f"Failed to {method_name} '{operation_name}' on server '{server_name}': {e}"
@@ -597,6 +623,10 @@ class MCPAggregator(ContextDependent):
         with tracer.start_as_current_span(f"MCP Tool: {server_name}/{local_tool_name}"):
             trace.get_current_span().set_attribute("tool_name", local_tool_name)
             trace.get_current_span().set_attribute("server_name", server_name)
+
+            # Create progress callback for this tool execution
+            progress_callback = self._create_progress_callback(server_name, local_tool_name)
+
             return await self._execute_on_server(
                 server_name=server_name,
                 operation_type="tool",
@@ -609,6 +639,7 @@ class MCPAggregator(ContextDependent):
                 error_factory=lambda msg: CallToolResult(
                     isError=True, content=[TextContent(type="text", text=msg)]
                 ),
+                progress_callback=progress_callback,
             )

     async def get_prompt(
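
For context, the notifications this callback consumes originate server-side. Below is a minimal sketch of a tool that emits them, using the official `mcp` Python SDK's FastMCP (server code of this kind is not part of this package; the names `demo` and `crunch` are illustrative):

```python
import anyio
from mcp.server.fastmcp import Context, FastMCP

mcp = FastMCP("demo")


@mcp.tool()
async def crunch(items: int, ctx: Context) -> str:
    """Process `items` units of work, reporting progress along the way."""
    for done in range(1, items + 1):
        await anyio.sleep(0.1)  # stand-in for real work
        # Emits a notifications/progress message; the aggregator's callback
        # logs it as a TOOL_PROGRESS event (rendered as "3/10" in the rich display).
        await ctx.report_progress(progress=done, total=items)
    return f"processed {items} items"
```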
mcp_agent/resources/examples/tensorzero/.env.sample (new file)
@@ -0,0 +1,2 @@
+OPENAI_API_KEY=
+ANTHROPIC_API_KEY=
mcp_agent/resources/examples/tensorzero/Makefile (new file)
@@ -0,0 +1,31 @@
+.PHONY: all
+
+build:
+	docker compose build
+
+up:
+	docker compose up -d
+
+logs:
+	docker compose logs -f
+
+tensorzero-logs:
+	docker compose logs -f gateway
+
+mcp-logs:
+	docker compose logs -f mcp-server
+
+minio-logs:
+	docker compose logs -f minio
+
+stop:
+	docker compose stop
+
+agent:
+	uv run agent.py --model=tensorzero.test_chat
+
+simple-agent:
+	uv run simple_agent.py --model=tensorzero.simple_chat
+
+image-test:
+	uv run image_demo.py
mcp_agent/resources/examples/tensorzero/README.md (new file)
@@ -0,0 +1,55 @@
+# About the tensorzero / fast-agent integration
+
+[TensorZero](https://www.tensorzero.com/) is an open source project designed to help LLM application developers rapidly improve their inference calls. Its core features include:
+
+- A uniform inference interface to all leading LLM platforms.
+- The ability to dynamically route to different platforms and to configure failovers.
+- Automated parameter tuning and training
+- Advanced templating features for your system prompts
+- Organization of LLM inference data into a ClickHouse DB, allowing for sophisticated downstream analytics
+- A bunch of other good stuff that is always in development
+
+`tensorzero` is powerful but heavyweight, so we provide here a quickstart example that combines the basic components of `fast-agent`, an MCP server, `tensorzero`, and other supporting services into a cohesive whole.
+
+## Quickstart guide
+
+- Build and activate the `uv` `fast-agent` environment
+- Ensure that ports `3000`, `4000`, `8000`, `9000`, and `9001` are unallocated before running this demo.
+- Run `cp .env.sample .env` and then drop in at least one of `OPENAI_API_KEY` or `ANTHROPIC_API_KEY`. Make sure the accounts are funded.
+- `make up`
+- `make agent`
+
+The demo tests our implementation's ability to:
+
+- Implement the T0 model gateway as an inference backend
+- Implement T0's dynamic templating feature
+- Have in-conversation memory
+- Describe and execute tool calls
+- Remember previous tool calls
+
+A conversation that tests all of this could be:
+
+```
+Hi.
+
+Tell me a poem.
+
+Do you have any tools that you can use?
+
+Please demonstrate the use of that tool on your last response.
+
+Please summarize the conversation so far.
+
+What tool calls have you executed in this session, and what were their results?
+```
+
+## Multimodal support
+
+Run `make image-test` to test the gateway's ability to handle base64-encoded image data.
+
+## Development notes
+
+- `make stop` will stop the MCP server and the tensorzero server
+- `make tensorzero-logs` will tail the tensorzero server logs
+- `make mcp-logs` will tail the MCP server logs
+- Generic `make logs` dumps all log output from all services to the terminal
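
The README's `make image-test` target exercises base64 image input through the gateway. As a rough illustration (not the package's actual `image_demo.py`), the OpenAI-style message shape involved looks like the following; the image comes from the bundled `demo_images` directory, and whether a given TensorZero function accepts images depends on its configuration:

```python
# Illustrative sketch of a base64 image message in OpenAI chat format.
import base64

with open("demo_images/crab.png", "rb") as f:
    b64 = base64.b64encode(f.read()).decode("utf-8")

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What creature is in this image?"},
        {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
    ],
}
```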
mcp_agent/resources/examples/tensorzero/agent.py (new file)
@@ -0,0 +1,35 @@
+import asyncio
+
+from mcp_agent.core.fastagent import FastAgent
+from mcp_agent.core.request_params import RequestParams
+
+# Explicitly provide the path to the config file in the current directory
+CONFIG_FILE = "fastagent.config.yaml"
+fast = FastAgent("fast-agent example", config_path=CONFIG_FILE, ignore_unknown_args=True)
+
+# Define T0 system variables here
+my_t0_system_vars = {
+    "TEST_VARIABLE_1": "Roses are red",
+    "TEST_VARIABLE_2": "Violets are blue",
+    "TEST_VARIABLE_3": "Sugar is sweet",
+    "TEST_VARIABLE_4": "Vibe code responsibly 👍",
+}
+
+
+@fast.agent(
+    name="default",
+    instruction="""
+        You are an agent dedicated to helping developers understand the relationship between TensorZero and fast-agent. If the user makes a request
+        that requires you to invoke the test tools, please do so. When you use the tool, describe your rationale for doing so.
+    """,
+    servers=["tester"],
+    request_params=RequestParams(template_vars=my_t0_system_vars),
+)
+async def main():
+    async with fast.run() as agent_app:  # Get the AgentApp wrapper
+        print("\nStarting interactive session with template_vars set via decorator...")
+        await agent_app.interactive()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())  # type: ignore