fast-agent-mcp 0.2.35__py3-none-any.whl → 0.2.36__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/METADATA +6 -6
- {fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/RECORD +15 -15
- mcp_agent/agents/base_agent.py +2 -2
- mcp_agent/agents/workflow/router_agent.py +1 -1
- mcp_agent/core/enhanced_prompt.py +73 -13
- mcp_agent/core/interactive_prompt.py +118 -8
- mcp_agent/llm/augmented_llm.py +31 -0
- mcp_agent/llm/providers/augmented_llm_anthropic.py +11 -23
- mcp_agent/llm/providers/augmented_llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_openai.py +195 -12
- mcp_agent/llm/providers/multipart_converter_openai.py +4 -3
- mcp_agent/mcp/interfaces.py +1 -1
- {fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.35
+Version: 0.2.36
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
@@ -209,15 +209,15 @@ Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
-Requires-Dist: a2a-…
+Requires-Dist: a2a-sdk>=0.2.9
 Requires-Dist: aiohttp>=3.11.13
-Requires-Dist: anthropic>=0.…
+Requires-Dist: anthropic>=0.55.0
 Requires-Dist: azure-identity>=1.14.0
 Requires-Dist: deprecated>=1.2.18
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: google-genai
-Requires-Dist: mcp==1.…
-Requires-Dist: openai>=1.…
+Requires-Dist: mcp==1.10.1
+Requires-Dist: openai>=1.93.0
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
 Requires-Dist: opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'
@@ -229,7 +229,7 @@ Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
-Requires-Dist: tensorzero>=2025.…
+Requires-Dist: tensorzero>=2025.6.3
 Requires-Dist: typer>=0.15.1
 Provides-Extra: azure
 Requires-Dist: azure-identity>=1.14.0; extra == 'azure'
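The dependency floor bumps above (a2a-sdk, anthropic, mcp, openai, tensorzero) are the headline of this metadata change. As a minimal sketch, assuming only the standard library, an installed environment can be reported against the new pins like this; the pin list is copied from the diff, not read from the wheel:

# Sketch: report installed versions against the 0.2.36 pins.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "a2a-sdk": ">=0.2.9",
    "anthropic": ">=0.55.0",
    "mcp": "==1.10.1",  # exact pin in the real metadata
    "openai": ">=1.93.0",
    "tensorzero": ">=2025.6.3",
}

for name, spec in PINS.items():
    try:
        print(f"{name}: installed {version(name)}, required {spec}")
    except PackageNotFoundError:
        print(f"{name}: not installed (required {spec})")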
{fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/RECORD
CHANGED
@@ -9,7 +9,7 @@ mcp_agent/mcp_server_registry.py,sha256=b3iSb-0ULYc5yUG2KHav41WGwSYWiJCGQsOwWHWB
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=EAYlcP1qqI1D0_CS808I806z1048FBjZQxxpcCZPeIU,3154
-mcp_agent/agents/base_agent.py,sha256=…
+mcp_agent/agents/base_agent.py,sha256=240FsLvgsjj_exhsj37WIBxEGEyuBuM8mx9d30T26zs,25841
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
 mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
@@ -17,7 +17,7 @@ mcp_agent/agents/workflow/orchestrator_agent.py,sha256=lArV7wHwPYepSuxe0ybTGJRJv
 mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
 mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
 mcp_agent/agents/workflow/parallel_agent.py,sha256=JaQFp35nmAdoBRLAwx8BfnK7kirVq9PMw24LQ3ZEzoc,7705
-mcp_agent/agents/workflow/router_agent.py,sha256=…
+mcp_agent/agents/workflow/router_agent.py,sha256=6tvI5D_ssKNZ6-tNxYHmw6r6DAQMYgqz3PZKZz2rC44,9466
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=XjrgXMBaPKkVqAFo8T9LJz6Tp1-ivrKDOuNYWke99YA,3090
@@ -32,11 +32,11 @@ mcp_agent/core/agent_app.py,sha256=KJdx0Qbh7Gb4wA8_LwKriogc27SraRIrvMqHsOCVVt0,1
 mcp_agent/core/agent_types.py,sha256=DogMcOoRwk70CFSetZ09madRcPDlhPn1iXZVeOcLV8Q,1507
 mcp_agent/core/direct_decorators.py,sha256=HY_7S7OtfZPqAeqC3_hPYa1d6zTnEyiOeI7JxvnWqTM,16786
 mcp_agent/core/direct_factory.py,sha256=UNAjHHFRLrQ3D934RMsKsh0Oas7LXLIVslgrzcetM6A,19090
-mcp_agent/core/enhanced_prompt.py,sha256=…
+mcp_agent/core/enhanced_prompt.py,sha256=5i676U9IUret8aLJ034jdCaetkGmKCcsGpOH8KVAGCY,26058
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
 mcp_agent/core/fastagent.py,sha256=ak5rAyoreN5SqqoMUMP-Cr46JsOL5R2ieGyG7B5P3E8,23658
-mcp_agent/core/interactive_prompt.py,sha256=…
+mcp_agent/core/interactive_prompt.py,sha256=J4MGENnBSmTFIv09kRAevTJutGbCdQsX2fqRwjDuK8s,30669
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=qmFWZXeYEJyYw2IwonyrTnZWxQG7qX6bKpOPcqETa60,1603
@@ -50,7 +50,7 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=…
+mcp_agent/llm/augmented_llm.py,sha256=25LGtrHSxXJWhQC7imXQ46xxxvOMfHtTAs7vmNGaNO4,26875
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=F8KifmTwoQ7zyncjmoRek8SBfGdgc9yc5LRXwMQH-bg,8640
 mcp_agent/llm/augmented_llm_playback.py,sha256=BQeBXRpO-xGAY9wIJxyde6xpHmZEdQPLd32frF8t3QQ,4916
 mcp_agent/llm/augmented_llm_slow.py,sha256=DDSD8bL2flmQrVHZm-UDs7sR8aHRWkDOcOW-mX_GPok,2067
@@ -66,18 +66,18 @@ mcp_agent/llm/usage_tracking.py,sha256=HdBehPMt0bZzEgRmTnbMdgpLVuTp6L_VJTQx5Z25z
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
 mcp_agent/llm/providers/augmented_llm_aliyun.py,sha256=XylkJKZ9theSVUxJKOZkf1244hgzng4Ng4Dr209Qb-w,1101
-mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=…
-mcp_agent/llm/providers/augmented_llm_azure.py,sha256=…
+mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=Ve5Fy-cORgntFQQan3oajdQixA5a8y_q4n9ir6SvcY0,23942
+mcp_agent/llm/providers/augmented_llm_azure.py,sha256=amhDGnFL9m8Jj_Eze5C98DVwC7Gm6s25WdGPdepH3fU,5903
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=zI9a90dwT4r6E1f_xp4K50Cj9sD7y7kNRgjo0s1pd5w,3804
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=knMIUt-XvnIwpch8Er9_B9faraN4ZKKYYtZBk9Uvpho,22161
 mcp_agent/llm/providers/augmented_llm_google_oai.py,sha256=cO4dvjTl9ymqEurCOo5nP09ATfXVjgkuk1yZAlWpS1s,1137
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=…
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=J2lTyXW4Fl6rxKNmmzTDkMuZ4sqk-vJNBfV9YuW0tvo,24073
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/augmented_llm_tensorzero.py,sha256=Mol_Wzj_ZtccW-LMw0oFwWUt1m1yfofloay9QYNP23c,20729
 mcp_agent/llm/providers/google_converter.py,sha256=zsqxJJ636WzCL2K6w-yB94O8bdNR6mo8f5mQEnUJFyg,16831
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
-mcp_agent/llm/providers/multipart_converter_openai.py,sha256=…
+mcp_agent/llm/providers/multipart_converter_openai.py,sha256=kCvtTFOcOejg2BVI3_-F9OCFxAoShSj2i0hdCajHCIw,15955
 mcp_agent/llm/providers/multipart_converter_tensorzero.py,sha256=BFTdyVk42HZskDAuTHicfDTUJq89d1fz8C9nAOuHxlE,8646
 mcp_agent/llm/providers/openai_multipart.py,sha256=qKBn7d3jSabnJmVgWweVzqh8q9mBqr09fsPmP92niAQ,6899
 mcp_agent/llm/providers/openai_utils.py,sha256=T4bTCL9f7DsoS_zoKgQKv_FUv_4n98vgbvaUpdWZJr8,1875
@@ -94,7 +94,7 @@ mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/common.py,sha256=MpSC0fLO21RcDz4VApah4C8_LisVGz7OXkR17Xw-9mY,431
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/hf_auth.py,sha256=YwEt7hMDJODFUIc6Zi1HLYsfVnvANGvyhpQwcPCMAgI,3379
-mcp_agent/mcp/interfaces.py,sha256=…
+mcp_agent/mcp/interfaces.py,sha256=NxUDi4eI-qWfaOS1QHWr3EjAweV-CX_CBJeBCwTwr7g,7102
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=V17Lj21rMGIKKVAIyNx5l5gmC8jQuohjJGpRcoCXfVA,6862
 mcp_agent/mcp/mcp_aggregator.py,sha256=CrUtj-BHXXCb7sUlc_MF1d7HkiF9rjh6MKaGprflBB4,47076
@@ -158,8 +158,8 @@ mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkde
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
 mcp_agent/tools/tool_definition.py,sha256=L3Pxl-uLEXqlVoo-bYuFTFALeI-2pIU44YgFhsTKEtM,398
 mcp_agent/ui/console_display.py,sha256=UKqax5V2TC0hkZZORmmd6UqUk0DGX7A25E3h1k9f42k,10982
-fast_agent_mcp-0.2.35.dist-info/METADATA,sha256=…
-fast_agent_mcp-0.2.35.dist-info/WHEEL,sha256=…
-fast_agent_mcp-0.2.35.dist-info/entry_points.txt,sha256=…
-fast_agent_mcp-0.2.35.dist-info/licenses/LICENSE,sha256=…
-fast_agent_mcp-0.2.35.dist-info/RECORD,,
+fast_agent_mcp-0.2.36.dist-info/METADATA,sha256=a03lmLxEkpn8uOkq0-jZUBsqxwVi80vmZeUgcVSPrxI,30798
+fast_agent_mcp-0.2.36.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.36.dist-info/entry_points.txt,sha256=oKQeSUVn87pJv8_k1NQ7Ak8cXaaXHCnPAOJRCV_uUVg,230
+fast_agent_mcp-0.2.36.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.36.dist-info/RECORD,,
mcp_agent/agents/base_agent.py
CHANGED
@@ -20,7 +20,7 @@ from typing import (
     Union,
 )
 
-from …
+from a2a.types import AgentCapabilities, AgentCard, AgentSkill
 from mcp.types import (
     CallToolResult,
     EmbeddedResource,
@@ -704,7 +704,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
     def usage_accumulator(self) -> Optional["UsageAccumulator"]:
        """
        Return the usage accumulator for tracking token usage across turns.
-
+
        Returns:
            UsageAccumulator object if LLM is attached, None otherwise
        """
mcp_agent/agents/workflow/router_agent.py
CHANGED
@@ -21,7 +21,7 @@ from mcp_agent.mcp.interfaces import AugmentedLLMProtocol, ModelT
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 if TYPE_CHECKING:
-    from …
+    from a2a.types import AgentCard
 
     from mcp_agent.context import Context
 
mcp_agent/core/enhanced_prompt.py
CHANGED
@@ -40,6 +40,59 @@ in_multiline_mode = False
 # Track whether help text has been shown globally
 help_message_shown = False
 
+# Track which agents have shown their info
+_agent_info_shown = set()
+
+
+async def _display_agent_info_helper(agent_name: str, agent_provider: object) -> None:
+    """Helper function to display agent information."""
+    # Only show once per agent
+    if agent_name in _agent_info_shown:
+        return
+
+    try:
+        # Get agent info
+        if hasattr(agent_provider, "_agent"):
+            # This is an AgentApp - get the specific agent
+            agent = agent_provider._agent(agent_name)
+        else:
+            # This is a single agent
+            agent = agent_provider
+
+        # Get counts
+        servers = await agent.list_servers()
+        server_count = len(servers) if servers else 0
+
+        tools_result = await agent.list_tools()
+        tool_count = (
+            len(tools_result.tools) if tools_result and hasattr(tools_result, "tools") else 0
+        )
+
+        prompts_dict = await agent.list_prompts()
+        prompt_count = sum(len(prompts) for prompts in prompts_dict.values()) if prompts_dict else 0
+
+        # Display with proper pluralization and subdued formatting
+        if server_count == 0:
+            rich_print(
+                f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]: No MCP Servers attached[/dim]"
+            )
+        else:
+            # Pluralization helpers
+            server_word = "Server" if server_count == 1 else "Servers"
+            tool_word = "tool" if tool_count == 1 else "tools"
+            prompt_word = "prompt" if prompt_count == 1 else "prompts"
+
+            rich_print(
+                f"[dim]Agent [/dim][blue]{agent_name}[/blue][dim]:[/dim] {server_count:,}[dim] MCP {server_word}, [/dim]{tool_count:,}[dim] {tool_word}, [/dim]{prompt_count:,}[dim] {prompt_word} available[/dim]"
+            )
+
+        # Mark as shown
+        _agent_info_shown.add(agent_name)
+
+    except Exception:
+        # Silently ignore errors to not disrupt the user experience
+        pass
+
 
 class AgentCompleter(Completer):
     """Provide completion for agent names and common commands."""
@@ -54,11 +107,11 @@ class AgentCompleter(Completer):
         self.agents = agents
         # Map commands to their descriptions for better completion hints
         self.commands = {
-            "…
-            "…
-            "prompt": "Apply a specific prompt by name (/prompt <name>)",  # New command
+            "tools": "List and call MCP tools",
+            "prompt": "List and select MCP prompts, or apply specific prompt (/prompt <name>)",
             "agents": "List available agents",
             "usage": "Show current usage statistics",
+            "help": "Show available commands",
             "clear": "Clear the screen",
             "STOP": "Stop this prompting session and move to next workflow step",
             "EXIT": "Exit fast-agent, terminating any running workflows",
@@ -66,8 +119,8 @@ class AgentCompleter(Completer):
         }
         if is_human_input:
            self.commands.pop("agents")
-            self.commands.pop("prompts")  # Remove prompts command in human input mode
             self.commands.pop("prompt", None)  # Remove prompt command in human input mode
+            self.commands.pop("tools", None)  # Remove tools command in human input mode
             self.commands.pop("usage", None)  # Remove usage command in human input mode
         self.agent_types = agent_types or {}
 
@@ -260,6 +313,7 @@ async def get_enhanced_input(
     agent_types: dict[str, AgentType] = None,
     is_human_input: bool = False,
     toolbar_color: str = "ansiblue",
+    agent_provider: object = None,
 ) -> str:
     """
     Enhanced input with advanced prompt_toolkit features.
@@ -274,6 +328,7 @@ async def get_enhanced_input(
         agent_types: Dictionary mapping agent names to their types for display
         is_human_input: Whether this is a human input request (disables agent selection features)
         toolbar_color: Color to use for the agent name in the toolbar (default: "ansiblue")
+        agent_provider: Optional agent provider for displaying agent info
 
     Returns:
         User input string
@@ -300,14 +355,15 @@ async def get_enhanced_input(
     if in_multiline_mode:
         mode_style = "ansired"  # More noticeable for multiline mode
         mode_text = "MULTILINE"
-        toggle_text = "Normal…
+        toggle_text = "Normal"
     else:
         mode_style = "ansigreen"
         mode_text = "NORMAL"
-        toggle_text = "Multiline…
+        toggle_text = "Multiline"
 
     shortcuts = [
         ("Ctrl+T", toggle_text),
+        ("Ctrl+E", "External"),
         ("Ctrl+L", "Clear"),
         ("↑/↓", "History"),
     ]
@@ -373,8 +429,13 @@ async def get_enhanced_input(
         rich_print("[dim]Type /help for commands. Ctrl+T toggles multiline mode.[/dim]")
     else:
         rich_print(
-            "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode.[/dim]"
+            "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode.[/dim]\n"
         )
+
+        # Display agent info right after help text if agent_provider is available
+        if agent_provider and not is_human_input:
+            await _display_agent_info_helper(agent_name, agent_provider)
+
     rich_print()
     help_message_shown = True
 
@@ -394,12 +455,8 @@ async def get_enhanced_input(
             return "LIST_AGENTS"
         elif cmd == "usage":
             return "SHOW_USAGE"
-        elif cmd == "prompts":
-            # Return a dictionary with select_prompt action instead of a string
-            # This way it will match what the command handler expects
-            return {"select_prompt": True, "prompt_name": None}
         elif cmd == "prompt":
-            # Handle /prompt with no arguments
+            # Handle /prompt with no arguments as interactive mode
             if len(cmd_parts) > 1:
                 # Direct prompt selection with name or number
                 prompt_arg = cmd_parts[1].strip()
@@ -409,8 +466,11 @@ async def get_enhanced_input(
             else:
                 return f"SELECT_PROMPT:{prompt_arg}"
         else:
-            # If /prompt is used without arguments,…
+            # If /prompt is used without arguments, show interactive selection
            return {"select_prompt": True, "prompt_name": None}
+        elif cmd == "tools":
+            # Return a dictionary with list_tools action
+            return {"list_tools": True}
         elif cmd == "exit":
             return "EXIT"
         elif cmd.lower() == "stop":
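Note that get_enhanced_input now returns two shapes of result: plain string sentinels ("EXIT", "SHOW_USAGE", "SELECT_PROMPT:<name>") and dict actions such as {"select_prompt": True} or the new {"list_tools": True}. A hedged sketch of a dispatcher over both shapes; only the shapes themselves come from the diff, the handler strings are placeholders:

# Illustrative dispatcher over the result shapes returned above.
def dispatch(result) -> str:
    if isinstance(result, dict):
        if result.get("list_tools"):
            return "render tools table"  # handled by _list_tools in the next file
        if result.get("select_prompt"):
            return "interactive prompt selection"  # handled by _select_prompt
        return "unhandled dict action"
    if result.startswith("SELECT_PROMPT:"):
        return f"apply prompt {result.split(':', 1)[1]}"
    if result in ("EXIT", "STOP", "SHOW_USAGE", "LIST_AGENTS"):
        return f"sentinel {result}"
    return "treat as user message"

print(dispatch({"list_tools": True}))       # -> render tools table
print(dispatch("SELECT_PROMPT:summarize"))  # -> apply prompt summarize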
mcp_agent/core/interactive_prompt.py
CHANGED
@@ -23,6 +23,7 @@ from rich.table import Table
 
 from mcp_agent.core.agent_types import AgentType
 from mcp_agent.core.enhanced_prompt import (
+    _display_agent_info_helper,
     get_argument_input,
     get_enhanced_input,
     get_selection_input,
@@ -121,6 +122,7 @@ class InteractivePrompt:
                 multiline=False,  # Default to single-line mode
                 available_agent_names=available_agents,
                 agent_types=self.agent_types,  # Pass agent types for display
+                agent_provider=prompt_provider,  # Pass agent provider for info display
             )
 
             # Handle special commands - pass "True" to enable agent switching
@@ -132,6 +134,9 @@ class InteractivePrompt:
                     new_agent = command_result["switch_agent"]
                     if new_agent in available_agents_set:
                         agent = new_agent
+                        # Display new agent info immediately when switching
+                        rich_print()  # Add spacing
+                        await _display_agent_info_helper(agent, prompt_provider)
                         continue
                     else:
                         rich_print(f"[red]Agent '{new_agent}' not found[/red]")
@@ -174,6 +179,10 @@ class InteractivePrompt:
                         # Use the name-based selection
                         await self._select_prompt(prompt_provider, agent, prompt_name)
                     continue
+                elif "list_tools" in command_result and prompt_provider:
+                    # Handle tools list display
+                    await self._list_tools(prompt_provider, agent)
+                    continue
                 elif "show_usage" in command_result:
                     # Handle usage display
                     await self._show_usage(prompt_provider, agent)
@@ -333,13 +342,17 @@ class InteractivePrompt:
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")
 
     async def _select_prompt(
-        self, …
+        self,
+        prompt_provider: PromptProvider,
+        agent_name: str,
+        requested_name: Optional[str] = None,
+        send_func: Optional[SendFunc] = None,
     ) -> None:
         """
         Select and apply a prompt.
 
         Args:
-            prompt_provider: Provider that implements list_prompts and…
+            prompt_provider: Provider that implements list_prompts and get_prompt
             agent_name: Name of the agent
             requested_name: Optional name of the prompt to apply
         """
@@ -569,12 +582,54 @@ class InteractivePrompt:
                     if arg_value:
                         arg_values[arg_name] = arg_value
 
-            # Apply the prompt
+            # Apply the prompt using generate() for proper progress display
             namespaced_name = selected_prompt["namespaced_name"]
             rich_print(f"\n[bold]Applying prompt [cyan]{namespaced_name}[/cyan]...[/bold]")
 
-            # …
-            …
+            # Get the agent directly for generate() call
+            if hasattr(prompt_provider, "_agent"):
+                # This is an AgentApp - get the specific agent
+                agent = prompt_provider._agent(agent_name)
+            else:
+                # This is a single agent
+                agent = prompt_provider
+
+            try:
+                # Use agent.apply_prompt() which handles everything properly:
+                # - get_prompt() to fetch template
+                # - convert to multipart
+                # - call generate() for progress display
+                # - return response text
+                # Response display is handled by the agent's show_ methods, don't print it here
+
+                # Fetch the prompt first (without progress display)
+                prompt_result = await agent.get_prompt(namespaced_name, arg_values)
+
+                if not prompt_result or not prompt_result.messages:
+                    rich_print(
+                        f"[red]Prompt '{namespaced_name}' could not be found or contains no messages[/red]"
+                    )
+                    return
+
+                # Convert to multipart format
+                from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+                multipart_messages = PromptMessageMultipart.from_get_prompt_result(prompt_result)
+
+                # Now start progress display for the actual generation
+                progress_display.resume()
+                try:
+                    await agent.generate(multipart_messages, None)
+                finally:
+                    # Pause again for the next UI interaction
+                    progress_display.pause()
+
+                # Show usage info after the turn (same as send_wrapper does)
+                if hasattr(prompt_provider, "_show_turn_usage"):
+                    prompt_provider._show_turn_usage(agent_name)
+
+            except Exception as e:
+                rich_print(f"[red]Error applying prompt: {e}[/red]")
 
         except Exception as e:
             import traceback
@@ -582,6 +637,61 @@ class InteractivePrompt:
             rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")
 
+    async def _list_tools(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+        """
+        List available tools for an agent.
+
+        Args:
+            prompt_provider: Provider that implements list_tools
+            agent_name: Name of the agent
+        """
+        console = Console()
+
+        try:
+            # Get agent to list tools from
+            if hasattr(prompt_provider, "_agent"):
+                # This is an AgentApp - get the specific agent
+                agent = prompt_provider._agent(agent_name)
+            else:
+                # This is a single agent
+                agent = prompt_provider
+
+            rich_print(f"\n[bold]Fetching tools for agent [cyan]{agent_name}[/cyan]...[/bold]")
+
+            # Get tools using list_tools
+            tools_result = await agent.list_tools()
+
+            if not tools_result or not hasattr(tools_result, "tools") or not tools_result.tools:
+                rich_print("[yellow]No tools available for this agent[/yellow]")
+                return
+
+            # Create a table for better display
+            table = Table(title="Available MCP Tools")
+            table.add_column("#", justify="right", style="cyan")
+            table.add_column("Tool Name", style="bright_blue")
+            table.add_column("Description")
+
+            # Add tools to table
+            for i, tool in enumerate(tools_result.tools):
+                table.add_row(
+                    str(i + 1),
+                    tool.name,
+                    getattr(tool, "description", "No description") or "No description",
+                )
+
+            console.print(table)
+
+            # Add usage instructions
+            rich_print("\n[bold]Usage:[/bold]")
+            rich_print("  • Tools are automatically available in your conversation")
+            rich_print("  • Just ask the agent to use a tool by name or description")
+
+        except Exception as e:
+            import traceback
+
+            rich_print(f"[red]Error listing tools: {e}[/red]")
+            rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
     async def _show_usage(self, prompt_provider: PromptProvider, agent_name: str) -> None:
         """
         Show usage statistics for the current agent(s) in a colorful table format.
@@ -593,13 +703,13 @@ class InteractivePrompt:
         try:
             # Collect all agents from the prompt provider
             agents_to_show = collect_agents_from_provider(prompt_provider, agent_name)
-
+
             if not agents_to_show:
                 rich_print("[yellow]No usage data available[/yellow]")
                 return
-
+
             # Use the shared display utility
             display_usage_report(agents_to_show, show_if_progress_disabled=True)
-
+
         except Exception as e:
             rich_print(f"[red]Error showing usage: {e}[/red]")
mcp_agent/llm/augmented_llm.py
CHANGED
@@ -554,6 +554,37 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         }
         self.logger.debug("Chat in progress", data=data)
 
+    def _update_streaming_progress(self, content: str, model: str, estimated_tokens: int) -> int:
+        """Update streaming progress with token estimation and formatting.
+
+        Args:
+            content: The text content from the streaming event
+            model: The model name
+            estimated_tokens: Current token count to update
+
+        Returns:
+            Updated estimated token count
+        """
+        # Rough estimate: 1 token per 4 characters (OpenAI's typical ratio)
+        text_length = len(content)
+        additional_tokens = max(1, text_length // 4)
+        new_total = estimated_tokens + additional_tokens
+
+        # Format token count for display
+        token_str = str(new_total).rjust(5)
+
+        # Emit progress event
+        data = {
+            "progress_action": ProgressAction.STREAMING,
+            "model": model,
+            "agent_name": self.name,
+            "chat_turn": self.chat_turn(),
+            "details": token_str.strip(),  # Token count goes in details for STREAMING action
+        }
+        self.logger.info("Streaming progress", data=data)
+
+        return new_total
+
     def _log_chat_finished(self, model: Optional[str] = None) -> None:
         """Log a chat finished event"""
         data = {
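The new _update_streaming_progress centralizes the rough one-token-per-four-characters estimate that the provider classes below now share. A minimal sketch of just the arithmetic, with the progress-event plumbing omitted; the function name here is illustrative:

# Sketch of the 4-chars-per-token heuristic; each delta contributes
# at least one token, so the displayed count always advances.
def estimate_stream_tokens(deltas: list[str]) -> int:
    total = 0
    for text in deltas:
        total += max(1, len(text) // 4)
    return total

print(estimate_stream_tokens(["Hello, ", "world!"]))  # -> 2 (estimated)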
mcp_agent/llm/providers/augmented_llm_anthropic.py
CHANGED
@@ -111,14 +111,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                     and hasattr(event, "delta")
                     and event.delta.type == "text_delta"
                 ):
-                    # …
-                    …
-                    estimated_tokens += max(1, text_length // 4)
-
-                    # Update progress on every token for real-time display
-                    token_str = str(estimated_tokens).rjust(5)
-                    # print(f"DEBUG: Streaming tokens: {token_str}")
-                    self._emit_streaming_progress(model, token_str)
+                    # Use base class method for token estimation and progress emission
+                    estimated_tokens = self._update_streaming_progress(event.delta.text, model, estimated_tokens)
 
                 # Also check for final message_delta events with actual usage info
                 elif (
@@ -127,9 +121,16 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                     and event.usage.output_tokens
                 ):
                     actual_tokens = event.usage.output_tokens
+                    # Emit final progress with actual token count
                     token_str = str(actual_tokens).rjust(5)
-                    …
-                    …
+                    data = {
+                        "progress_action": ProgressAction.STREAMING,
+                        "model": model,
+                        "agent_name": self.name,
+                        "chat_turn": self.chat_turn(),
+                        "details": token_str.strip(),
+                    }
+                    self.logger.info("Streaming progress", data=data)
 
             # Get the final message with complete usage data
             message = await stream.get_final_message()
@@ -142,19 +143,6 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return message
 
-    def _emit_streaming_progress(self, model: str, token_str: str) -> None:
-        """Emit a streaming progress event that goes directly to progress display."""
-        data = {
-            "progress_action": ProgressAction.STREAMING,
-            "model": model,
-            "agent_name": self.name,
-            "chat_turn": self.chat_turn(),
-            "details": token_str.strip(),  # Token count goes in details for STREAMING action
-        }
-        # print(f"DEBUG: Emitting streaming progress event with data: {data}")
-        # Use a special logger level or namespace to avoid polluting regular logs
-        self.logger.info("Streaming progress", data=data)
-
     async def _anthropic_completion(
         self,
         message_param,
mcp_agent/llm/providers/augmented_llm_azure.py
CHANGED
@@ -1,4 +1,4 @@
-from openai import …
+from openai import AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError
 
 from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.llm.provider_types import Provider
@@ -93,7 +93,7 @@ class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):
         if not self.resource_name and self.base_url:
             self.resource_name = _extract_resource_name(self.base_url)
 
-    def _openai_client(self) -> …
+    def _openai_client(self) -> AsyncOpenAI:
         """
         Returns an AzureOpenAI client, handling both API Key and DefaultAzureCredential.
         """
@@ -104,7 +104,7 @@ class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):
                     "Missing Azure endpoint",
                     "azure_endpoint (base_url) is None at client creation time.",
                 )
-            return …
+            return AsyncAzureOpenAI(
                 azure_ad_token_provider=self.get_azure_token,
                 azure_endpoint=self.base_url,
                 api_version=self.api_version,
@@ -116,7 +116,7 @@ class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):
                     "Missing Azure endpoint",
                     "azure_endpoint (base_url) is None at client creation time.",
                 )
-            return …
+            return AsyncAzureOpenAI(
                 api_key=self.api_key,
                 azure_endpoint=self.base_url,
                 api_version=self.api_version,
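For reference, the two construction paths these Azure hunks pin down, token-provider auth versus API key, look roughly like this in isolation. This is a hedged sketch: the endpoint, API version, and credential scope are placeholders, and it uses azure-identity's get_bearer_token_provider rather than fast-agent's own get_azure_token method:

# Sketch: the two AsyncAzureOpenAI construction paths.
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncAzureOpenAI

ENDPOINT = "https://example-resource.openai.azure.com"  # placeholder
API_VERSION = "2024-02-01"  # placeholder

def make_client(api_key: str | None) -> AsyncAzureOpenAI:
    if api_key:
        # API-key path
        return AsyncAzureOpenAI(
            api_key=api_key,
            azure_endpoint=ENDPOINT,
            api_version=API_VERSION,
        )
    # DefaultAzureCredential path, mirroring azure_ad_token_provider above
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(),
        "https://cognitiveservices.azure.com/.default",
    )
    return AsyncAzureOpenAI(
        azure_ad_token_provider=token_provider,
        azure_endpoint=ENDPOINT,
        api_version=API_VERSION,
    )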
mcp_agent/llm/providers/augmented_llm_openai.py
CHANGED
@@ -8,7 +8,8 @@ from mcp.types import (
     ImageContent,
     TextContent,
 )
-from openai import …
+from openai import AsyncOpenAI, AuthenticationError
+from openai.lib.streaming.chat import ChatCompletionStreamState
 
 # from openai.types.beta.chat import
 from openai.types.chat import (
@@ -22,6 +23,7 @@ from rich.text import Text
 
 from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.core.prompt import Prompt
+from mcp_agent.event_progress import ProgressAction
 from mcp_agent.llm.augmented_llm import (
     AugmentedLLM,
     RequestParams,
@@ -103,9 +105,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
     def _base_url(self) -> str:
         return self.context.config.openai.base_url if self.context.config.openai else None
 
-    def _openai_client(self) -> …
+    def _openai_client(self) -> AsyncOpenAI:
         try:
-            return …
+            return AsyncOpenAI(api_key=self._api_key(), base_url=self._base_url())
         except AuthenticationError as e:
             raise ProviderKeyError(
                 "Invalid OpenAI API key",
@@ -113,6 +115,182 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                 "Please check that your API key is valid and not expired.",
             ) from e
 
+    async def _process_stream(self, stream, model: str):
+        """Process the streaming response and display real-time token usage."""
+        # Track estimated output tokens by counting text chunks
+        estimated_tokens = 0
+
+        # For non-OpenAI providers (like Ollama), ChatCompletionStreamState might not work correctly
+        # Fall back to manual accumulation if needed
+        # TODO -- consider this and whether to subclass instead
+        if self.provider in [Provider.GENERIC, Provider.OPENROUTER]:
+            return await self._process_stream_manual(stream, model)
+
+        # Use ChatCompletionStreamState helper for accumulation (OpenAI only)
+        state = ChatCompletionStreamState()
+
+        # Process the stream chunks
+        async for chunk in stream:
+            # Handle chunk accumulation
+            state.handle_chunk(chunk)
+
+            # Count tokens in real-time from content deltas
+            if chunk.choices and chunk.choices[0].delta.content:
+                content = chunk.choices[0].delta.content
+                # Use base class method for token estimation and progress emission
+                estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens)
+
+        # Get the final completion with usage data
+        final_completion = state.get_final_completion()
+
+        # Log final usage information
+        if hasattr(final_completion, "usage") and final_completion.usage:
+            actual_tokens = final_completion.usage.completion_tokens
+            # Emit final progress with actual token count
+            token_str = str(actual_tokens).rjust(5)
+            data = {
+                "progress_action": ProgressAction.STREAMING,
+                "model": model,
+                "agent_name": self.name,
+                "chat_turn": self.chat_turn(),
+                "details": token_str.strip(),
+            }
+            self.logger.info("Streaming progress", data=data)
+
+            self.logger.info(
+                f"Streaming complete - Model: {model}, Input tokens: {final_completion.usage.prompt_tokens}, Output tokens: {final_completion.usage.completion_tokens}"
+            )
+
+        return final_completion
+
+    # TODO - as per other comment this needs to go in another class. There are a number of "special" cases dealt with
+    # here to deal with OpenRouter idiosyncrasies between e.g. Anthropic and Gemini models.
+    async def _process_stream_manual(self, stream, model: str):
+        """Manual stream processing for providers like Ollama that may not work with ChatCompletionStreamState."""
+        from openai.types.chat import ChatCompletionMessageToolCall
+        from openai.types.chat.chat_completion_message_tool_call import Function
+
+        # Track estimated output tokens by counting text chunks
+        estimated_tokens = 0
+
+        # Manual accumulation of response data
+        accumulated_content = ""
+        role = "assistant"
+        tool_calls_map = {}  # Use a map to accumulate tool calls by index
+        function_call = None
+        finish_reason = None
+        usage_data = None
+
+        # Process the stream chunks manually
+        async for chunk in stream:
+            # Count tokens in real-time from content deltas
+            if chunk.choices and chunk.choices[0].delta.content:
+                content = chunk.choices[0].delta.content
+                accumulated_content += content
+                # Use base class method for token estimation and progress emission
+                estimated_tokens = self._update_streaming_progress(content, model, estimated_tokens)
+
+            # Extract other fields from the chunk
+            if chunk.choices:
+                choice = chunk.choices[0]
+                if choice.delta.role:
+                    role = choice.delta.role
+                if choice.delta.tool_calls:
+                    # Accumulate tool call deltas
+                    for delta_tool_call in choice.delta.tool_calls:
+                        if delta_tool_call.index is not None:
+                            if delta_tool_call.index not in tool_calls_map:
+                                tool_calls_map[delta_tool_call.index] = {
+                                    "id": delta_tool_call.id,
+                                    "type": delta_tool_call.type or "function",
+                                    "function": {
+                                        "name": delta_tool_call.function.name
+                                        if delta_tool_call.function
+                                        else None,
+                                        "arguments": "",
+                                    },
+                                }
+
+                            # Always update if we have new data (needed for OpenRouter Gemini)
+                            if delta_tool_call.id:
+                                tool_calls_map[delta_tool_call.index]["id"] = delta_tool_call.id
+                            if delta_tool_call.function:
+                                if delta_tool_call.function.name:
+                                    tool_calls_map[delta_tool_call.index]["function"]["name"] = (
+                                        delta_tool_call.function.name
+                                    )
+                                # Handle arguments - they might come as None, empty string, or actual content
+                                if delta_tool_call.function.arguments is not None:
+                                    tool_calls_map[delta_tool_call.index]["function"][
+                                        "arguments"
+                                    ] += delta_tool_call.function.arguments
+
+                if choice.delta.function_call:
+                    function_call = choice.delta.function_call
+                if choice.finish_reason:
+                    finish_reason = choice.finish_reason
+
+            # Extract usage data if available
+            if hasattr(chunk, "usage") and chunk.usage:
+                usage_data = chunk.usage
+
+        # Convert accumulated tool calls to proper format.
+        tool_calls = None
+        if tool_calls_map:
+            tool_calls = []
+            for idx in sorted(tool_calls_map.keys()):
+                tool_call_data = tool_calls_map[idx]
+                # Only add tool calls that have valid data
+                if tool_call_data["id"] and tool_call_data["function"]["name"]:
+                    tool_calls.append(
+                        ChatCompletionMessageToolCall(
+                            id=tool_call_data["id"],
+                            type=tool_call_data["type"],
+                            function=Function(
+                                name=tool_call_data["function"]["name"],
+                                arguments=tool_call_data["function"]["arguments"],
+                            ),
+                        )
+                    )
+
+        # Create a ChatCompletionMessage manually
+        message = ChatCompletionMessage(
+            content=accumulated_content,
+            role=role,
+            tool_calls=tool_calls if tool_calls else None,
+            function_call=function_call,
+            refusal=None,
+            annotations=None,
+            audio=None,
+        )
+
+        from types import SimpleNamespace
+
+        final_completion = SimpleNamespace()
+        final_completion.choices = [SimpleNamespace()]
+        final_completion.choices[0].message = message
+        final_completion.choices[0].finish_reason = finish_reason
+        final_completion.usage = usage_data
+
+        # Log final usage information
+        if usage_data:
+            actual_tokens = getattr(usage_data, "completion_tokens", estimated_tokens)
+            token_str = str(actual_tokens).rjust(5)
+            data = {
+                "progress_action": ProgressAction.STREAMING,
+                "model": model,
+                "agent_name": self.name,
+                "chat_turn": self.chat_turn(),
+                "details": token_str.strip(),
+            }
+            self.logger.info("Streaming progress", data=data)
+
+            self.logger.info(
+                f"Streaming complete - Model: {model}, Input tokens: {getattr(usage_data, 'prompt_tokens', 0)}, Output tokens: {actual_tokens}"
+            )
+
+        return final_completion
+
     async def _openai_completion(
         self,
         message: OpenAIMessage,
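Outside the class, the accumulation pattern _process_stream relies on reduces to a few lines. A hedged, self-contained sketch against the OpenAI SDK; the model name and prompt are placeholders, and stream_options={"include_usage": True} matches the request arguments added later in this diff:

# Sketch: accumulate a streamed completion with ChatCompletionStreamState.
import asyncio

from openai import AsyncOpenAI
from openai.lib.streaming.chat import ChatCompletionStreamState

async def main() -> None:
    client = AsyncOpenAI()  # needs OPENAI_API_KEY in the environment
    state = ChatCompletionStreamState()
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
        stream_options={"include_usage": True},  # usage arrives on the final chunk
    )
    async for chunk in stream:
        state.handle_chunk(chunk)  # accumulates deltas into a completion
    completion = state.get_final_completion()
    print(completion.choices[0].message.content)
    print(completion.usage)  # prompt/completion token counts

asyncio.run(main())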
@@ -151,7 +329,10 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         ]
 
         if not available_tools:
-            …
+            if self.provider == Provider.DEEPSEEK:
+                available_tools = None  # deepseek does not allow empty array
+            else:
+                available_tools = []
 
         # we do NOT send "stop sequences" as this causes errors with mutlimodal processing
         for i in range(request_params.max_iterations):
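The DeepSeek special case above reduces to a small normalization rule: never send an empty tools array to that provider. An illustrative helper, with the provider check simplified to a plain string:

# Sketch of the empty-tools normalization introduced above.
def normalize_tools(tools: list | None, provider: str):
    if tools:
        return tools
    # DeepSeek rejects an empty array; other providers accept []
    return None if provider == "deepseek" else []

assert normalize_tools([], "deepseek") is None
assert normalize_tools([], "openai") == []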
@@ -160,11 +341,10 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
             self._log_chat_progress(self.chat_turn(), model=self.default_request_params.model)
 
-            …
-            …
-            …
-            …
-            response = executor_result[0]
+            # Use basic streaming API
+            stream = await self._openai_client().chat.completions.create(**arguments)
+            # Process the stream
+            response = await self._process_stream(stream, self.default_request_params.model)
 
             # Track usage if response is valid and has usage data
             if (
@@ -204,10 +384,11 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             if message.content:
                 responses.append(TextContent(type="text", text=message.content))
 
-            …
-            …
+            # ParsedChatCompletionMessage is compatible with ChatCompletionMessage
+            # since it inherits from it, so we can use it directly
+            messages.append(message)
 
-            message_text = …
+            message_text = message.content
             if choice.finish_reason in ["tool_calls", "function_call"] and message.tool_calls:
                 if message_text:
                     await self.show_assistant_message(
@@ -347,6 +528,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             "model": self.default_request_params.model,
             "messages": messages,
             "tools": tools,
+            "stream": True,  # Enable basic streaming
+            "stream_options": {"include_usage": True},  # Required for usage data in streaming
         }
 
         if self._reasoning:
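The converter change that follows pairs with this streaming work: OpenAI-compatible endpoints reject a role "tool" message with empty content, so the next two hunks substitute a placeholder string. The invariant in isolation, as a hedged sketch:

# Sketch of the non-empty tool-content rule enforced below.
def safe_tool_content(text: str | None) -> str:
    if not text or text.strip() == "":
        return "[Tool completed successfully]"
    return text

message = {"role": "tool", "tool_call_id": "call_1", "content": safe_tool_content("")}
print(message["content"])  # -> [Tool completed successfully]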
mcp_agent/llm/providers/multipart_converter_openai.py
CHANGED
@@ -360,7 +360,7 @@ class OpenAIConverter:
             return {
                 "role": "tool",
                 "tool_call_id": tool_call_id,
-                "content": "[…
+                "content": "[Tool completed successfully]",
             }
 
         # Separate text and non-text content
@@ -387,8 +387,9 @@
             converted.get("content", "")
         )
 
-        …
-        …
+        # Ensure we always have non-empty content for compatibility
+        if not tool_message_content or tool_message_content.strip() == "":
+            tool_message_content = "[Tool completed successfully]"
 
         # Create the tool message with just the text
         tool_message = {
mcp_agent/mcp/interfaces.py
CHANGED
@@ -21,7 +21,7 @@ from typing import (
     runtime_checkable,
 )
 
-from …
+from a2a.types import AgentCard
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from deprecated import deprecated
 from mcp import ClientSession
{fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/WHEEL
File without changes
{fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/entry_points.txt
File without changes
{fast_agent_mcp-0.2.35.dist-info → fast_agent_mcp-0.2.36.dist-info}/licenses/LICENSE
File without changes