fast-agent-mcp 0.2.27__py3-none-any.whl → 0.2.28__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/METADATA +3 -1
- {fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/RECORD +24 -19
- mcp_agent/agents/agent.py +1 -17
- mcp_agent/agents/base_agent.py +2 -0
- mcp_agent/config.py +3 -0
- mcp_agent/context.py +2 -0
- mcp_agent/core/agent_app.py +7 -2
- mcp_agent/core/interactive_prompt.py +58 -51
- mcp_agent/llm/augmented_llm_slow.py +42 -0
- mcp_agent/llm/model_factory.py +74 -37
- mcp_agent/llm/provider_types.py +4 -3
- mcp_agent/llm/providers/augmented_llm_google_native.py +459 -0
- mcp_agent/llm/providers/{augmented_llm_google.py → augmented_llm_google_oai.py} +2 -2
- mcp_agent/llm/providers/google_converter.py +361 -0
- mcp_agent/mcp/helpers/server_config_helpers.py +23 -0
- mcp_agent/mcp/mcp_agent_client_session.py +51 -24
- mcp_agent/mcp/mcp_aggregator.py +18 -3
- mcp_agent/mcp/mcp_connection_manager.py +6 -5
- mcp_agent/mcp/sampling.py +40 -10
- mcp_agent/mcp_server_registry.py +15 -4
- mcp_agent/tools/tool_definition.py +14 -0
- {fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.27
+Version: 0.2.28
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
@@ -214,11 +214,13 @@ Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: azure-identity>=1.14.0
 Requires-Dist: fastapi>=0.115.6
+Requires-Dist: google-genai
 Requires-Dist: mcp==1.9.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
 Requires-Dist: opentelemetry-instrumentation-anthropic>=0.39.3; python_version >= '3.10' and python_version < '4.0'
+Requires-Dist: opentelemetry-instrumentation-google-genai>=0.2b0
 Requires-Dist: opentelemetry-instrumentation-mcp>=0.40.3; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: opentelemetry-instrumentation-openai>=0.39.3; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: prompt-toolkit>=3.0.50
{fast_agent_mcp-0.2.27.dist-info → fast_agent_mcp-0.2.28.dist-info}/RECORD CHANGED
@@ -1,15 +1,15 @@
 mcp_agent/__init__.py,sha256=18T0AG0W9sJhTY38O9GFFOzliDhxx9p87CvRyti9zbw,1620
 mcp_agent/app.py,sha256=WRsiUdwy_9IAnaGRDwuLm7pzgQpt2wgsg10vBOpfcwM,5539
-mcp_agent/config.py,sha256=
+mcp_agent/config.py,sha256=ITwLZ-Wzn8I2xYOMDP9XvNwZTLzzUbvQNnnna7PxflQ,13438
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=
+mcp_agent/context.py,sha256=W9a91UVrpaT-GgoCtJp_ccHXnhmr8HaATDw7srm9NpU,7559
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=040lrCCclcOuryi07YGSej25kTQF5_JMXY12Yj-3u1U,2773
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=cZJJEXvN_FEUlTFWPtr2q2mRcDjPGBWVmRifc1TnLa0,12042
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/agents/agent.py,sha256=
-mcp_agent/agents/base_agent.py,sha256=
+mcp_agent/agents/agent.py,sha256=EAYlcP1qqI1D0_CS808I806z1048FBjZQxxpcCZPeIU,3154
+mcp_agent/agents/base_agent.py,sha256=0P3Onibs_NRfiyABL8RdRW_qvXDTSxNu-agBvgjBymY,25427
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
 mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
@@ -28,7 +28,7 @@ mcp_agent/cli/commands/quickstart.py,sha256=SM3CHMzDgvTxIpKjFuX9BrS_N1vRoXNBDaO9
 mcp_agent/cli/commands/setup.py,sha256=eOEd4TL-b0DaDeSJMGOfNOsTEItoZ67W88eTP4aP-bo,6482
 mcp_agent/cli/commands/url_parser.py,sha256=7QL9bp9tO7w0cPnwhbpt8GwjbOJ1Rrry1o71uVJhSss,5655
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/core/agent_app.py,sha256=
+mcp_agent/core/agent_app.py,sha256=aVvOzMrXZ3TfRGyAsnvcrMMYZxBf8Saa0UuHiA7DV0w,9922
 mcp_agent/core/agent_types.py,sha256=bQVQMTwKH7qHIJsNglj4C_d6PNFBBzC_0RIkcENSII4,1459
 mcp_agent/core/direct_decorators.py,sha256=aaVR4G6a8H9pVg6X_PGEZ8GzreP0ZO1-48ksIKvMNDI,14452
 mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D4bs,17912
@@ -36,7 +36,7 @@ mcp_agent/core/enhanced_prompt.py,sha256=bzvcengS7XzHWB7NWhyxHM3hhO2HI4zP5DbGXAO
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
 mcp_agent/core/fastagent.py,sha256=uS_NSXeniUYFu6xce8OHGJ9PbEYNU-gm1XVpa1r0rZc,22893
-mcp_agent/core/interactive_prompt.py,sha256=
+mcp_agent/core/interactive_prompt.py,sha256=QuPdJQTBwXnnKyp52QjeVGyaXMiMm3fZK6g1ly2X7v4,24696
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=qmFWZXeYEJyYw2IwonyrTnZWxQG7qX6bKpOPcqETa60,1603
@@ -52,11 +52,12 @@ mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
 mcp_agent/llm/augmented_llm.py,sha256=CqtSGo_QrHE73tz_DHMd0wdt2F41gwuUu5Bue51FNm4,24199
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
 mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
+mcp_agent/llm/augmented_llm_slow.py,sha256=6h4LXdBGBzDfKnvPBcfBh0RdfYl-UXo50EimA-W3tOY,1586
 mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
-mcp_agent/llm/model_factory.py,sha256=
+mcp_agent/llm/model_factory.py,sha256=KkOBpn_G118DijJYu4Iwm_CXgG9FeZQ9PZj9f-q3vlI,10413
 mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
 mcp_agent/llm/provider_key_manager.py,sha256=-K_FuibN6hdSnweT32lB8mKTfCARnbja6zYYs0ErTKg,2802
-mcp_agent/llm/provider_types.py,sha256=
+mcp_agent/llm/provider_types.py,sha256=t44U2ShXHCHdReV2xWNLGCtchp3TuEyI3BbhwbwpRK8,511
 mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
@@ -65,10 +66,12 @@ mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=gK_IvllVBNJUUrSfpgFpdh
 mcp_agent/llm/providers/augmented_llm_azure.py,sha256=VPrD6lNrEw6EdYUTa9MDvHDNIPjJU5CG5xnKCM3JYdA,5878
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=NiZK5nv91ZS2VgVFXpbsFNFYLsLcppcbo_RstlRMd7I,1145
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
-mcp_agent/llm/providers/
+mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=Axk6oKH5ctB6rXGnCjRKVkJq6O7rRqlD7aJ2He6UuZ8,20406
+mcp_agent/llm/providers/augmented_llm_google_oai.py,sha256=cO4dvjTl9ymqEurCOo5nP09ATfXVjgkuk1yZAlWpS1s,1137
 mcp_agent/llm/providers/augmented_llm_openai.py,sha256=5CFHKayjm-aeCBpohIK3WelAEuX7_LDGZIKnWR_rq-s,14577
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/augmented_llm_tensorzero.py,sha256=Mol_Wzj_ZtccW-LMw0oFwWUt1m1yfofloay9QYNP23c,20729
+mcp_agent/llm/providers/google_converter.py,sha256=bA0oYdB6tfRX_iuwTr8xTBYWlzNNskwYIS3Y9aFyEbo,16643
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
 mcp_agent/llm/providers/multipart_converter_tensorzero.py,sha256=BFTdyVk42HZskDAuTHicfDTUJq89d1fz8C9nAOuHxlE,8646
@@ -88,17 +91,18 @@ mcp_agent/mcp/common.py,sha256=DiWLH9rxWvCgkKRsHQehY9mDhQl9gki1-q7LVUflDvI,425
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
-mcp_agent/mcp/mcp_agent_client_session.py,sha256=
-mcp_agent/mcp/mcp_aggregator.py,sha256=
-mcp_agent/mcp/mcp_connection_manager.py,sha256=
+mcp_agent/mcp/mcp_agent_client_session.py,sha256=1zlBZaGRvCHuflrTd4dUeC4BzcPXxfPuciulJB0DIFI,7900
+mcp_agent/mcp/mcp_aggregator.py,sha256=Mdmr-6gNlrcofHzhHZloz1QVbC5ZAnCSPNFY5fwm-Bs,47075
+mcp_agent/mcp/mcp_connection_manager.py,sha256=5JekxOJsB46spHsiXt7pyRPicg8TGHMiSJRtXRW2JB8,17074
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
 mcp_agent/mcp/prompt_serialization.py,sha256=MQY6QxnhQTiq0oBDsyRzFtX8sBiovUjzUFX78As8q60,17974
 mcp_agent/mcp/resource_utils.py,sha256=K4XY8bihmBMleRTZ2viMPiD2Y2HWxFnlgIJi6dd_PYE,6588
-mcp_agent/mcp/sampling.py,sha256=
+mcp_agent/mcp/sampling.py,sha256=kge2r-a72fRY2ncsvQSXHr6ahYjV_fJJvkoapPQT_V4,6388
 mcp_agent/mcp/helpers/__init__.py,sha256=sKqwlUR3jSsd9PVJKjXtxHgZA1YOdzPtsSW4xVey77Q,52
 mcp_agent/mcp/helpers/content_helpers.py,sha256=KsD77eCr1O6gv2Fz7vlVZxLyBgqscgsS25OqSJ8ksoY,3349
+mcp_agent/mcp/helpers/server_config_helpers.py,sha256=yOiMFkdLyPuFHXBGqLoVJtefu9Gk-szL6y3eh8em92Y,748
 mcp_agent/mcp/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/prompts/__main__.py,sha256=gr1Tdz9fcK0EXjEuZg_BOnKUmvhYq5AH2lFZicVyNb0,237
 mcp_agent/mcp/prompts/prompt_constants.py,sha256=Q9W0t3rOXl2LHIG9wcghApUV2QZ1iICuo7SwVwHUf3c,566
@@ -147,9 +151,10 @@ mcp_agent/resources/examples/workflows/orchestrator.py,sha256=rOGilFTliWWnZ3Jx5w
 mcp_agent/resources/examples/workflows/parallel.py,sha256=DQ5vY5-h8Qa5QHcYjsWXhZ_FYrYoloVWOdgeXV9p2gI,1890
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
+mcp_agent/tools/tool_definition.py,sha256=L3Pxl-uLEXqlVoo-bYuFTFALeI-2pIU44YgFhsTKEtM,398
 mcp_agent/ui/console_display.py,sha256=UKqax5V2TC0hkZZORmmd6UqUk0DGX7A25E3h1k9f42k,10982
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
+fast_agent_mcp-0.2.28.dist-info/METADATA,sha256=DbIbErJ9uZh6NnL6Tl-OD4ihPgzSRXR_ExDpafUwGKQ,30581
+fast_agent_mcp-0.2.28.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.28.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+fast_agent_mcp-0.2.28.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.28.dist-info/RECORD,,
mcp_agent/agents/agent.py CHANGED
@@ -75,27 +75,11 @@ class Agent(BaseAgent):
         async def send_wrapper(message, agent_name):
             return await self.send(message)
 
-        # Define wrapper for apply_prompt function
-        async def apply_prompt_wrapper(prompt_name, args, agent_name):
-            # Just apply the prompt directly
-            return await self.apply_prompt(prompt_name, args)
-
-        # Define wrapper for list_prompts function
-        async def list_prompts_wrapper(agent_name):
-            # Always call list_prompts on this agent regardless of agent_name
-            return await self.list_prompts()
-
-        # Define wrapper for list_resources function
-        async def list_resources_wrapper(agent_name):
-            # Always call list_resources on this agent regardless of agent_name
-            return await self.list_resources()
-
         # Start the prompt loop with just this agent
         return await prompt.prompt_loop(
             send_func=send_wrapper,
             default_agent=agent_name_str,
             available_agents=[agent_name_str],  # Only this agent
-
-            list_prompts_func=list_prompts_wrapper,
+            prompt_provider=self,  # Pass self as the prompt provider since we implement the protocol
             default=default_prompt,
         )
mcp_agent/agents/base_agent.py CHANGED
@@ -456,6 +456,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         self,
         prompt_name: str,
         arguments: Dict[str, str] | None = None,
+        agent_name: str | None = None,
         server_name: str | None = None,
     ) -> str:
         """
@@ -468,6 +469,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         Args:
             prompt_name: The name of the prompt to apply
             arguments: Optional dictionary of string arguments to pass to the prompt template
+            agent_name: Optional agent name (ignored at this level, used by multi-agent apps)
             server_name: Optional name of the server to get the prompt from
 
         Returns:
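The added agent_name parameter keeps apply_prompt call-compatible with multi-agent callers such as the interactive prompt, which now passes an agent name uniformly; BaseAgent accepts it and ignores it. A minimal usage sketch follows (the "summarise" prompt, its arguments, and the "docs" server are illustrative names, not ones shipped with the package):

```python
# Illustrative only: "summarise", the arguments dict, and "docs" are made-up names.
from mcp_agent.agents.agent import Agent


async def apply_summary_prompt(agent: Agent) -> str:
    return await agent.apply_prompt(
        "summarise",                 # prompt template to apply
        {"style": "bullet-points"},  # optional string arguments for the template
        agent_name=None,             # accepted (and ignored) at the BaseAgent level
        server_name="docs",          # optionally restrict lookup to one MCP server
    )
```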
mcp_agent/config.py CHANGED
@@ -291,6 +291,9 @@ class Settings(BaseSettings):
     Default model for agents. Format is provider.model_name.<reasoning_effort>, for example openai.o3-mini.low
     Aliases are provided for common models e.g. sonnet, haiku, gpt-4.1, o3-mini etc.
     """
+
+    auto_sampling: bool = True
+    """Enable automatic sampling model selection if not explicitly configured"""
 
     anthropic: AnthropicSettings | None = None
     """Settings for using Anthropic models in the fast-agent application"""
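The new auto_sampling flag defaults to True on Settings. A minimal sketch of toggling it programmatically, assuming Settings can be constructed directly as a pydantic BaseSettings subclass normally allows:

```python
# Sketch: assumes Settings can be instantiated with keyword overrides;
# every field other than auto_sampling keeps its default value.
from mcp_agent.config import Settings

settings = Settings(auto_sampling=False)   # opt out of automatic sampling-model selection
assert settings.auto_sampling is False

assert Settings().auto_sampling is True    # the new default introduced in 0.2.28
```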
mcp_agent/context.py CHANGED
@@ -11,6 +11,7 @@ from mcp import ServerSession
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
 from opentelemetry.instrumentation.mcp import McpInstrumentor
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.propagate import set_global_textmap
@@ -112,6 +113,7 @@ async def configure_otel(config: "Settings") -> None:
     trace.set_tracer_provider(tracer_provider)
     AnthropicInstrumentor().instrument()
     OpenAIInstrumentor().instrument()
+    GoogleGenAiSdkInstrumentor().instrument()
     McpInstrumentor().instrument()
 
 
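With the google-genai dependency added, configure_otel now instruments the Google GenAI SDK alongside the Anthropic, OpenAI, and MCP instrumentors. The same pattern can be reproduced outside fast-agent; this sketch assumes the opentelemetry-sdk and opentelemetry-instrumentation-google-genai packages are installed:

```python
# Standalone reproduction of the instrumentation pattern used in configure_otel.
from opentelemetry import trace
from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())  # register a tracer provider first
GoogleGenAiSdkInstrumentor().instrument()    # subsequent google-genai calls emit spans
```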
|
mcp_agent/core/agent_app.py CHANGED
@@ -129,6 +129,12 @@ class AgentApp:
         Returns:
             Dictionary mapping server names to lists of available prompts
         """
+        if not agent_name:
+            results = {}
+            for agent in self._agents.values():
+                curr_prompts = await agent.list_prompts(server_name=server_name)
+                results.update(curr_prompts)
+            return results
         return await self._agent(agent_name).list_prompts(server_name=server_name)
 
     async def get_prompt(
@@ -262,7 +268,6 @@ class AgentApp:
             send_func=send_wrapper,
             default_agent=target_name,  # Pass the agent name, not the agent object
             available_agents=list(self._agents.keys()),
-
-            list_prompts_func=self.list_prompts,
+            prompt_provider=self,  # Pass self as the prompt provider
             default=default_prompt,
         )
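With this change, calling list_prompts without an agent name aggregates prompts from every registered agent instead of requiring a specific one. A hedged usage sketch (agent_app is an AgentApp instance from a running fast-agent application; the "fetch" server name is illustrative):

```python
async def show_prompts(agent_app) -> None:
    # No agent_name: prompts from every registered agent are merged into one mapping
    # of server name -> list of Prompt objects.
    all_prompts = await agent_app.list_prompts()
    for server, prompts in all_prompts.items():
        print(server, [prompt.name for prompt in prompts])

    # server_name still narrows the result to a single MCP server.
    fetch_prompts = await agent_app.list_prompts(server_name="fetch")
    print(len(fetch_prompts))
```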
mcp_agent/core/interactive_prompt.py CHANGED
@@ -10,12 +10,13 @@ Usage:
         send_func=agent_app.send,
         default_agent="default_agent",
         available_agents=["agent1", "agent2"],
-
+        prompt_provider=agent_app
     )
 """
 
-from typing import Dict, List, Optional
+from typing import Awaitable, Callable, Dict, List, Mapping, Optional, Protocol, Union
 
+from mcp.types import Prompt, PromptMessage
 from rich import print as rich_print
 from rich.console import Console
 from rich.table import Table
@@ -28,8 +29,24 @@ from mcp_agent.core.enhanced_prompt import (
     handle_special_commands,
 )
 from mcp_agent.mcp.mcp_aggregator import SEP  # Import SEP once at the top
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.progress_display import progress_display
 
+# Type alias for the send function
+SendFunc = Callable[[Union[str, PromptMessage, PromptMessageMultipart], str], Awaitable[str]]
+
+
+class PromptProvider(Protocol):
+    """Protocol for objects that can provide prompt functionality."""
+
+    async def list_prompts(self, server_name: Optional[str] = None, agent_name: Optional[str] = None) -> Mapping[str, List[Prompt]]:
+        """List available prompts."""
+        ...
+
+    async def apply_prompt(self, prompt_name: str, arguments: Optional[Dict[str, str]] = None, agent_name: Optional[str] = None, **kwargs) -> str:
+        """Apply a prompt."""
+        ...
+
 
 class InteractivePrompt:
     """
@@ -48,22 +65,20 @@ class InteractivePrompt:
 
     async def prompt_loop(
         self,
-        send_func,
+        send_func: SendFunc,
         default_agent: str,
         available_agents: List[str],
-
-        list_prompts_func=None,
+        prompt_provider: Optional[PromptProvider] = None,
         default: str = "",
     ) -> str:
         """
         Start an interactive prompt session.
 
         Args:
-            send_func: Function to send messages to agents
+            send_func: Function to send messages to agents
             default_agent: Name of the default agent to use
             available_agents: List of available agent names
-
-            list_prompts_func: Optional function to list available prompts (signature: async (agent_name))
+            prompt_provider: Optional provider that implements list_prompts and apply_prompt
             default: Default message to use when user presses enter
 
         Returns:
@@ -110,13 +125,11 @@ class InteractivePrompt:
                     rich_print(f"[red]Agent '{new_agent}' not found[/red]")
                     continue
                 # Keep the existing list_prompts handler for backward compatibility
-                elif "list_prompts" in command_result and
-                    # Use the
-                    await self._list_prompts(
+                elif "list_prompts" in command_result and prompt_provider:
+                    # Use the prompt_provider directly
+                    await self._list_prompts(prompt_provider, agent)
                     continue
-                elif "select_prompt" in command_result and
-                    list_prompts_func and apply_prompt_func
-                ):
+                elif "select_prompt" in command_result and prompt_provider:
                     # Handle prompt selection, using both list_prompts and apply_prompt
                     prompt_name = command_result.get("prompt_name")
                     prompt_index = command_result.get("prompt_index")
@@ -124,7 +137,7 @@
                     # If a specific index was provided (from /prompt <number>)
                     if prompt_index is not None:
                         # First get a list of all prompts to look up the index
-                        all_prompts = await self._get_all_prompts(
+                        all_prompts = await self._get_all_prompts(prompt_provider, agent)
                        if not all_prompts:
                             rich_print("[yellow]No prompts available[/yellow]")
                             continue
@@ -135,8 +148,7 @@
                             selected_prompt = all_prompts[prompt_index - 1]
                             # Use the already created namespaced_name to ensure consistency
                             await self._select_prompt(
-
-                                apply_prompt_func,
+                                prompt_provider,
                                 agent,
                                 selected_prompt["namespaced_name"],
                             )
@@ -145,11 +157,11 @@
                                 f"[red]Invalid prompt number: {prompt_index}. Valid range is 1-{len(all_prompts)}[/red]"
                             )
                             # Show the prompt list for convenience
-                            await self._list_prompts(
+                            await self._list_prompts(prompt_provider, agent)
                     else:
                         # Use the name-based selection
                         await self._select_prompt(
-
+                            prompt_provider, agent, prompt_name
                         )
                     continue
 
@@ -171,21 +183,21 @@
 
         return result
 
-    async def _get_all_prompts(self,
+    async def _get_all_prompts(self, prompt_provider: PromptProvider, agent_name: Optional[str] = None):
         """
         Get a list of all available prompts.
 
         Args:
-
-            agent_name:
+            prompt_provider: Provider that implements list_prompts
+            agent_name: Optional agent name (for multi-agent apps)
 
         Returns:
             List of prompt info dictionaries, sorted by server and name
         """
         try:
-            #
-
-
+            # Call list_prompts on the provider
+            prompt_servers = await prompt_provider.list_prompts(server_name=None, agent_name=agent_name)
+
             all_prompts = []
 
             # Process the returned prompt servers
@@ -219,14 +231,18 @@
                             }
                         )
                     else:
+                        # Handle Prompt objects from mcp.types
+                        prompt_name = getattr(prompt, "name", str(prompt))
+                        description = getattr(prompt, "description", "No description")
+                        arguments = getattr(prompt, "arguments", [])
                         all_prompts.append(
                             {
                                 "server": server_name,
-                                "name":
-                                "namespaced_name": f"{server_name}{SEP}{
-                                "description":
-                                "arg_count":
-                                "arguments":
+                                "name": prompt_name,
+                                "namespaced_name": f"{server_name}{SEP}{prompt_name}",
+                                "description": description,
+                                "arg_count": len(arguments),
+                                "arguments": arguments,
                             }
                         )
 
@@ -244,27 +260,22 @@
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")
             return []
 
-    async def _list_prompts(self,
+    async def _list_prompts(self, prompt_provider: PromptProvider, agent_name: str) -> None:
         """
         List available prompts for an agent.
 
         Args:
-
+            prompt_provider: Provider that implements list_prompts
             agent_name: Name of the agent
         """
-        from rich import print as rich_print
-        from rich.console import Console
-        from rich.table import Table
-
         console = Console()
 
         try:
             # Directly call the list_prompts function for this agent
             rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
 
-            # Get all prompts using the helper function
-
-            all_prompts = await self._get_all_prompts(list_prompts_func, None)
+            # Get all prompts using the helper function
+            all_prompts = await self._get_all_prompts(prompt_provider, agent_name)
 
             if all_prompts:
                 # Create a table for better display
@@ -300,28 +311,24 @@
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")
 
     async def _select_prompt(
-        self,
+        self, prompt_provider: PromptProvider, agent_name: str, requested_name: Optional[str] = None
     ) -> None:
         """
         Select and apply a prompt.
 
         Args:
-
-            apply_prompt_func: Function to apply prompts
+            prompt_provider: Provider that implements list_prompts and apply_prompt
             agent_name: Name of the agent
             requested_name: Optional name of the prompt to apply
         """
-        # We already imported these at the top
-        from rich import print as rich_print
-
         console = Console()
 
         try:
-            # Get all available prompts directly from the
+            # Get all available prompts directly from the prompt provider
             rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
-
-            #
-            prompt_servers = await
+
+            # Call list_prompts on the provider
+            prompt_servers = await prompt_provider.list_prompts(server_name=None, agent_name=agent_name)
 
             if not prompt_servers:
                 rich_print("[yellow]No prompts available for this agent[/yellow]")
@@ -542,8 +549,8 @@
             namespaced_name = selected_prompt["namespaced_name"]
             rich_print(f"\n[bold]Applying prompt [cyan]{namespaced_name}[/cyan]...[/bold]")
 
-            # Call apply_prompt
-            await
+            # Call apply_prompt on the provider with the prompt name and arguments
+            await prompt_provider.apply_prompt(namespaced_name, arg_values, agent_name)
 
         except Exception as e:
             import traceback
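prompt_loop now accepts any object that satisfies the new PromptProvider protocol rather than separate list_prompts_func/apply_prompt_func callbacks. A minimal sketch of a custom provider follows (structural typing, so no inheritance is needed; the "demo" server and "greet" prompt are illustrative):

```python
from typing import Dict, List, Mapping, Optional

from mcp.types import Prompt


class StaticPromptProvider:
    """Satisfies the PromptProvider protocol expected by InteractivePrompt.prompt_loop()."""

    async def list_prompts(
        self, server_name: Optional[str] = None, agent_name: Optional[str] = None
    ) -> Mapping[str, List[Prompt]]:
        # One server ("demo") exposing a single argument-free prompt.
        return {"demo": [Prompt(name="greet", description="Say hello", arguments=[])]}

    async def apply_prompt(
        self,
        prompt_name: str,
        arguments: Optional[Dict[str, str]] = None,
        agent_name: Optional[str] = None,
        **kwargs,
    ) -> str:
        return f"applied {prompt_name} with {arguments or {}}"
```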
mcp_agent/llm/augmented_llm_slow.py ADDED
@@ -0,0 +1,42 @@
+import asyncio
+from typing import Any, List, Optional, Union
+
+from mcp_agent.llm.augmented_llm import (
+    MessageParamT,
+    RequestParams,
+)
+from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+
+
+class SlowLLM(PassthroughLLM):
+    """
+    A specialized LLM implementation that sleeps for 3 seconds before responding like PassthroughLLM.
+
+    This is useful for testing scenarios where you want to simulate slow responses
+    or for debugging timing-related issues in parallel workflows.
+    """
+
+    def __init__(
+        self, provider=Provider.FAST_AGENT, name: str = "Slow", **kwargs: dict[str, Any]
+    ) -> None:
+        super().__init__(name=name, provider=provider, **kwargs)
+
+    async def generate_str(
+        self,
+        message: Union[str, MessageParamT, List[MessageParamT]],
+        request_params: Optional[RequestParams] = None,
+    ) -> str:
+        """Sleep for 3 seconds then return the input message as a string."""
+        await asyncio.sleep(3)
+        return await super().generate_str(message, request_params)
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        """Sleep for 3 seconds then apply prompt like PassthroughLLM."""
+        await asyncio.sleep(3)
+        return await super()._apply_prompt_provider_specific(multipart_messages, request_params)