fast-agent-mcp 0.2.28__py3-none-any.whl → 0.2.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fast-agent-mcp may be problematic.
- {fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/METADATA +11 -8
- {fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/RECORD +23 -21
- mcp_agent/app.py +1 -1
- mcp_agent/cli/commands/check_config.py +11 -2
- mcp_agent/cli/commands/url_parser.py +7 -1
- mcp_agent/config.py +16 -1
- mcp_agent/context.py +5 -3
- mcp_agent/core/enhanced_prompt.py +105 -0
- mcp_agent/core/interactive_prompt.py +2 -2
- mcp_agent/llm/model_factory.py +6 -0
- mcp_agent/llm/provider_key_manager.py +1 -0
- mcp_agent/llm/provider_types.py +2 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py +30 -0
- mcp_agent/llm/providers/augmented_llm_deepseek.py +63 -0
- mcp_agent/llm/providers/google_converter.py +4 -0
- mcp_agent/mcp/common.py +2 -2
- mcp_agent/mcp/hf_auth.py +106 -0
- mcp_agent/mcp/mcp_agent_client_session.py +16 -40
- mcp_agent/mcp/mcp_aggregator.py +2 -1
- mcp_agent/mcp_server_registry.py +10 -3
- {fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.28
+Version: 0.2.30
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
@@ -213,16 +213,17 @@ Requires-Dist: a2a-types>=0.1.0
 Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: azure-identity>=1.14.0
+Requires-Dist: deprecated>=1.2.18
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: google-genai
-Requires-Dist: mcp==1.9.
+Requires-Dist: mcp==1.9.3
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
-Requires-Dist: opentelemetry-instrumentation-anthropic>=0.
+Requires-Dist: opentelemetry-instrumentation-anthropic>=0.40.7; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: opentelemetry-instrumentation-google-genai>=0.2b0
-Requires-Dist: opentelemetry-instrumentation-mcp>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai>=0.
+Requires-Dist: opentelemetry-instrumentation-mcp>=0.40.7; python_version >= '3.10' and python_version < '4.0'
+Requires-Dist: opentelemetry-instrumentation-openai>=0.0.40.7; python_version >= '3.10' and python_version < '4.0'
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
@@ -286,11 +287,13 @@ Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Pyt
 
 ```bash
 uv pip install fast-agent-mcp          # install fast-agent!
-
-
+fast-agent go                          # start an interactive session
+fast-agent go https://hf.co/mcp        # with a remote MCP
+fast-agent go --model=generic.qwen2.5  # use ollama qwen 2.5
+fast-agent setup                       # create an example agent and config files
 uv run agent.py                        # run your first agent
 uv run agent.py --model=o3-mini.low    # specify a model
-
+fast-agent quickstart workflow         # create "building effective agents" examples
 ```
 
 Other quickstart examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
{fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
 mcp_agent/__init__.py,sha256=18T0AG0W9sJhTY38O9GFFOzliDhxx9p87CvRyti9zbw,1620
-mcp_agent/app.py,sha256=
-mcp_agent/config.py,sha256=
+mcp_agent/app.py,sha256=3mtHP1nRQcRaKhhxgTmCOv00alh70nT7UxNA8bN47QE,5560
+mcp_agent/config.py,sha256=c3KxDNXuOhLSBQ7InVw6sUaTc_5K5YbzVPnTMxgF_34,13924
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=
+mcp_agent/context.py,sha256=H7JbaZ_8SzzTagLmIgUPUPxX5370C5qjQAsasFPZG2Y,7510
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=040lrCCclcOuryi07YGSej25kTQF5_JMXY12Yj-3u1U,2773
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=b3iSb-0ULYc5yUG2KHav41WGwSYWiJCGQsOwWHWByxo,12346
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=EAYlcP1qqI1D0_CS808I806z1048FBjZQxxpcCZPeIU,3154
@@ -22,21 +22,21 @@ mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=XjrgXMBaPKkVqAFo8T9LJz6Tp1-ivrKDOuNYWke99YA,3090
 mcp_agent/cli/terminal.py,sha256=GRwD-RGW7saIz2IOWZn5vD6JjiArscELBThm1GTFkuI,1065
-mcp_agent/cli/commands/check_config.py,sha256=
+mcp_agent/cli/commands/check_config.py,sha256=JKOHniuMlU1bJ5vmyY7g05HDP7ZYGSQktl19bNx5I4Y,18775
 mcp_agent/cli/commands/go.py,sha256=LIsOJQuTdfCUcNm7JT-NQDU8cI-GCnYwYjN2VOWxvqs,8658
 mcp_agent/cli/commands/quickstart.py,sha256=SM3CHMzDgvTxIpKjFuX9BrS_N1vRoXNBDaO90aWx1Rk,14586
 mcp_agent/cli/commands/setup.py,sha256=eOEd4TL-b0DaDeSJMGOfNOsTEItoZ67W88eTP4aP-bo,6482
-mcp_agent/cli/commands/url_parser.py,sha256=
+mcp_agent/cli/commands/url_parser.py,sha256=5VdtcHRHzi67YignStVbz7u-rcvNNErw9oJLAUFOtEY,5855
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/core/agent_app.py,sha256=aVvOzMrXZ3TfRGyAsnvcrMMYZxBf8Saa0UuHiA7DV0w,9922
 mcp_agent/core/agent_types.py,sha256=bQVQMTwKH7qHIJsNglj4C_d6PNFBBzC_0RIkcENSII4,1459
 mcp_agent/core/direct_decorators.py,sha256=aaVR4G6a8H9pVg6X_PGEZ8GzreP0ZO1-48ksIKvMNDI,14452
 mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D4bs,17912
-mcp_agent/core/enhanced_prompt.py,sha256=
+mcp_agent/core/enhanced_prompt.py,sha256=_JlX_7tBpWm1rScBaprD9Tvcep1qPPfXrUsFlnWrTpE,23497
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
 mcp_agent/core/fastagent.py,sha256=uS_NSXeniUYFu6xce8OHGJ9PbEYNU-gm1XVpa1r0rZc,22893
-mcp_agent/core/interactive_prompt.py,sha256=
+mcp_agent/core/interactive_prompt.py,sha256=JKEU1Gvq6zXaLniDPx8wll08ZTC6g1rQflL7khmnhs8,24710
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=qmFWZXeYEJyYw2IwonyrTnZWxQG7qX6bKpOPcqETa60,1603
@@ -54,24 +54,25 @@ mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyz
 mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
 mcp_agent/llm/augmented_llm_slow.py,sha256=6h4LXdBGBzDfKnvPBcfBh0RdfYl-UXo50EimA-W3tOY,1586
 mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
-mcp_agent/llm/model_factory.py,sha256=
+mcp_agent/llm/model_factory.py,sha256=gR_MBL74f-KF9PvYbVjXc9Qv5GyoMrRR7biXO6oGDvk,10686
 mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
-mcp_agent/llm/provider_key_manager.py,sha256
-mcp_agent/llm/provider_types.py,sha256=
+mcp_agent/llm/provider_key_manager.py,sha256=usMWozSMhek_FIlM1MeVDwAbs-P96SrEVPGd3YwF9E4,2833
+mcp_agent/llm/provider_types.py,sha256=AkQl1r67wZ0gSIY6CXsiZiS3uw5DBF9E5yhIn3THayk,633
 mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
+mcp_agent/llm/providers/augmented_llm_aliyun.py,sha256=XylkJKZ9theSVUxJKOZkf1244hgzng4Ng4Dr209Qb-w,1101
 mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=gK_IvllVBNJUUrSfpgFpdhM-d4liCt0MLq7d2lXS7RI,15510
 mcp_agent/llm/providers/augmented_llm_azure.py,sha256=VPrD6lNrEw6EdYUTa9MDvHDNIPjJU5CG5xnKCM3JYdA,5878
-mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=
+mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=zI9a90dwT4r6E1f_xp4K50Cj9sD7y7kNRgjo0s1pd5w,3804
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=Axk6oKH5ctB6rXGnCjRKVkJq6O7rRqlD7aJ2He6UuZ8,20406
 mcp_agent/llm/providers/augmented_llm_google_oai.py,sha256=cO4dvjTl9ymqEurCOo5nP09ATfXVjgkuk1yZAlWpS1s,1137
 mcp_agent/llm/providers/augmented_llm_openai.py,sha256=5CFHKayjm-aeCBpohIK3WelAEuX7_LDGZIKnWR_rq-s,14577
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/augmented_llm_tensorzero.py,sha256=Mol_Wzj_ZtccW-LMw0oFwWUt1m1yfofloay9QYNP23c,20729
-mcp_agent/llm/providers/google_converter.py,sha256=
+mcp_agent/llm/providers/google_converter.py,sha256=zsqxJJ636WzCL2K6w-yB94O8bdNR6mo8f5mQEnUJFyg,16831
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
 mcp_agent/llm/providers/multipart_converter_tensorzero.py,sha256=BFTdyVk42HZskDAuTHicfDTUJq89d1fz8C9nAOuHxlE,8646
@@ -87,12 +88,13 @@ mcp_agent/logging/logger.py,sha256=l02OGX_c5FOyH0rspd4ZvnkJcbb0FahhUhlh2KI8mqE,1
 mcp_agent/logging/rich_progress.py,sha256=oY9fjb4Tyw6887v8sgO6EGIK4lnmIoR3NNxhA_-Ln_M,4893
 mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/mcp/common.py,sha256=
+mcp_agent/mcp/common.py,sha256=MpSC0fLO21RcDz4VApah4C8_LisVGz7OXkR17Xw-9mY,431
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
+mcp_agent/mcp/hf_auth.py,sha256=YwEt7hMDJODFUIc6Zi1HLYsfVnvANGvyhpQwcPCMAgI,3379
 mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
-mcp_agent/mcp/mcp_agent_client_session.py,sha256=
-mcp_agent/mcp/mcp_aggregator.py,sha256=
+mcp_agent/mcp/mcp_agent_client_session.py,sha256=V17Lj21rMGIKKVAIyNx5l5gmC8jQuohjJGpRcoCXfVA,6862
+mcp_agent/mcp/mcp_aggregator.py,sha256=CrUtj-BHXXCb7sUlc_MF1d7HkiF9rjh6MKaGprflBB4,47076
 mcp_agent/mcp/mcp_connection_manager.py,sha256=5JekxOJsB46spHsiXt7pyRPicg8TGHMiSJRtXRW2JB8,17074
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
@@ -153,8 +155,8 @@ mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkde
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
 mcp_agent/tools/tool_definition.py,sha256=L3Pxl-uLEXqlVoo-bYuFTFALeI-2pIU44YgFhsTKEtM,398
 mcp_agent/ui/console_display.py,sha256=UKqax5V2TC0hkZZORmmd6UqUk0DGX7A25E3h1k9f42k,10982
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
+fast_agent_mcp-0.2.30.dist-info/METADATA,sha256=7q7NZipQQtERgEt6C6t60vtvEwqzyvjadJllHE5KOC4,30799
+fast_agent_mcp-0.2.30.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.30.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+fast_agent_mcp-0.2.30.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.30.dist-info/RECORD,,
mcp_agent/app.py
CHANGED
@@ -119,7 +119,7 @@ class MCPApp:
         if self._initialized:
             return
 
-        self._context = await initialize_context(self._config_or_path)
+        self._context = await initialize_context(self._config_or_path, store_globally=True)
 
         # Set the properties that were passed in the constructor
         self._context.human_input_handler = self._human_input_callback
mcp_agent/cli/commands/check_config.py
CHANGED
@@ -226,8 +226,17 @@ def get_config_summary(config_path: Optional[Path]) -> dict:
 
         # Determine transport type
         if "url" in server_config:
-
-            server_info["url"] =
+            url = server_config.get("url", "")
+            server_info["url"] = url
+
+            # Use URL path to determine transport type
+            try:
+                from .url_parser import parse_server_url
+                _, transport_type, _ = parse_server_url(url)
+                server_info["transport"] = transport_type.upper()
+            except Exception:
+                # Fallback to HTTP if URL parsing fails
+                server_info["transport"] = "HTTP"
 
         # Get command and args
         command = server_config.get("command", "")
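For orientation, a small sketch of how the transport detection above behaves for a single config entry. This is not part of the package; it assumes `parse_server_url` returns a `(server_name, transport_type, url)` tuple, as the call site implies, and that a plain MCP endpoint is classified as streamable HTTP.

```python
# Hypothetical usage of the transport inference added to get_config_summary().
from mcp_agent.cli.commands.url_parser import parse_server_url

server_config = {"url": "https://hf.co/mcp"}
server_info = {"url": server_config["url"]}
try:
    _, transport_type, _ = parse_server_url(server_config["url"])
    server_info["transport"] = transport_type.upper()  # e.g. "HTTP" (assumed classification)
except Exception:
    server_info["transport"] = "HTTP"                  # fallback mirrors the diff
print(server_info)
```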
mcp_agent/cli/commands/url_parser.py
CHANGED
@@ -8,6 +8,8 @@ import re
 from typing import Dict, List, Literal, Tuple
 from urllib.parse import urlparse
 
+from mcp_agent.mcp.hf_auth import add_hf_auth_header
+
 
 def parse_server_url(
     url: str,
@@ -131,7 +133,11 @@ def parse_server_urls(
     result = []
     for url in url_list:
         server_name, transport_type, parsed_url = parse_server_url(url)
-
+
+        # Apply HuggingFace authentication if appropriate
+        final_headers = add_hf_auth_header(parsed_url, headers)
+
+        result.append((server_name, transport_type, parsed_url, final_headers))
 
     return result
 
mcp_agent/config.py
CHANGED
@@ -222,6 +222,15 @@ class TensorZeroSettings(BaseModel):
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
 
+class HuggingFaceSettings(BaseModel):
+    """
+    Settings for HuggingFace authentication (used for MCP connections).
+    """
+
+    api_key: Optional[str] = None
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+
 class LoggerSettings(BaseModel):
     """
     Logger settings for the fast-agent application.
@@ -291,7 +300,7 @@ class Settings(BaseSettings):
     Default model for agents. Format is provider.model_name.<reasoning_effort>, for example openai.o3-mini.low
     Aliases are provided for common models e.g. sonnet, haiku, gpt-4.1, o3-mini etc.
     """
-
+
     auto_sampling: bool = True
     """Enable automatic sampling model selection if not explicitly configured"""
 
@@ -322,6 +331,12 @@ class Settings(BaseSettings):
     azure: AzureSettings | None = None
     """Settings for using Azure OpenAI Service in the fast-agent application"""
 
+    aliyun: OpenAISettings | None = None
+    """Settings for using Aliyun OpenAI Service in the fast-agent application"""
+
+    huggingface: HuggingFaceSettings | None = None
+    """Settings for HuggingFace authentication (used for MCP connections)"""
+
     logger: LoggerSettings | None = LoggerSettings()
     """Logger settings for the fast-agent application"""
 
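A minimal sketch of how the two new settings blocks might be populated programmatically. It assumes `OpenAISettings` is exported by `mcp_agent.config` and exposes a `base_url` field (as the Aliyun provider below reads), and that the remaining `Settings` fields have defaults; the token value is a placeholder.

```python
# Hedged sketch only: field names beyond those visible in the diff are assumptions.
from mcp_agent.config import HuggingFaceSettings, OpenAISettings, Settings

settings = Settings(
    huggingface=HuggingFaceSettings(api_key="hf_xxx"),  # placeholder value
    aliyun=OpenAISettings(base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"),
)
```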
mcp_agent/context.py
CHANGED
@@ -12,7 +12,8 @@ from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
 from opentelemetry.instrumentation.google_genai import GoogleGenAiSdkInstrumentor
-
+
+# from opentelemetry.instrumentation.mcp import McpInstrumentor
 from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.propagate import set_global_textmap
 from opentelemetry.sdk.resources import Resource
@@ -114,7 +115,9 @@ async def configure_otel(config: "Settings") -> None:
     AnthropicInstrumentor().instrument()
     OpenAIInstrumentor().instrument()
     GoogleGenAiSdkInstrumentor().instrument()
-
+
+
+    # McpInstrumentor().instrument()
 
 
 async def configure_logger(config: "Settings") -> None:
@@ -198,7 +201,6 @@ _global_context: Context | None = None
 def get_current_context() -> Context:
     """
     Synchronous initializer/getter for global application context.
-    For async usage, use aget_current_context instead.
     """
     global _global_context
     if _global_context is None:
mcp_agent/core/enhanced_prompt.py
CHANGED
@@ -2,6 +2,11 @@
 Enhanced prompt functionality with advanced prompt_toolkit features.
 """
 
+import asyncio
+import os
+import shlex
+import subprocess
+import tempfile
 from importlib.metadata import version
 from typing import List, Optional
 
@@ -96,6 +101,85 @@ class AgentCompleter(Completer):
             )
 
 
+# Helper function to open text in an external editor
+def get_text_from_editor(initial_text: str = "") -> str:
+    """
+    Opens the user's configured editor ($VISUAL or $EDITOR) to edit the initial_text.
+    Falls back to 'nano' (Unix) or 'notepad' (Windows) if neither is set.
+    Returns the edited text, or the original text if an error occurs.
+    """
+    editor_cmd_str = os.environ.get("VISUAL") or os.environ.get("EDITOR")
+
+    if not editor_cmd_str:
+        if os.name == "nt":  # Windows
+            editor_cmd_str = "notepad"
+        else:  # Unix-like (Linux, macOS)
+            editor_cmd_str = "nano"  # A common, usually available, simple editor
+
+    # Use shlex.split to handle editors with arguments (e.g., "code --wait")
+    try:
+        editor_cmd_list = shlex.split(editor_cmd_str)
+        if not editor_cmd_list:  # Handle empty string from shlex.split
+            raise ValueError("Editor command string is empty or invalid.")
+    except ValueError as e:
+        rich_print(f"[red]Error: Invalid editor command string ('{editor_cmd_str}'): {e}[/red]")
+        return initial_text
+
+    # Create a temporary file for the editor to use.
+    # Using a suffix can help some editors with syntax highlighting or mode.
+    try:
+        with tempfile.NamedTemporaryFile(
+            mode="w+", delete=False, suffix=".txt", encoding="utf-8"
+        ) as tmp_file:
+            if initial_text:
+                tmp_file.write(initial_text)
+                tmp_file.flush()  # Ensure content is written to disk before editor opens it
+            temp_file_path = tmp_file.name
+    except Exception as e:
+        rich_print(f"[red]Error: Could not create temporary file for editor: {e}[/red]")
+        return initial_text
+
+    try:
+        # Construct the full command: editor_parts + [temp_file_path]
+        # e.g., ['vim', '/tmp/somefile.txt'] or ['code', '--wait', '/tmp/somefile.txt']
+        full_cmd = editor_cmd_list + [temp_file_path]
+
+        # Run the editor. This is a blocking call.
+        subprocess.run(full_cmd, check=True)
+
+        # Read the content back from the temporary file.
+        with open(temp_file_path, "r", encoding="utf-8") as f:
+            edited_text = f.read()
+
+    except FileNotFoundError:
+        rich_print(
+            f"[red]Error: Editor command '{editor_cmd_list[0]}' not found. "
+            f"Please set $VISUAL or $EDITOR correctly, or install '{editor_cmd_list[0]}'.[/red]"
+        )
+        return initial_text
+    except subprocess.CalledProcessError as e:
+        rich_print(
+            f"[red]Error: Editor '{editor_cmd_list[0]}' closed with an error (code {e.returncode}).[/red]"
+        )
+        return initial_text
+    except Exception as e:
+        rich_print(
+            f"[red]An unexpected error occurred while launching or using the editor: {e}[/red]"
+        )
+        return initial_text
+    finally:
+        # Always attempt to clean up the temporary file.
+        if "temp_file_path" in locals() and os.path.exists(temp_file_path):
+            try:
+                os.remove(temp_file_path)
+            except Exception as e:
+                rich_print(
+                    f"[yellow]Warning: Could not remove temporary file {temp_file_path}: {e}[/yellow]"
+                )
+
+    return edited_text.strip()  # Added strip() to remove trailing newlines often added by editors
+
+
 def create_keybindings(on_toggle_multiline=None, app=None):
     """Create custom key bindings."""
     kb = KeyBindings()
@@ -140,6 +224,27 @@ def create_keybindings(on_toggle_multiline=None, app=None):
         """Ctrl+L: Clear the input buffer."""
         event.current_buffer.text = ""
 
+    @kb.add("c-e")
+    async def _(event) -> None:
+        """Ctrl+E: Edit current buffer in $EDITOR."""
+        current_text = event.app.current_buffer.text
+        try:
+            # Run the synchronous editor function in a thread
+            edited_text = await event.app.loop.run_in_executor(
+                None, get_text_from_editor, current_text
+            )
+            event.app.current_buffer.text = edited_text
+            # Optionally, move cursor to the end of the edited text
+            event.app.current_buffer.cursor_position = len(edited_text)
+        except asyncio.CancelledError:
+            rich_print("[yellow]Editor interaction cancelled.[/yellow]")
+        except Exception as e:
+            rich_print(f"[red]Error during editor interaction: {e}[/red]")
+        finally:
+            # Ensure the UI is updated
+            if event.app:
+                event.app.invalidate()
+
     return kb
 
 
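A quick usage sketch for the helper added above, outside the Ctrl+E key binding. The editor command is only an example; `get_text_from_editor` uses whatever `$VISUAL` or `$EDITOR` is set to and blocks until the editor exits.

```python
# Minimal sketch: drive get_text_from_editor() directly.
import os
from mcp_agent.core.enhanced_prompt import get_text_from_editor

os.environ["EDITOR"] = "nano"  # any editor command works, e.g. "code --wait"
draft = "Summarise the attached report in three bullet points."
edited = get_text_from_editor(draft)  # opens the editor, returns the saved text
print(edited)
```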
mcp_agent/core/interactive_prompt.py
CHANGED
@@ -351,7 +351,7 @@ class InteractivePrompt:
             for prompt in prompts:
                 # Get basic prompt info
                 prompt_name = getattr(prompt, "name", "Unknown")
-
+                prompt_description = getattr(prompt, "description", "No description")
 
                 # Extract argument information
                 arg_names = []
@@ -387,7 +387,7 @@ class InteractivePrompt:
                     "server": server_name,
                     "name": prompt_name,
                     "namespaced_name": namespaced_name,
-                    "description":
+                    "description": prompt_description,
                     "arg_count": len(arg_names),
                     "arg_names": arg_names,
                     "required_args": required_args,
mcp_agent/llm/model_factory.py
CHANGED
@@ -10,6 +10,7 @@ from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
 from mcp_agent.llm.augmented_llm_playback import PlaybackLLM
 from mcp_agent.llm.augmented_llm_slow import SlowLLM
 from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_aliyun import AliyunAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_azure import AzureOpenAIAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
@@ -103,6 +104,10 @@ class ModelFactory:
         "gemini-2.0-flash": Provider.GOOGLE,
         "gemini-2.5-flash-preview-05-20": Provider.GOOGLE,
         "gemini-2.5-pro-preview-05-06": Provider.GOOGLE,
+        "qwen-turbo": Provider.ALIYUN,
+        "qwen-plus": Provider.ALIYUN,
+        "qwen-max": Provider.ALIYUN,
+        "qwen-long": Provider.ALIYUN,
     }
 
     MODEL_ALIASES = {
@@ -136,6 +141,7 @@ class ModelFactory:
         Provider.OPENROUTER: OpenRouterAugmentedLLM,
         Provider.TENSORZERO: TensorZeroAugmentedLLM,
         Provider.AZURE: AzureOpenAIAugmentedLLM,
+        Provider.ALIYUN: AliyunAugmentedLLM,
     }
 
     # Mapping of special model names to their specific LLM classes
mcp_agent/llm/providers/augmented_llm_aliyun.py
ADDED
@@ -0,0 +1,30 @@
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+ALIYUN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
+DEFAULT_QWEN_MODEL = "qwen-turbo"
+
+
+class AliyunAugmentedLLM(OpenAIAugmentedLLM):
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, provider=Provider.ALIYUN, **kwargs)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Aliyun-specific default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_QWEN_MODEL)
+
+        return RequestParams(
+            model=chosen_model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    def _base_url(self) -> str:
+        base_url = None
+        if self.context.config and self.context.config.aliyun:
+            base_url = self.context.config.aliyun.base_url
+
+        return base_url if base_url else ALIYUN_BASE_URL
mcp_agent/llm/providers/augmented_llm_deepseek.py
CHANGED
@@ -1,6 +1,16 @@
+from copy import copy
+from typing import List, Tuple, Type, cast
+
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionMessage,
+)
+
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from mcp_agent.mcp.interfaces import ModelT
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 DEEPSEEK_BASE_URL = "https://api.deepseek.com"
 DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type models
@@ -28,3 +38,56 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
             base_url = self.context.config.deepseek.base_url
 
         return base_url if base_url else DEEPSEEK_BASE_URL
+
+    async def _apply_prompt_provider_specific_structured(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
+        request_params = self.get_request_params(request_params)
+
+        request_params.response_format = {"type": "json_object"}
+
+        # Get the full schema and extract just the properties
+        full_schema = model.model_json_schema()
+        properties = full_schema.get("properties", {})
+        required_fields = full_schema.get("required", [])
+
+        # Create a cleaner format description
+        format_description = "{\n"
+        for field_name, field_info in properties.items():
+            field_type = field_info.get("type", "string")
+            description = field_info.get("description", "")
+            format_description += f'    "{field_name}": "{field_type}"'
+            if description:
+                format_description += f"  // {description}"
+            if field_name in required_fields:
+                format_description += "  // REQUIRED"
+            format_description += "\n"
+        format_description += "}"
+
+        multipart_messages[-1].add_text(
+            f"""YOU MUST RESPOND WITH A JSON OBJECT IN EXACTLY THIS FORMAT:
+{format_description}
+
+IMPORTANT RULES:
+- Respond ONLY with the JSON object, no other text
+- Do NOT include "properties" or "schema" wrappers
+- Do NOT use code fences or markdown
+- The response must be valid JSON that matches the format above
+- All required fields must be included"""
+        )
+
+        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+            multipart_messages, request_params
+        )
+        return self._structured_from_multipart(result, model)
+
+    @classmethod
+    def convert_message_to_message_param(cls, message: ChatCompletionMessage, **kwargs) -> ChatCompletionAssistantMessageParam:
+        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
+        if hasattr(message, "reasoning_content"):
+            message = copy(message)
+            del message.reasoning_content
+        return cast("ChatCompletionAssistantMessageParam", message)
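To make the structured-output scaffold above concrete, here is a standalone sketch of the format description it builds, applied to a toy Pydantic model invented for the example.

```python
# Illustration only: reproduces the format-description loop from the diff for a toy model.
from pydantic import BaseModel, Field

class WeatherReport(BaseModel):
    city: str = Field(description="City name")
    temperature_c: float

schema = WeatherReport.model_json_schema()
properties = schema.get("properties", {})
required_fields = schema.get("required", [])

format_description = "{\n"
for field_name, field_info in properties.items():
    field_type = field_info.get("type", "string")
    description = field_info.get("description", "")
    format_description += f'    "{field_name}": "{field_type}"'
    if description:
        format_description += f"  // {description}"
    if field_name in required_fields:
        format_description += "  // REQUIRED"
    format_description += "\n"
format_description += "}"

print(format_description)
# {
#     "city": "string"  // City name  // REQUIRED
#     "temperature_c": "number"  // REQUIRED
# }
```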
mcp_agent/llm/providers/google_converter.py
CHANGED
@@ -166,6 +166,10 @@ class GoogleConverter:
         fast_agent_parts: List[
             TextContent | ImageContent | EmbeddedResource | CallToolRequestParams
         ] = []
+
+        if content is None or not hasattr(content, 'parts') or content.parts is None:
+            return []  # Google API response 'content' object is None. Cannot extract parts.
+
         for part in content.parts:
             if part.text:
                 fast_agent_parts.append(TextContent(type="text", text=part.text))
mcp_agent/mcp/common.py
CHANGED
@@ -8,9 +8,9 @@ SEP = "-"
|
|
8
8
|
|
9
9
|
def create_namespaced_name(server_name: str, resource_name: str) -> str:
|
10
10
|
"""Create a namespaced resource name from server and resource names"""
|
11
|
-
return f"{server_name}{SEP}{resource_name}"
|
11
|
+
return f"{server_name}{SEP}{resource_name}"[:64]
|
12
12
|
|
13
13
|
|
14
14
|
def is_namespaced_name(name: str) -> bool:
|
15
15
|
"""Check if a name is already namespaced"""
|
16
|
-
return SEP in name
|
16
|
+
return SEP in name
|
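The effect of the new 64-character cap on namespaced names, in a short sketch:

```python
# Sketch: names produced by create_namespaced_name() are now clipped to 64 characters.
from mcp_agent.mcp.common import create_namespaced_name, is_namespaced_name

name = create_namespaced_name("a" * 50, "extremely_long_tool_name_exported_by_the_server")
assert len(name) == 64           # longer combinations are truncated
assert is_namespaced_name(name)  # the "-" separator is still present
```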
mcp_agent/mcp/hf_auth.py
ADDED
@@ -0,0 +1,106 @@
+"""HuggingFace authentication utilities for MCP connections."""
+
+import os
+from typing import Dict, Optional
+from urllib.parse import urlparse
+
+
+def is_huggingface_url(url: str) -> bool:
+    """
+    Check if a URL is a HuggingFace URL that should receive HF_TOKEN authentication.
+
+    Args:
+        url: The URL to check
+
+    Returns:
+        True if the URL is a HuggingFace URL, False otherwise
+    """
+    try:
+        parsed = urlparse(url)
+        hostname = parsed.hostname
+        if hostname is None:
+            return False
+
+        # Check for HuggingFace domains
+        if hostname in {"hf.co", "huggingface.co"}:
+            return True
+
+        # Check for HuggingFace Spaces (*.hf.space)
+        # Use endswith to match subdomains like space-name.hf.space
+        # but ensure exact match to prevent spoofing like evil.hf.space.com
+        if hostname.endswith(".hf.space") and hostname.count(".") >= 2:
+            # Additional validation: ensure it's a valid HF Space domain
+            # Format should be: {space-name}.hf.space
+            parts = hostname.split(".")
+            if len(parts) == 3 and parts[-2:] == ["hf", "space"]:
+                space_name = parts[0]
+                # Validate space name: not empty, not just hyphens/dots, no spaces
+                return (len(space_name) > 0 and
+                        space_name != "-" and
+                        not space_name.startswith(".") and
+                        not space_name.endswith(".") and
+                        " " not in space_name)
+
+        return False
+    except Exception:
+        return False
+
+
+def get_hf_token_from_env() -> Optional[str]:
+    """
+    Get the HuggingFace token from the HF_TOKEN environment variable.
+
+    Returns:
+        The HF_TOKEN value if set, None otherwise
+    """
+    return os.environ.get("HF_TOKEN")
+
+
+def should_add_hf_auth(url: str, existing_headers: Optional[Dict[str, str]]) -> bool:
+    """
+    Determine if HuggingFace authentication should be added to the headers.
+
+    Args:
+        url: The URL to check
+        existing_headers: Existing headers dictionary (may be None)
+
+    Returns:
+        True if HF auth should be added, False otherwise
+    """
+    # Only add HF auth if:
+    # 1. URL is a HuggingFace URL
+    # 2. No existing Authorization header is set
+    # 3. HF_TOKEN environment variable is available
+
+    if not is_huggingface_url(url):
+        return False
+
+    if existing_headers and "Authorization" in existing_headers:
+        return False
+
+    return get_hf_token_from_env() is not None
+
+
+def add_hf_auth_header(url: str, headers: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
+    """
+    Add HuggingFace authentication header if appropriate.
+
+    Args:
+        url: The URL to check
+        headers: Existing headers dictionary (may be None)
+
+    Returns:
+        Updated headers dictionary with HF auth if appropriate, or original headers
+    """
+    if not should_add_hf_auth(url, headers):
+        return headers
+
+    hf_token = get_hf_token_from_env()
+    if hf_token is None:
+        return headers
+
+    # Create new headers dict or copy existing one
+    result_headers = dict(headers) if headers else {}
+    result_headers["Authorization"] = f"Bearer {hf_token}"
+
+    return result_headers
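A short usage sketch of the new helper, following the behaviour of the code above; the token value is a placeholder.

```python
# Sketch: HF_TOKEN is injected as a Bearer header for HuggingFace URLs only,
# and never overrides an Authorization header the caller already supplied.
import os
from mcp_agent.mcp.hf_auth import add_hf_auth_header

os.environ["HF_TOKEN"] = "hf_example_token"  # placeholder

print(add_hf_auth_header("https://hf.co/mcp", None))
# {'Authorization': 'Bearer hf_example_token'}

print(add_hf_auth_header("https://example.com/mcp", None))
# None (not a HuggingFace URL, headers returned unchanged)

print(add_hf_auth_header("https://hf.co/mcp", {"Authorization": "Bearer mine"}))
# {'Authorization': 'Bearer mine'}
```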
mcp_agent/mcp/mcp_agent_client_session.py
CHANGED
@@ -7,15 +7,13 @@ from datetime import timedelta
 from typing import TYPE_CHECKING
 
 from mcp import ClientSession, ServerNotification
+from mcp.shared.message import MessageMetadata
 from mcp.shared.session import (
     ProgressFnT,
     ReceiveResultT,
-    RequestId,
-    SendNotificationT,
     SendRequestT,
-    SendResultT,
 )
-from mcp.types import
+from mcp.types import Implementation, ListRootsResult, Root, ToolListChangedNotification
 from pydantic import FileUrl
 
 from mcp_agent.context_dependent import ContextDependent
@@ -76,12 +74,16 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
 
         # Only register callbacks if the server_config has the relevant settings
         list_roots_cb = list_roots if (self.server_config and self.server_config.roots) else None
-
+
         # Register sampling callback if either:
         # 1. Sampling is explicitly configured, OR
         # 2. Application-level auto_sampling is enabled
         sampling_cb = None
-        if
+        if (
+            self.server_config
+            and hasattr(self.server_config, "sampling")
+            and self.server_config.sampling
+        ):
             # Explicit sampling configuration
             sampling_cb = sample
         elif self._should_enable_auto_sampling():
@@ -100,9 +102,10 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         """Check if auto_sampling is enabled at the application level."""
         try:
             from mcp_agent.context import get_current_context
+
             context = get_current_context()
             if context and context.config:
-                return getattr(context.config,
+                return getattr(context.config, "auto_sampling", True)
         except Exception:
             pass
         return True  # Default to True if can't access config
@@ -112,6 +115,7 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         request: SendRequestT,
         result_type: type[ReceiveResultT],
         request_read_timeout_seconds: timedelta | None = None,
+        metadata: MessageMetadata | None = None,
         progress_callback: ProgressFnT | None = None,
     ) -> ReceiveResultT:
         logger.debug("send_request: request=", data=request.model_dump())
@@ -120,32 +124,18 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
                 request=request,
                 result_type=result_type,
                 request_read_timeout_seconds=request_read_timeout_seconds,
+                metadata=metadata,
                 progress_callback=progress_callback,
-                metadata=None,
             )
-            logger.debug(
+            logger.debug(
+                "send_request: response=",
+                data=result.model_dump() if result is not None else "no response returned",
+            )
             return result
         except Exception as e:
             logger.error(f"send_request failed: {str(e)}")
             raise
 
-    async def send_notification(self, notification: SendNotificationT) -> None:
-        logger.debug("send_notification:", data=notification.model_dump())
-        try:
-            return await super().send_notification(notification)
-        except Exception as e:
-            logger.error("send_notification failed", data=e)
-            raise
-
-    async def _send_response(
-        self, request_id: RequestId, response: SendResultT | ErrorData
-    ) -> None:
-        logger.debug(
-            f"send_response: request_id={request_id}, response=",
-            data=response.model_dump(),
-        )
-        return await super()._send_response(request_id, response)
-
     async def _received_notification(self, notification: ServerNotification) -> None:
         """
         Can be overridden by subclasses to handle a notification without needing
@@ -189,17 +179,3 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
                 await self._tool_list_changed_callback(server_name)
             except Exception as e:
                 logger.error(f"Error in tool list changed callback: {e}")
-
-    async def send_progress_notification(
-        self, progress_token: str | int, progress: float, total: float | None = None
-    ) -> None:
-        """
-        Sends a progress notification for a request that is currently being
-        processed.
-        """
-        logger.debug(
-            "send_progress_notification: progress_token={progress_token}, progress={progress}, total={total}"
-        )
-        return await super().send_progress_notification(
-            progress_token=progress_token, progress=progress, total=total
-        )
mcp_agent/mcp/mcp_aggregator.py
CHANGED
@@ -82,13 +82,14 @@ class MCPAggregator(ContextDependent):
             await self.context._connection_manager.__aenter__()
             self._persistent_connection_manager = self.context._connection_manager
 
-        await self.load_servers()
         # Import the display component here to avoid circular imports
         from mcp_agent.ui.console_display import ConsoleDisplay
 
         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
 
+        await self.load_servers()
+
         return self
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
mcp_agent/mcp_server_registry.py
CHANGED
@@ -27,6 +27,7 @@ from mcp_agent.config import (
     get_settings,
 )
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.hf_auth import add_hf_auth_header
 from mcp_agent.mcp.logger_textio import get_stderr_handler
 from mcp_agent.mcp.mcp_connection_manager import (
     MCPConnectionManager,
@@ -176,11 +177,14 @@ class ServerRegistry:
             if not config.url:
                 raise ValueError(f"URL is required for SSE transport: {server_name}")
 
+            # Apply HuggingFace authentication if appropriate
+            headers = add_hf_auth_header(config.url, config.headers)
+
             # Use sse_client to get the read and write streams
             async with _add_none_to_context(
                 sse_client(
                     config.url,
-
+                    headers,
                     sse_read_timeout=config.read_transport_sse_timeout_seconds,
                 )
             ) as (read_stream, write_stream, _):
@@ -198,9 +202,12 @@ class ServerRegistry:
                 logger.debug(f"{server_name}: Closed session to server")
         elif config.transport == "http":
             if not config.url:
-                raise ValueError(f"URL is required for
+                raise ValueError(f"URL is required for HTTP transport: {server_name}")
+
+            # Apply HuggingFace authentication if appropriate
+            headers = add_hf_auth_header(config.url, config.headers)
 
-            async with streamablehttp_client(config.url,
+            async with streamablehttp_client(config.url, headers) as (
                 read_stream,
                 write_stream,
                 _,
{fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/WHEEL
File without changes
{fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/entry_points.txt
File without changes
{fast_agent_mcp-0.2.28.dist-info → fast_agent_mcp-0.2.30.dist-info}/licenses/LICENSE
File without changes