fast-agent-mcp 0.2.2__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/METADATA +12 -8
  2. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/RECORD +34 -31
  3. mcp_agent/__init__.py +2 -2
  4. mcp_agent/agents/agent.py +5 -0
  5. mcp_agent/agents/base_agent.py +158 -42
  6. mcp_agent/agents/workflow/chain_agent.py +9 -13
  7. mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
  8. mcp_agent/agents/workflow/orchestrator_agent.py +9 -7
  9. mcp_agent/agents/workflow/parallel_agent.py +2 -2
  10. mcp_agent/agents/workflow/router_agent.py +7 -5
  11. mcp_agent/core/{direct_agent_app.py → agent_app.py} +115 -15
  12. mcp_agent/core/direct_factory.py +12 -12
  13. mcp_agent/core/fastagent.py +17 -4
  14. mcp_agent/core/mcp_content.py +38 -5
  15. mcp_agent/core/prompt.py +70 -8
  16. mcp_agent/core/validation.py +1 -1
  17. mcp_agent/llm/augmented_llm.py +38 -8
  18. mcp_agent/llm/model_factory.py +17 -27
  19. mcp_agent/llm/providers/augmented_llm_generic.py +5 -4
  20. mcp_agent/llm/providers/augmented_llm_openai.py +3 -3
  21. mcp_agent/llm/providers/multipart_converter_anthropic.py +8 -8
  22. mcp_agent/llm/providers/multipart_converter_openai.py +9 -9
  23. mcp_agent/mcp/helpers/__init__.py +3 -0
  24. mcp_agent/mcp/helpers/content_helpers.py +116 -0
  25. mcp_agent/mcp/interfaces.py +39 -16
  26. mcp_agent/mcp/mcp_aggregator.py +117 -13
  27. mcp_agent/mcp/prompt_message_multipart.py +29 -22
  28. mcp_agent/mcp/prompt_render.py +18 -15
  29. mcp_agent/mcp/prompts/prompt_helpers.py +22 -112
  30. mcp_agent/mcp_server/agent_server.py +2 -2
  31. mcp_agent/resources/examples/internal/history_transfer.py +35 -0
  32. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/WHEEL +0 -0
  33. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/entry_points.txt +0 -0
  34. {fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.2
+ Version: 0.2.4
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
  License: Apache License
@@ -251,6 +251,9 @@ Description-Content-Type: text/markdown
 
  ## Overview
 
+ > [!TIP]
+ > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not.
+
  **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o family, o1/o3 family) are supported.
 
  The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).
@@ -325,7 +328,6 @@ fast = FastAgent("Agent Example")
  @fast.agent(
  instruction="Given an object, respond only with an estimate of its size."
  )
-
  async def main():
  async with fast.run() as agent:
  await agent()
@@ -340,7 +342,7 @@ Specify a model with the `--model` switch - for example `uv run sizer.py --model
 
  ### Combining Agents and using MCP Servers
 
- _To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
+ _To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run workflow/chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
 
  Agents can be chained to build a workflow, using MCP Servers defined in the `fastagent.config.yaml` file:
 
@@ -357,12 +359,14 @@ Agents can be chained to build a workflow, using MCP Servers defined in the `fas
  Respond only with the post, never use hashtags.
  """,
  )
-
+ @fast.chain(
+ name="post_writer",
+ sequence=["url_fetcher", "social_media"],
+ )
  async def main():
  async with fast.run() as agent:
- await agent.social_media(
- await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
- )
+ # using chain workflow
+ await agent.post_writer("http://llmindset.co.uk")
  ```
 
  All Agents and Workflows respond to `.send("message")` or `.prompt()` to begin a chat session.
@@ -370,7 +374,7 @@ All Agents and Workflows respond to `.send("message")` or `.prompt()` to begin a
  Saved as `social.py` we can now run this workflow from the command line with:
 
  ```bash
- uv run social.py --agent social_media --message "<url>"
+ uv run workflow/chaining.py --agent post_writer --message "<url>"
  ```
 
  Add the `--quiet` switch to disable progress and message display and return only the final response - useful for simple automations.
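The README fragments above omit the two agent definitions that feed the chain. A minimal sketch of how the full `workflow/chaining.py` script might fit together is shown below; the agent instructions, the `servers=["fetch"]` parameter, and the decorator keyword spellings are illustrative assumptions, not the code shipped in the package.

```python
# Hedged reconstruction of the chaining example described above.
# Instructions and the "fetch" server reference are assumptions.
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Social Media Example")


@fast.agent(name="url_fetcher", instruction="Fetch the URL and summarise it.", servers=["fetch"])
@fast.agent(name="social_media", instruction="Write a short social media post for the content.")
@fast.chain(name="post_writer", sequence=["url_fetcher", "social_media"])
async def main():
    async with fast.run() as agent:
        # The chain pipes url_fetcher's output into social_media.
        print(await agent.post_writer("http://llmindset.co.uk"))


if __name__ == "__main__":
    asyncio.run(main())
```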
{fast_agent_mcp-0.2.2.dist-info → fast_agent_mcp-0.2.4.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- mcp_agent/__init__.py,sha256=6ewnFA5JA8SUjWsWPVyjEQx5xfZKPbVz2WjavA55uvA,1683
+ mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
  mcp_agent/app.py,sha256=jBmzYM_o50g8vhlTgkkf5TGiBWNbXWViYnd0WANbpzo,10276
  mcp_agent/config.py,sha256=lI4B9VhF0qplez20cPeegxPA1ZcFBIxzepm_FOhW_08,10758
  mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
@@ -8,16 +8,16 @@ mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2
  mcp_agent/mcp_server_registry.py,sha256=r24xX4BYXj4BbWbU37uwuW9e1mFOYgpb258OMb21SaY,9928
  mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
  mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/agents/agent.py,sha256=yLKtYiAnXHjmjH61rkEtgZFc-9zIMKui7RPIHG-IrE8,3635
- mcp_agent/agents/base_agent.py,sha256=SEd45M5EU4UbmUvd54C0d1XCvQCClbx3apwa9DZ0_xw,19007
+ mcp_agent/agents/agent.py,sha256=NKz8HTCdjIBDSJwl6EHU2NDnZcAtYiaYH3YnbVGrc3Q,3882
+ mcp_agent/agents/base_agent.py,sha256=jQ91SBazFwGlwajveLvitMP5oLOQcuEXwUqwe0JPpd8,23203
  mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
- mcp_agent/agents/workflow/chain_agent.py,sha256=tsyWQd_USU9_N_ouUKp97ufn6rrJbxbGEzcNFHiccTA,6228
- mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=vgnNGbvnnZv9vAJlyN_V0u7UCl8nu4NIJDTeo3jpjkw,13065
- mcp_agent/agents/workflow/orchestrator_agent.py,sha256=j-xulY2i1Jxry-iOJvXwUpp8-Vlhv0uZ5LknqCcMNjc,21230
+ mcp_agent/agents/workflow/chain_agent.py,sha256=ff5ksaJiAm007MMl8QO4pBTTIgQLcf9GLZpRtYkfBJQ,6201
+ mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ArM2CySsTY0gSPndox1DdjecRdNtWIj-Qm9ApUWkygw,13103
+ mcp_agent/agents/workflow/orchestrator_agent.py,sha256=nf7_Rq3XOiLfLsZ7MHVKAMkhaWJIz5NkWe2xYFH3yqc,21409
  mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
  mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
- mcp_agent/agents/workflow/parallel_agent.py,sha256=yGT0Ftg3kn1BdJ9k0Yy9tp9hcBu_99BMl9oVjJSUMWU,6866
- mcp_agent/agents/workflow/router_agent.py,sha256=PMm2fJsDekh_Bm5aQK2JOHbW-JarZ0ZFt6-4AiWdJOI,10337
+ mcp_agent/agents/workflow/parallel_agent.py,sha256=denkFKrvZJZ4c3Cja9cx-EOUhd-mDO30UIU48WGMnEM,6901
+ mcp_agent/agents/workflow/router_agent.py,sha256=iOIPhMP9-w-lho50obx1rvGphI_-7Hdr9E9ohlKmtJk,10484
  mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
  mcp_agent/cli/main.py,sha256=oumTbJPbiLFomm5IcdKJNjP3kXVkeYTxZEtYTkBBfLA,2742
@@ -26,19 +26,19 @@ mcp_agent/cli/commands/bootstrap.py,sha256=Pv3LQUQLK_5-8nbOQ6iibJI7awgD04P9xh6-V
  mcp_agent/cli/commands/config.py,sha256=jU2gl4d5YESrdUboh3u6mxf7CxVT-_DT_sK8Vuh3ajw,231
  mcp_agent/cli/commands/setup.py,sha256=iXsKrf31Szv4Umbk9JfR5as9HcivFJchhE1KKzHxyIo,6345
  mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ mcp_agent/core/agent_app.py,sha256=9c7V-gZKudl-6u2BB__aLEvC4iemJeWpyoI8fGpXsXk,10145
  mcp_agent/core/agent_types.py,sha256=Qyhvzy2CcD7wMaxavuMUOQnD_rg5LZ1RT3DwXVYaM1Q,1345
- mcp_agent/core/direct_agent_app.py,sha256=666Xe3ebFdNtZ2zq6YA_5ZJMupfydUldHuzCZLOiIW8,6559
  mcp_agent/core/direct_decorators.py,sha256=_kS0C9UbwRQ54z58vfUapFXXyJrm1XRVyLMuB7bL0b8,14471
- mcp_agent/core/direct_factory.py,sha256=uyPGZ20FkR1ObAARjAHHBP-dVyZG7meVZt2fdUpaUFk,17767
+ mcp_agent/core/direct_factory.py,sha256=PGkQCeTd7ACd1aHwDWsPOQPyG5_NbC8U2wxzbldTJ1Q,17764
  mcp_agent/core/enhanced_prompt.py,sha256=loLFJfBgFaAxYqZednUJLLuunFiwrYQb60y1_wNJsgQ,17927
  mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
  mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
- mcp_agent/core/fastagent.py,sha256=ImromTBDkkA1Lf8NPVaGHP6Qmn5BP0K9Q8SZnJQ4kHU,12025
+ mcp_agent/core/fastagent.py,sha256=ttkBs2yYaZb1gsL6-iYaopSsBKUt4kVeddj16TuWZ2o,12477
  mcp_agent/core/interactive_prompt.py,sha256=04yoeOX2JLatr2tuOFfnb84GMwFUIBnBC7y1M_gqOM8,17692
- mcp_agent/core/mcp_content.py,sha256=u9x-W9JBphHySAOGOLw7dROplA-bdcZrgO5by6y2Cb4,6909
- mcp_agent/core/prompt.py,sha256=nEx_O9euFCX-o02b420MAVe6lH_IcgQRHnbFeAPrVt0,4274
+ mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
+ mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
  mcp_agent/core/request_params.py,sha256=bEjWo86fqxdiWm2U5nPDd1uCUpcIQO9oiCinhB8lQN0,1185
- mcp_agent/core/validation.py,sha256=CLScPnMopneOCrmUppebxDFCnyovJ5xQwIL--d8WuD4,11211
+ mcp_agent/core/validation.py,sha256=euK9FD7rn0px9mBMCBecWHChnZTNVIdc4452ruOTB0Q,11207
  mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/executor/decorator_registry.py,sha256=CozMdIzE1LyyBfE3_R_kuxb5yi-mi--u_czKOZRp6Dg,3832
  mcp_agent/executor/executor.py,sha256=MzLSnW9nHrLHYChR3oQa5B8dajQGX26q6-S2BJCxv0o,9507
@@ -50,11 +50,11 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
  mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
  mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
- mcp_agent/llm/augmented_llm.py,sha256=PdEMlglHFCTenqVDWsvEj3iBZh5ljFP7ZBdMibTGUxQ,16861
+ mcp_agent/llm/augmented_llm.py,sha256=3T7F-VcNc4_n7f9ODBcIhE1KYeFgACOdMslPG0FV624,18124
  mcp_agent/llm/augmented_llm_passthrough.py,sha256=_DC6lGYbXPMXBeJn9Ot2fq-fXJ5GP7HhRmlY9pNvJ2s,6033
  mcp_agent/llm/augmented_llm_playback.py,sha256=YVR2adzjMf9Q5WfYBytryWMRqJ87a3kNBnjxhApsMcU,3413
  mcp_agent/llm/memory.py,sha256=UakoBCJBf59JBtB6uyZM0OZjlxDW_VHtSfDs08ibVEc,3312
- mcp_agent/llm/model_factory.py,sha256=NFe-m8AmIyHWDVmspJR_nEDs02eBAttf6avesOjAOhs,8336
+ mcp_agent/llm/model_factory.py,sha256=y65gUc8IyX8vbdK_oOIIjauIYk5h7jyrDAdjTlRdBcc,7655
  mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
  mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
  mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
@@ -62,10 +62,10 @@ mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ
  mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
  mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=9JXyweks5Joes4ERtmi2wX8i7ZsXydKM7IkMq7s7dIU,15429
  mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=SdYDqZZ9hM9sBvW1FSItNn_ENEKQXGNKwVHGnjqjyAA,1927
- mcp_agent/llm/providers/augmented_llm_generic.py,sha256=qh4I8_wwududAbYRHDTN3rwsRfwZ7PKf6VjHewK9azQ,1550
- mcp_agent/llm/providers/augmented_llm_openai.py,sha256=-EkkekxYn85QUXsCmqX6Mk8vUE91ZJCUxzL0N9-fHXg,18136
- mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=3eRu7MSkbvBzKLptNdC_u0QohU_6eJ1ikRTaT6yPm0Y,16650
- mcp_agent/llm/providers/multipart_converter_openai.py,sha256=-EUAVP7ZB5JzvVe4Gtn5av9syWyN74BoDzhK-sOpSdg,16709
+ mcp_agent/llm/providers/augmented_llm_generic.py,sha256=IIgwPYsVGwDdL2mMYsc5seY3pVFblMwmnxoI5dbxras,1524
+ mcp_agent/llm/providers/augmented_llm_openai.py,sha256=6ZUEOXW-cDENAizMPUKJhhklJyQf73IcyVqT9-3To80,18215
+ mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
+ mcp_agent/llm/providers/multipart_converter_openai.py,sha256=zCj0LBgd9FDG8aL_GeTrPo2ssloYnmC_Uj3ENWVUJAg,16753
  mcp_agent/llm/providers/openai_multipart.py,sha256=qKBn7d3jSabnJmVgWweVzqh8q9mBqr09fsPmP92niAQ,6899
  mcp_agent/llm/providers/openai_utils.py,sha256=T4bTCL9f7DsoS_zoKgQKv_FUv_4n98vgbvaUpdWZJr8,1875
  mcp_agent/llm/providers/sampling_converter_anthropic.py,sha256=35WzBWkPklnuMlu5S6XsQIq0YL58NOy8Ja6A_l4m6eM,1612
@@ -80,28 +80,30 @@ mcp_agent/logging/tracing.py,sha256=d5lSXakzzi5PtQpUkVkOnYaGX8NduGPq__S7vx-Ln8U,
  mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
  mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
- mcp_agent/mcp/interfaces.py,sha256=YuE-wBtKIzaeejQ4YuzETIVlBJCun332ciez89Frijk,5858
+ mcp_agent/mcp/interfaces.py,sha256=8fsQj8r7sMrIyrJHHbUIEP86SVLyXPpEI36KZq1abc0,6644
  mcp_agent/mcp/logger_textio.py,sha256=OpnqMam9Pu0oVzYQWFMhrX1dRg2f5Fqb3qqPA6QAATM,2778
  mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
  mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
  mcp_agent/mcp/mcp_agent_server.py,sha256=SnKJ9KCMnklGLHKZ9UsgAxd9IOKqiSCRPmQeTGu0CK8,1643
- mcp_agent/mcp/mcp_aggregator.py,sha256=q-NeCBiderNEf11qGsw8vJ3vssTxdvgJR5lpSFAijlo,36497
+ mcp_agent/mcp/mcp_aggregator.py,sha256=jaWbOvb3wioECohZ47CubyxfJ5QkfNSshu1hwhZksG4,40486
  mcp_agent/mcp/mcp_connection_manager.py,sha256=desQBreHbIcjY7AidcDO6pFomHOx9oOZPOWIcHAx1K0,13761
  mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
- mcp_agent/mcp/prompt_message_multipart.py,sha256=Hzgb3hN8KEa3H6lxtu5I_Ifr6_F8uyRTuV5A5B-1m-g,3451
- mcp_agent/mcp/prompt_render.py,sha256=SHjHbK5RP2E1T9BOrDGHr4_5JOM6N_V1ClvBrKFeOGY,2907
+ mcp_agent/mcp/prompt_message_multipart.py,sha256=IpIndd75tAcCbJbfqjpAF0tOUUP1TQceDbWoxO5gvpo,3684
+ mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
  mcp_agent/mcp/prompt_serialization.py,sha256=-qmE6CmGyB-wmFqZ3L7PyJtidw8kmwuXpt6x1XHdJVk,15856
  mcp_agent/mcp/resource_utils.py,sha256=K4XY8bihmBMleRTZ2viMPiD2Y2HWxFnlgIJi6dd_PYE,6588
  mcp_agent/mcp/sampling.py,sha256=vzWrIdI1CyFSxDWO-O69TpD6RwQcCM694BqMlYPVtaw,4584
+ mcp_agent/mcp/helpers/__init__.py,sha256=sKqwlUR3jSsd9PVJKjXtxHgZA1YOdzPtsSW4xVey77Q,52
+ mcp_agent/mcp/helpers/content_helpers.py,sha256=KsD77eCr1O6gv2Fz7vlVZxLyBgqscgsS25OqSJ8ksoY,3349
  mcp_agent/mcp/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/mcp/prompts/__main__.py,sha256=gr1Tdz9fcK0EXjEuZg_BOnKUmvhYq5AH2lFZicVyNb0,237
  mcp_agent/mcp/prompts/prompt_constants.py,sha256=Q9W0t3rOXl2LHIG9wcghApUV2QZ1iICuo7SwVwHUf3c,566
- mcp_agent/mcp/prompts/prompt_helpers.py,sha256=qhSdB4ZhLsU_h0nZyXXPnoSfgKsDNcYrH3rHdsvZIjw,10044
+ mcp_agent/mcp/prompts/prompt_helpers.py,sha256=Joqo2t09pTKDP-Wge3G-ozPEHikzjaqwV6GVk8hNR50,7534
  mcp_agent/mcp/prompts/prompt_load.py,sha256=ohRvAzJwZvgrN_7iKzy0RA7ILWVlOoD8KvI1c0Xr_eI,3908
  mcp_agent/mcp/prompts/prompt_server.py,sha256=tXtQd4EnH86MmdAvHlXm4oOS1dWLSCW5PvoA7uU1TvA,16493
  mcp_agent/mcp/prompts/prompt_template.py,sha256=EejiqGkau8OizORNyKTUwUjrPof5V-hH1H_MBQoQfXw,15732
  mcp_agent/mcp_server/__init__.py,sha256=zBU51ITHIEPScd9nRafnhEddsWqXRPAAvHhkrbRI2_4,155
- mcp_agent/mcp_server/agent_server.py,sha256=I3jh2izfiUSQFZzboEavtp8DJpkx3tuaeLqRReOTuiM,4491
+ mcp_agent/mcp_server/agent_server.py,sha256=vGMPy9ZjceqlOAnwDgm29bzjZegxChXZQHvj7OUs5Oo,4472
  mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=QdNdo0-7LR4Uzw61hEU_jVKmWyk6A9YpGo81kMwVobM,7267
  mcp_agent/resources/examples/data-analysis/analysis.py,sha256=M9z8Q4YC5OGuqSa5uefYmmfmctqMn-WqCSfg5LI407o,2609
  mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
@@ -110,6 +112,7 @@ mcp_agent/resources/examples/in_dev/agent_build.py,sha256=eetMEdYDbmnRH4CLO7chpQ
  mcp_agent/resources/examples/in_dev/slides.py,sha256=-SEFeGIg9SLF253NIxmA0NjlanLe8CR1yjDBBp2LXgs,4904
  mcp_agent/resources/examples/internal/agent.py,sha256=RZOMb5cJzIY1k0V28YgrHcUFSt0Uy977towy4yJE7bA,502
  mcp_agent/resources/examples/internal/fastagent.config.yaml,sha256=7anEzFqlMg3i5T8fEiqGYSO3fgMGI5P2BRsOJKsKl8k,1983
+ mcp_agent/resources/examples/internal/history_transfer.py,sha256=ETyX2wMMvUnMpUhu4ij9ZTpw2wQR6vPpBwlXJswPnhM,1157
  mcp_agent/resources/examples/internal/job.py,sha256=ANF3c01gHJ4O4pIxaAtC3rdgYqVObMySaCUBS4dApW4,4102
  mcp_agent/resources/examples/internal/prompt_category.py,sha256=kMvqNX_zu0sV-kTaAR3skc_tsq9t8QSEofciK0m4aJc,551
  mcp_agent/resources/examples/internal/prompt_sizing.py,sha256=bskgxulN57hVkc0V9W0fnjnqSRCK5Tkw9Ggf2MmGIVU,1989
@@ -132,8 +135,8 @@ mcp_agent/resources/examples/workflows/orchestrator.py,sha256=rOGilFTliWWnZ3Jx5w
  mcp_agent/resources/examples/workflows/parallel.py,sha256=n0dFN26QvYd2wjgohcaUBflac2SzXYx-bCyxMSousJE,1884
  mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
  mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
- fast_agent_mcp-0.2.2.dist-info/METADATA,sha256=868y8r88XRjKBhQk5rIJYVv6x1N1y7V-Wxwn0szRz6g,29647
- fast_agent_mcp-0.2.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- fast_agent_mcp-0.2.2.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
- fast_agent_mcp-0.2.2.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
- fast_agent_mcp-0.2.2.dist-info/RECORD,,
+ fast_agent_mcp-0.2.4.dist-info/METADATA,sha256=ZZc6lPJvUVd0k4Cw9nqy-tOrNjw5KtIlT_Cb8cOR0tk,29841
+ fast_agent_mcp-0.2.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ fast_agent_mcp-0.2.4.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+ fast_agent_mcp-0.2.4.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+ fast_agent_mcp-0.2.4.dist-info/RECORD,,
mcp_agent/__init__.py CHANGED
@@ -16,7 +16,7 @@ from mcp.types import (
 
  # Core agent components
  from mcp_agent.agents.agent import Agent, AgentConfig
- from mcp_agent.core.direct_agent_app import DirectAgentApp
+ from mcp_agent.core.agent_app import AgentApp
 
  # Workflow decorators
  from mcp_agent.core.direct_decorators import (
@@ -62,7 +62,7 @@ __all__ = [
  "PromptMessageMultipart",
  # FastAgent components
  "FastAgent",
- "DirectAgentApp",
+ "AgentApp",
  # Workflow decorators
  "agent",
  "orchestrator",
mcp_agent/agents/agent.py CHANGED
@@ -84,6 +84,11 @@ class Agent(BaseAgent):
  async def list_prompts_wrapper(agent_name):
  # Always call list_prompts on this agent regardless of agent_name
  return await self.list_prompts()
+
+ # Define wrapper for list_resources function
+ async def list_resources_wrapper(agent_name):
+ # Always call list_resources on this agent regardless of agent_name
+ return await self.list_resources()
 
  # Start the prompt loop with just this agent
  return await prompt.prompt_loop(
mcp_agent/agents/base_agent.py CHANGED
@@ -7,12 +7,25 @@ and delegates operations to an attached AugmentedLLMProtocol instance.
 
  import asyncio
  import uuid
- from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, TypeVar, Union
+ from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ )
 
  from mcp.types import (
  CallToolResult,
  EmbeddedResource,
+ GetPromptResult,
  ListToolsResult,
+ PromptMessage,
  ReadResourceResult,
  TextContent,
  Tool,
@@ -102,24 +115,54 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  """
  await self.__aenter__() # This initializes the connection manager and loads the servers
 
- async def attach_llm(self, llm_factory: Union[Type[LLM], Callable[..., LLM]], **kwargs) -> LLM:
- """
- Create an LLM instance for the agent.
-
+ async def attach_llm(
+ self,
+ llm_factory: Union[Type[AugmentedLLMProtocol], Callable[..., AugmentedLLMProtocol]],
+ model: Optional[str] = None,
+ request_params: Optional[RequestParams] = None,
+ **additional_kwargs
+ ) -> AugmentedLLMProtocol:
+ """
+ Create and attach an LLM instance to this agent.
+
+ Parameters have the following precedence (highest to lowest):
+ 1. Explicitly passed parameters to this method
+ 2. Agent's default_request_params
+ 3. LLM's default values
+
  Args:
- llm_factory: A class or callable that constructs an AugmentedLLM or its subclass.
- The factory should accept keyword arguments matching the
- AugmentedLLM constructor parameters.
- **kwargs: Additional keyword arguments to pass to the LLM constructor.
-
+ llm_factory: A class or callable that constructs an AugmentedLLM
+ model: Optional model name override
+ request_params: Optional request parameters override
+ **additional_kwargs: Additional parameters passed to the LLM constructor
+
  Returns:
- An instance of AugmentedLLM or one of its subclasses.
- """
-
+ The created LLM instance
+ """
+ # Start with agent's default params
+ effective_params = self._default_request_params.model_copy() if self._default_request_params else None
+
+ # Override with explicitly passed request_params
+ if request_params:
+ if effective_params:
+ # Update non-None values
+ for k, v in request_params.model_dump(exclude_unset=True).items():
+ if v is not None:
+ setattr(effective_params, k, v)
+ else:
+ effective_params = request_params
+
+ # Override model if explicitly specified
+ if model and effective_params:
+ effective_params.model = model
+
+ # Create the LLM instance
  self._llm = llm_factory(
- agent=self, default_request_params=self._default_request_params, **kwargs
+ agent=self,
+ request_params=effective_params,
+ **additional_kwargs
  )
-
+
  return self._llm
 
  async def shutdown(self) -> None:
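The reworked `attach_llm` resolves settings by precedence: explicit arguments, then the agent's `default_request_params`, then the LLM's own defaults. A hedged sketch of calling it directly is below; the `RequestParams` import path comes from the RECORD listing above, but the `maxTokens` field name and the factory argument are assumptions.

```python
from mcp_agent.core.request_params import RequestParams  # module exists per the RECORD diff


async def configure(agent, llm_factory):
    # Explicit model argument wins over any default model on the agent.
    await agent.attach_llm(llm_factory, model="haiku")

    # A partial RequestParams overrides only the fields it sets (maxTokens is an
    # assumed field name); unset fields fall back to default_request_params.
    return await agent.attach_llm(llm_factory, request_params=RequestParams(maxTokens=2048))
```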
@@ -154,27 +197,49 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  result: PromptMessageMultipart = await self.generate([Prompt.user(message)], request_params)
  return result.first_text()
 
- async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+ async def send(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
  """
  Send a message to the agent and get a response.
 
  Args:
- message: Either a string message or a PromptMessageMultipart object
+ message: Message content in various formats:
+ - String: Converted to a user PromptMessageMultipart
+ - PromptMessage: Converted to PromptMessageMultipart
+ - PromptMessageMultipart: Used directly
 
  Returns:
  The agent's response as a string
  """
-
- # Create a PromptMessageMultipart if we received a string
- if isinstance(message, str):
- prompt = Prompt.user(message)
- else:
- prompt = message
+ # Convert the input to a PromptMessageMultipart
+ prompt = self._normalize_message_input(message)
 
  # Use the LLM to generate a response
  response = await self.generate([prompt], None)
  return response.first_text()
 
+ def _normalize_message_input(
+ self, message: Union[str, PromptMessage, PromptMessageMultipart]
+ ) -> PromptMessageMultipart:
+ """
+ Convert a message of any supported type to PromptMessageMultipart.
+
+ Args:
+ message: Message in various formats (string, PromptMessage, or PromptMessageMultipart)
+
+ Returns:
+ A PromptMessageMultipart object
+ """
+ # Handle single message
+ if isinstance(message, str):
+ return Prompt.user(message)
+ elif isinstance(message, PromptMessage):
+ return PromptMessageMultipart(role=message.role, content=[message.content])
+ elif isinstance(message, PromptMessageMultipart):
+ return message
+ else:
+ # Try to convert to string as fallback
+ return Prompt.user(str(message))
+
  async def prompt(self, default_prompt: str = "") -> str:
  """
  Start an interactive prompt session with the agent.
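With `_normalize_message_input` in place, `send()` accepts a plain string, an MCP `PromptMessage`, or a `PromptMessageMultipart` interchangeably. A short usage sketch; the `Prompt` import path is assumed from `mcp_agent/core/prompt.py` in the RECORD above.

```python
from mcp.types import PromptMessage, TextContent

from mcp_agent.core.prompt import Prompt  # import path assumed


async def demo(agent):
    # Plain string: wrapped into a user PromptMessageMultipart.
    await agent.send("What size is the moon?")

    # PromptMessage: role is preserved, content is wrapped in a list.
    message = PromptMessage(role="user", content=TextContent(type="text", text="And the sun?"))
    await agent.send(message)

    # PromptMessageMultipart: passed through unchanged.
    await agent.send(Prompt.user("Compare the two sizes."))
```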
@@ -360,10 +425,34 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  content=[TextContent(type="text", text=f"Error requesting human input: {str(e)}")],
  )
 
- async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
+ async def get_prompt(
+ self,
+ prompt_name: str,
+ arguments: Dict[str, str] | None = None,
+ server_name: str | None = None,
+ ) -> GetPromptResult:
+ """
+ Get a prompt from a server.
+
+ Args:
+ prompt_name: Name of the prompt, optionally namespaced
+ arguments: Optional dictionary of arguments to pass to the prompt template
+ server_name: Optional name of the server to get the prompt from
+
+ Returns:
+ GetPromptResult containing the prompt information
+ """
+ return await super().get_prompt(prompt_name, arguments, server_name)
+
+ async def apply_prompt(
+ self,
+ prompt_name: str,
+ arguments: Dict[str, str] | None = None,
+ server_name: str | None = None,
+ ) -> str:
  """
  Apply an MCP Server Prompt by name and return the assistant's response.
- Will search all available servers for the prompt if not namespaced.
+ Will search all available servers for the prompt if not namespaced and no server_name provided.
 
  If the last message in the prompt is from a user, this will automatically
  generate an assistant response to ensure we always end with an assistant message.
@@ -371,6 +460,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  Args:
  prompt_name: The name of the prompt to apply
  arguments: Optional dictionary of string arguments to pass to the prompt template
+ server_name: Optional name of the server to get the prompt from
 
  Returns:
  The assistant's response or error message
@@ -378,7 +468,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
 
  # Get the prompt - this will search all servers if needed
  self.logger.debug(f"Loading prompt '{prompt_name}'")
- prompt_result = await self.get_prompt(prompt_name, arguments)
+ prompt_result = await self.get_prompt(prompt_name, arguments, server_name)
 
  if not prompt_result or not prompt_result.messages:
  error_msg = f"Prompt '{prompt_name}' could not be found or contains no messages"
@@ -386,10 +476,11 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  return error_msg
 
  # Get the display name (namespaced version)
- getattr(prompt_result, "namespaced_name", prompt_name)
+ namespaced_name = getattr(prompt_result, "namespaced_name", prompt_name)
+ self.logger.debug(f"Using prompt '{namespaced_name}'")
 
- # Convert prompt messages to multipart format
- multipart_messages = PromptMessageMultipart.to_multipart(prompt_result.messages)
+ # Convert prompt messages to multipart format using the safer method
+ multipart_messages = PromptMessageMultipart.from_get_prompt_result(prompt_result)
 
  # Always call generate to ensure LLM implementations can handle prompt templates
  # This is critical for stateful LLMs like PlaybackLLM
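`get_prompt` and `apply_prompt` now accept an optional `server_name`, so a prompt lookup can be pinned to one MCP server instead of searching them all. A hedged sketch; the prompt and server names are made up.

```python
async def apply_sizing_prompt(agent):
    # Previous behaviour: search every attached server for the prompt.
    await agent.apply_prompt("sizing", {"object": "the moon"})

    # New: pin the lookup to a single server (names here are illustrative).
    return await agent.apply_prompt("sizing", {"object": "the moon"}, server_name="prompt_server")
```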
@@ -397,14 +488,14 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  return response.first_text()
 
  async def get_embedded_resources(
- self, server_name: str, resource_name: str
+ self, resource_uri: str, server_name: str | None = None
  ) -> List[EmbeddedResource]:
  """
  Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
 
  Args:
- server_name: Name of the MCP server to retrieve the resource from
- resource_name: Name or URI of the resource to retrieve
+ resource_uri: URI of the resource to retrieve
+ server_name: Optional name of the MCP server to retrieve the resource from
 
  Returns:
  List of EmbeddedResource objects ready to use in a PromptMessageMultipart
@@ -413,7 +504,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  ValueError: If the server doesn't exist or the resource couldn't be found
  """
  # Get the raw resource result
- result: ReadResourceResult = await super().get_resource(server_name, resource_name)
+ result: ReadResourceResult = await self.get_resource(resource_uri, server_name)
 
  # Convert each resource content to an EmbeddedResource
  embedded_resources: List[EmbeddedResource] = []
@@ -427,24 +518,27 @@ class BaseAgent(MCPAggregator, AgentProtocol):
 
  async def with_resource(
  self,
- prompt_content: Union[str, PromptMessageMultipart],
- server_name: str,
- resource_name: str,
+ prompt_content: Union[str, PromptMessage, PromptMessageMultipart],
+ resource_uri: str,
+ server_name: str | None = None,
  ) -> str:
  """
  Create a prompt with the given content and resource, then send it to the agent.
 
  Args:
- prompt_content: Either a string message or an existing PromptMessageMultipart
- server_name: Name of the MCP server to retrieve the resource from
- resource_name: Name or URI of the resource to retrieve
+ prompt_content: Content in various formats:
+ - String: Converted to a user message with the text
+ - PromptMessage: Converted to PromptMessageMultipart
+ - PromptMessageMultipart: Used directly
+ resource_uri: URI of the resource to retrieve
+ server_name: Optional name of the MCP server to retrieve the resource from
 
  Returns:
  The agent's response as a string
  """
  # Get the embedded resources
  embedded_resources: List[EmbeddedResource] = await self.get_embedded_resources(
- server_name, resource_name
+ resource_uri, server_name
  )
 
  # Create or update the prompt message
@@ -454,12 +548,19 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  content = [TextContent(type="text", text=prompt_content)]
  content.extend(embedded_resources)
  prompt = PromptMessageMultipart(role="user", content=content)
+ elif isinstance(prompt_content, PromptMessage):
+ # Convert PromptMessage to PromptMessageMultipart and add resources
+ content = [prompt_content.content]
+ content.extend(embedded_resources)
+ prompt = PromptMessageMultipart(role=prompt_content.role, content=content)
  elif isinstance(prompt_content, PromptMessageMultipart):
  # Add resources to the existing prompt
  prompt = prompt_content
  prompt.content.extend(embedded_resources)
  else:
- raise TypeError("prompt_content must be a string or PromptMessageMultipart")
+ raise TypeError(
+ "prompt_content must be a string, PromptMessage, or PromptMessageMultipart"
+ )
 
  response: PromptMessageMultipart = await self.generate([prompt], None)
  return response.first_text()
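`get_embedded_resources` and `with_resource` now take the resource URI first, with the server name optional, matching the `apply_prompt` change above. A hedged sketch with an invented URI and server name follows.

```python
async def summarise_report(agent):
    # Fetch the resource, embed it after the text content, and send in one call.
    return await agent.with_resource(
        "Please summarise the attached document.",
        "resource://docs/report.md",  # illustrative resource URI
        server_name="docs",           # illustrative server name; optional
    )
```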
@@ -488,7 +589,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  prompt: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: RequestParams | None = None,
- ) -> ModelT | None:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Apply the prompt and return the result as a Pydantic model.
  Delegates to the attached LLM.
@@ -520,3 +621,18 @@ class BaseAgent(MCPAggregator, AgentProtocol):
 
  response = await self.generate(prompts, request_params)
  return response.first_text()
+
+ @property
+ def message_history(self) -> List[PromptMessageMultipart]:
+ """
+ Return the agent's message history as PromptMessageMultipart objects.
+
+ This history can be used to transfer state between agents or for
+ analysis and debugging purposes.
+
+ Returns:
+ List of PromptMessageMultipart objects representing the conversation history
+ """
+ if self._llm:
+ return self._llm.message_history
+ return []
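The new `message_history` property (used by the `history_transfer.py` example added in this release) exposes the conversation as `PromptMessageMultipart` objects so it can seed another agent. A minimal hedged sketch, assuming two agents named `haiku` and `sonnet` have been declared and that the named-agent wrappers expose the same `send`/`generate` surface as the underlying agents.

```python
async def transfer_history(agent):
    # Build up some conversation state on the first agent.
    await agent.haiku.send("Let's plan a three day trip to Paris.")

    # message_history returns List[PromptMessageMultipart]; replay it on a second agent.
    history = agent.haiku.message_history
    return await agent.sonnet.generate(history)
```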
mcp_agent/agents/workflow/chain_agent.py CHANGED
@@ -5,7 +5,7 @@ This provides an implementation that delegates operations to a sequence of
  other agents, chaining their outputs together.
  """
 
- from typing import Any, List, Optional, Type
+ from typing import Any, List, Optional, Tuple, Type
 
  from mcp.types import TextContent
 
@@ -77,7 +77,7 @@ class ChainAgent(BaseAgent):
  response: PromptMessageMultipart = await self.agents[0].generate(multipart_messages)
  # Process the rest of the agents in the chain
  for agent in self.agents[1:]:
- next_message = Prompt.user(response.content)
+ next_message = Prompt.user(response.content[0].text)
  response = await agent.generate([next_message])
 
  return response
@@ -123,7 +123,7 @@ class ChainAgent(BaseAgent):
  prompt: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: Optional[RequestParams] = None,
- ) -> Optional[ModelT]:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Chain the request through multiple agents and parse the final response.
 
@@ -137,16 +137,12 @@ class ChainAgent(BaseAgent):
  """
  # Generate response through the chain
  response = await self.generate(prompt, request_params)
-
- # Let the last agent in the chain try to parse the response
- if self.agents:
- last_agent = self.agents[-1]
- try:
- return await last_agent.structured([response], model, request_params)
- except Exception as e:
- self.logger.warning(f"Failed to parse response from chain: {str(e)}")
- return None
- return None
+ last_agent = self.agents[-1]
+ try:
+ return await last_agent.structured([response], model, request_params)
+ except Exception as e:
+ self.logger.warning(f"Failed to parse response from chain: {str(e)}")
+ return None, Prompt.assistant("Failed to parse response from chain: {str(e)}")
 
  async def initialize(self) -> None:
  """
mcp_agent/agents/workflow/evaluator_optimizer.py CHANGED
@@ -8,7 +8,7 @@ or a maximum number of refinements is attempted.
  """
 
  from enum import Enum
- from typing import Any, List, Optional, Type
+ from typing import Any, List, Optional, Tuple, Type
 
  from pydantic import BaseModel, Field
 
@@ -139,7 +139,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
 
  # Create evaluation message and get structured evaluation result
  eval_message = Prompt.user(eval_prompt)
- evaluation_result = await self.evaluator_agent.structured(
+ evaluation_result, _ = await self.evaluator_agent.structured(
  [eval_message], EvaluationResult, request_params
  )
 
@@ -202,7 +202,7 @@ class EvaluatorOptimizerAgent(BaseAgent):
  prompt: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: Optional[RequestParams] = None,
- ) -> Optional[ModelT]:
+ ) -> Tuple[ModelT | None, PromptMessageMultipart]:
  """
  Generate an optimized response and parse it into a structured format.