fast-agent-mcp 0.2.33__py3-none-any.whl → 0.2.35__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
fast_agent_mcp-0.2.33.dist-info/METADATA → fast_agent_mcp-0.2.35.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.33
+ Version: 0.2.35
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>
  License: Apache License
fast_agent_mcp-0.2.33.dist-info/RECORD → fast_agent_mcp-0.2.35.dist-info/RECORD
@@ -1,15 +1,15 @@
  mcp_agent/__init__.py,sha256=18T0AG0W9sJhTY38O9GFFOzliDhxx9p87CvRyti9zbw,1620
  mcp_agent/app.py,sha256=3mtHP1nRQcRaKhhxgTmCOv00alh70nT7UxNA8bN47QE,5560
- mcp_agent/config.py,sha256=9GDvMugKIeT9SKRGGEv2gN3lsC78hQ_Oy-HSpItuqo0,15841
+ mcp_agent/config.py,sha256=ZIGFCSWrhMqhlHhapQf3QXo9N6EuTVy5iZIFiiqwE2M,16289
  mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
- mcp_agent/context.py,sha256=H7JbaZ_8SzzTagLmIgUPUPxX5370C5qjQAsasFPZG2Y,7510
+ mcp_agent/context.py,sha256=f729LJcW4YoFXb0Rg_kEU-5FlrOnFgqplI6W0fVqomg,7631
  mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
- mcp_agent/event_progress.py,sha256=040lrCCclcOuryi07YGSej25kTQF5_JMXY12Yj-3u1U,2773
+ mcp_agent/event_progress.py,sha256=d7T1hQ1D289MYh2Z5bMPB4JqjGqTOzveJuOHE03B_Xo,3720
  mcp_agent/mcp_server_registry.py,sha256=b3iSb-0ULYc5yUG2KHav41WGwSYWiJCGQsOwWHWByxo,12346
  mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
  mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/agents/agent.py,sha256=EAYlcP1qqI1D0_CS808I806z1048FBjZQxxpcCZPeIU,3154
- mcp_agent/agents/base_agent.py,sha256=0P3Onibs_NRfiyABL8RdRW_qvXDTSxNu-agBvgjBymY,25427
+ mcp_agent/agents/base_agent.py,sha256=PHsHD3HynjHa4mILPHL6oZRpyCJrgQhzpgjTUnAcHzs,25855
  mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
  mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
  mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
@@ -28,18 +28,19 @@ mcp_agent/cli/commands/quickstart.py,sha256=SM3CHMzDgvTxIpKjFuX9BrS_N1vRoXNBDaO9
  mcp_agent/cli/commands/setup.py,sha256=eOEd4TL-b0DaDeSJMGOfNOsTEItoZ67W88eTP4aP-bo,6482
  mcp_agent/cli/commands/url_parser.py,sha256=5VdtcHRHzi67YignStVbz7u-rcvNNErw9oJLAUFOtEY,5855
  mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/core/agent_app.py,sha256=lHVU2c0y0_V9V0ll-RNvWC06aPVV256QzN_0PJVgqe4,10307
+ mcp_agent/core/agent_app.py,sha256=KJdx0Qbh7Gb4wA8_LwKriogc27SraRIrvMqHsOCVVt0,12119
  mcp_agent/core/agent_types.py,sha256=DogMcOoRwk70CFSetZ09madRcPDlhPn1iXZVeOcLV8Q,1507
  mcp_agent/core/direct_decorators.py,sha256=HY_7S7OtfZPqAeqC3_hPYa1d6zTnEyiOeI7JxvnWqTM,16786
  mcp_agent/core/direct_factory.py,sha256=UNAjHHFRLrQ3D934RMsKsh0Oas7LXLIVslgrzcetM6A,19090
- mcp_agent/core/enhanced_prompt.py,sha256=_JlX_7tBpWm1rScBaprD9Tvcep1qPPfXrUsFlnWrTpE,23497
+ mcp_agent/core/enhanced_prompt.py,sha256=M8mrJwzO0T1pPR-Y1HucmEYi2bJHrQvK6S27BYsXCqE,23918
  mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
  mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
- mcp_agent/core/fastagent.py,sha256=EHpvDZfslOrgSGiicv6ikOGmFB26gfxvPbpV-SoJOqY,23004
- mcp_agent/core/interactive_prompt.py,sha256=JKEU1Gvq6zXaLniDPx8wll08ZTC6g1rQflL7khmnhs8,24710
+ mcp_agent/core/fastagent.py,sha256=ak5rAyoreN5SqqoMUMP-Cr46JsOL5R2ieGyG7B5P3E8,23658
+ mcp_agent/core/interactive_prompt.py,sha256=Fk0xkvDxRrDOLs096n-0etP0-9qiEFNjFfRDKcxfFV4,26101
  mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
  mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
  mcp_agent/core/request_params.py,sha256=qmFWZXeYEJyYw2IwonyrTnZWxQG7qX6bKpOPcqETa60,1603
+ mcp_agent/core/usage_display.py,sha256=_slMP6bkQSH0n8fgF6l8eehr5nYsvTNyqaUpAc-8XYQ,6884
  mcp_agent/core/validation.py,sha256=RIBKFlh0GJg4rTcFQXoXp8A0sK1HpsCigKcYSK3gFaY,12090
  mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/executor/executor.py,sha256=E44p6d-o3OMRoP_dNs_cDnyti91LQ3P9eNU88mSi1kc,9462
@@ -49,27 +50,29 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
  mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
  mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
- mcp_agent/llm/augmented_llm.py,sha256=CqtSGo_QrHE73tz_DHMd0wdt2F41gwuUu5Bue51FNm4,24199
- mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
- mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
- mcp_agent/llm/augmented_llm_slow.py,sha256=6h4LXdBGBzDfKnvPBcfBh0RdfYl-UXo50EimA-W3tOY,1586
- mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
- mcp_agent/llm/model_factory.py,sha256=CCzRhBSn7ttIyNGlJko3sf0gkPa4ah-NqT-gwZ_oUtg,10717
+ mcp_agent/llm/augmented_llm.py,sha256=ekVZQla3oOyWSysJif-2ZutklYB8HTK99I7HzSye6ag,25705
+ mcp_agent/llm/augmented_llm_passthrough.py,sha256=F8KifmTwoQ7zyncjmoRek8SBfGdgc9yc5LRXwMQH-bg,8640
+ mcp_agent/llm/augmented_llm_playback.py,sha256=BQeBXRpO-xGAY9wIJxyde6xpHmZEdQPLd32frF8t3QQ,4916
+ mcp_agent/llm/augmented_llm_slow.py,sha256=DDSD8bL2flmQrVHZm-UDs7sR8aHRWkDOcOW-mX_GPok,2067
+ mcp_agent/llm/memory.py,sha256=pTOaTDV3EA3X68yKwEtUAu7s0xGIQQ_cKBhfYUnfR0w,8614
+ mcp_agent/llm/model_database.py,sha256=mfy039QZP_8-f0aHWR0Fpj2qnlys5430haSzrA86aXw,8485
+ mcp_agent/llm/model_factory.py,sha256=u60O4SWe22wN6CpmIfaF4C5aUziJs8O3N0Jo7erPjp8,10753
  mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
  mcp_agent/llm/provider_key_manager.py,sha256=usMWozSMhek_FIlM1MeVDwAbs-P96SrEVPGd3YwF9E4,2833
  mcp_agent/llm/provider_types.py,sha256=AkQl1r67wZ0gSIY6CXsiZiS3uw5DBF9E5yhIn3THayk,633
  mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
  mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
+ mcp_agent/llm/usage_tracking.py,sha256=HdBehPMt0bZzEgRmTnbMdgpLVuTp6L_VJTQx5Z25zCM,15321
  mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
  mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
  mcp_agent/llm/providers/augmented_llm_aliyun.py,sha256=XylkJKZ9theSVUxJKOZkf1244hgzng4Ng4Dr209Qb-w,1101
- mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=gK_IvllVBNJUUrSfpgFpdhM-d4liCt0MLq7d2lXS7RI,15510
+ mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=xCLqLi2HeBHPSvN_xD9Gl26ENTGT0E_1KLnN38BVXiE,24624
  mcp_agent/llm/providers/augmented_llm_azure.py,sha256=VPrD6lNrEw6EdYUTa9MDvHDNIPjJU5CG5xnKCM3JYdA,5878
  mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=zI9a90dwT4r6E1f_xp4K50Cj9sD7y7kNRgjo0s1pd5w,3804
  mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
- mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=xnp754WOEBP1_F5VhFZCMD5l1DrDy0Z4jzEAWeL4G5M,21368
+ mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=knMIUt-XvnIwpch8Er9_B9faraN4ZKKYYtZBk9Uvpho,22161
  mcp_agent/llm/providers/augmented_llm_google_oai.py,sha256=cO4dvjTl9ymqEurCOo5nP09ATfXVjgkuk1yZAlWpS1s,1137
- mcp_agent/llm/providers/augmented_llm_openai.py,sha256=5CFHKayjm-aeCBpohIK3WelAEuX7_LDGZIKnWR_rq-s,14577
+ mcp_agent/llm/providers/augmented_llm_openai.py,sha256=b5d9eCNVFNizCddMri_mlKUzoT_zmoKn0ocnMLJyYjU,15275
  mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
  mcp_agent/llm/providers/augmented_llm_tensorzero.py,sha256=Mol_Wzj_ZtccW-LMw0oFwWUt1m1yfofloay9QYNP23c,20729
  mcp_agent/llm/providers/google_converter.py,sha256=zsqxJJ636WzCL2K6w-yB94O8bdNR6mo8f5mQEnUJFyg,16831
@@ -81,17 +84,17 @@ mcp_agent/llm/providers/openai_utils.py,sha256=T4bTCL9f7DsoS_zoKgQKv_FUv_4n98vgb
  mcp_agent/llm/providers/sampling_converter_anthropic.py,sha256=35WzBWkPklnuMlu5S6XsQIq0YL58NOy8Ja6A_l4m6eM,1612
  mcp_agent/llm/providers/sampling_converter_openai.py,sha256=GA-LfTJzOwH9Vwk0Q4K37nG6zxpzqS-JGaM7cTH-Epc,841
  mcp_agent/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/logging/events.py,sha256=iHTSgrxK3BWnRoej6NhxVL5899MIHr-ktsA7hxMoo9k,3437
+ mcp_agent/logging/events.py,sha256=dSJJfuCd59-ZyYTVcf0M4HQd6iXb5k50PSAeoq1CpH0,4278
  mcp_agent/logging/json_serializer.py,sha256=qkfxnR9ka6OgvwSpM2CggELbEtzzkApm0s_KYz11RDY,5791
  mcp_agent/logging/listeners.py,sha256=_S4Jp5_KWp0kUfrx4BxDdNCeQK3MNT3Zi9AaolPri7A,6648
  mcp_agent/logging/logger.py,sha256=l02OGX_c5FOyH0rspd4ZvnkJcbb0FahhUhlh2KI8mqE,10724
- mcp_agent/logging/rich_progress.py,sha256=oY9fjb4Tyw6887v8sgO6EGIK4lnmIoR3NNxhA_-Ln_M,4893
+ mcp_agent/logging/rich_progress.py,sha256=NQbW010VxfzgJw8BRaqKVTIFlTNvDfmMcoOt7pxGvzQ,5362
  mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
  mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/mcp/common.py,sha256=MpSC0fLO21RcDz4VApah4C8_LisVGz7OXkR17Xw-9mY,431
  mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
  mcp_agent/mcp/hf_auth.py,sha256=YwEt7hMDJODFUIc6Zi1HLYsfVnvANGvyhpQwcPCMAgI,3379
- mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
+ mcp_agent/mcp/interfaces.py,sha256=BqwoXd-mZuaT26cruzqVdy54xGcgQOtO7S5gGtkJvsw,7108
  mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
  mcp_agent/mcp/mcp_agent_client_session.py,sha256=V17Lj21rMGIKKVAIyNx5l5gmC8jQuohjJGpRcoCXfVA,6862
  mcp_agent/mcp/mcp_aggregator.py,sha256=CrUtj-BHXXCb7sUlc_MF1d7HkiF9rjh6MKaGprflBB4,47076
@@ -155,8 +158,8 @@ mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkde
  mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
  mcp_agent/tools/tool_definition.py,sha256=L3Pxl-uLEXqlVoo-bYuFTFALeI-2pIU44YgFhsTKEtM,398
  mcp_agent/ui/console_display.py,sha256=UKqax5V2TC0hkZZORmmd6UqUk0DGX7A25E3h1k9f42k,10982
- fast_agent_mcp-0.2.33.dist-info/METADATA,sha256=nWUeuUtV_vX9ugzUQqS5FY4RBGyZQxTDJg6i8PaHGV4,30799
- fast_agent_mcp-0.2.33.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- fast_agent_mcp-0.2.33.dist-info/entry_points.txt,sha256=oKQeSUVn87pJv8_k1NQ7Ak8cXaaXHCnPAOJRCV_uUVg,230
- fast_agent_mcp-0.2.33.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
- fast_agent_mcp-0.2.33.dist-info/RECORD,,
+ fast_agent_mcp-0.2.35.dist-info/METADATA,sha256=O4DhiiD77uzvQeJvBbJxWMHwycs3M2fCilt7lJmPzUE,30799
+ fast_agent_mcp-0.2.35.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ fast_agent_mcp-0.2.35.dist-info/entry_points.txt,sha256=oKQeSUVn87pJv8_k1NQ7Ak8cXaaXHCnPAOJRCV_uUVg,230
+ fast_agent_mcp-0.2.35.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+ fast_agent_mcp-0.2.35.dist-info/RECORD,,
mcp_agent/agents/base_agent.py CHANGED
@@ -58,6 +58,7 @@ LLM = TypeVar("LLM", bound=AugmentedLLMProtocol)
  HUMAN_INPUT_TOOL_NAME = "__human_input__"
  if TYPE_CHECKING:
      from mcp_agent.context import Context
+     from mcp_agent.llm.usage_tracking import UsageAccumulator


  DEFAULT_CAPABILITIES = AgentCapabilities(
@@ -698,3 +699,15 @@ class BaseAgent(MCPAggregator, AgentProtocol):
          if self._llm:
              return self._llm.message_history
          return []
+
+     @property
+     def usage_accumulator(self) -> Optional["UsageAccumulator"]:
+         """
+         Return the usage accumulator for tracking token usage across turns.
+
+         Returns:
+             UsageAccumulator object if LLM is attached, None otherwise
+         """
+         if self._llm:
+             return self._llm.usage_accumulator
+         return None
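The new property simply delegates to the attached LLM and returns None when no LLM is attached, so callers should guard for both a missing accumulator and an empty turn list. A minimal caller-side sketch, assuming only the attribute names visible elsewhere in this diff (usage_accumulator, turns, display_input_tokens, output_tokens); the helper name is hypothetical:

    from typing import Optional

    def summarize_last_turn(agent) -> Optional[str]:
        """Return a one-line token summary for the most recent turn, or None."""
        acc = agent.usage_accumulator          # None when no LLM is attached
        if not acc or not acc.turns:
            return None
        last = acc.turns[-1]
        return f"{last.display_input_tokens:,} input / {last.output_tokens:,} output"

The _show_turn_usage helper added to agent_app.py below follows the same guard-then-read pattern.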
mcp_agent/config.py CHANGED
@@ -115,6 +115,14 @@ class AnthropicSettings(BaseModel):

      base_url: str | None = None

+     cache_mode: Literal["off", "prompt", "auto"] = "auto"
+     """
+     Controls how caching is applied for Anthropic models when prompt_caching is enabled globally.
+     - "off": No caching, even if global prompt_caching is true.
+     - "prompt": Caches tools+system prompt (1 block) and template content. Useful for large, static prompts.
+     - "auto": Currently same as "prompt" - caches tools+system prompt (1 block) and template content.
+     """
+
      model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

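Because AnthropicSettings is a pydantic model, the Literal annotation means an invalid cache_mode is rejected when the configuration is loaded. A self-contained sketch of that behaviour, using a stand-in class rather than fast-agent's real settings object:

    from typing import Literal

    from pydantic import BaseModel, ConfigDict, ValidationError

    class AnthropicSettingsSketch(BaseModel):
        """Stand-in mirroring the field added above; not the library's class."""
        base_url: str | None = None
        cache_mode: Literal["off", "prompt", "auto"] = "auto"
        model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

    print(AnthropicSettingsSketch().cache_mode)                     # "auto" (the default)
    print(AnthropicSettingsSketch(cache_mode="prompt").cache_mode)  # "prompt"

    try:
        AnthropicSettingsSketch(cache_mode="always")                # not in the Literal
    except ValidationError as exc:
        print(f"rejected with {len(exc.errors())} validation error(s)")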
mcp_agent/context.py CHANGED
@@ -25,7 +25,7 @@ from pydantic import BaseModel, ConfigDict
  from mcp_agent.config import Settings, get_settings
  from mcp_agent.executor.executor import AsyncioExecutor, Executor
  from mcp_agent.executor.task_registry import ActivityRegistry
- from mcp_agent.logging.events import EventFilter
+ from mcp_agent.logging.events import EventFilter, StreamingExclusionFilter
  from mcp_agent.logging.logger import LoggingConfig, get_logger
  from mcp_agent.logging.transport import create_transport
  from mcp_agent.mcp_server_registry import ServerRegistry
@@ -124,7 +124,8 @@ async def configure_logger(config: "Settings") -> None:
      """
      Configure logging and tracing based on the application config.
      """
-     event_filter: EventFilter = EventFilter(min_level=config.logger.level)
+     # Use StreamingExclusionFilter to prevent streaming events from flooding logs
+     event_filter: EventFilter = StreamingExclusionFilter(min_level=config.logger.level)
      logger.info(f"Configuring logger with level: {config.logger.level}")
      transport = create_transport(settings=config.logger, event_filter=event_filter)
      await LoggingConfig.configure(
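StreamingExclusionFilter itself is defined in mcp_agent/logging/events.py and is not shown in this diff, so the sketch below only illustrates the stated intent under assumed names: keep the minimum-level behaviour of EventFilter, but drop high-frequency streaming events before they reach the logging transport.

    from dataclasses import dataclass

    @dataclass
    class Event:                       # hypothetical event shape, for illustration only
        level: int
        event_type: str = "info"

    @dataclass
    class MinLevelFilter:              # stand-in for the real EventFilter
        min_level: int = 20

        def matches(self, event: Event) -> bool:
            return event.level >= self.min_level

    class StreamingExclusionSketch(MinLevelFilter):
        """Drop streaming-chunk events regardless of their level."""

        def matches(self, event: Event) -> bool:
            if event.event_type == "streaming":
                return False
            return super().matches(event)

    f = StreamingExclusionSketch(min_level=20)
    print(f.matches(Event(level=30, event_type="streaming")))  # False: excluded
    print(f.matches(Event(level=30, event_type="info")))       # True: passes the level check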
mcp_agent/core/agent_app.py CHANGED
@@ -6,10 +6,12 @@ from typing import Dict, List, Optional, Union

  from deprecated import deprecated
  from mcp.types import PromptMessage
+ from rich import print as rich_print

  from mcp_agent.agents.agent import Agent
  from mcp_agent.core.interactive_prompt import InteractivePrompt
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+ from mcp_agent.progress_display import progress_display


  class AgentApp:
@@ -272,7 +274,12 @@

          # Define the wrapper for send function
          async def send_wrapper(message, agent_name):
-             return await self.send(message, agent_name)
+             result = await self.send(message, agent_name)
+
+             # Show usage info after each turn if progress display is enabled
+             self._show_turn_usage(agent_name)
+
+             return result

          # Start the prompt loop with the agent name (not the agent object)
          return await prompt.prompt_loop(
@@ -282,3 +289,36 @@
              prompt_provider=self, # Pass self as the prompt provider
              default=default_prompt,
          )
+
+     def _show_turn_usage(self, agent_name: str) -> None:
+         """Show subtle usage information after each turn."""
+         agent = self._agents.get(agent_name)
+         if not agent or not agent.usage_accumulator:
+             return
+
+         # Get the last turn's usage (if any)
+         turns = agent.usage_accumulator.turns
+         if not turns:
+             return
+
+         last_turn = turns[-1]
+         input_tokens = last_turn.display_input_tokens
+         output_tokens = last_turn.output_tokens
+
+         # Build cache indicators with bright colors
+         cache_indicators = ""
+         if last_turn.cache_usage.cache_write_tokens > 0:
+             cache_indicators += "[bright_yellow]^[/bright_yellow]"
+         if last_turn.cache_usage.cache_read_tokens > 0 or last_turn.cache_usage.cache_hit_tokens > 0:
+             cache_indicators += "[bright_green]*[/bright_green]"
+
+         # Build context percentage - get from accumulator, not individual turn
+         context_info = ""
+         context_percentage = agent.usage_accumulator.context_usage_percentage
+         if context_percentage is not None:
+             context_info = f" ({context_percentage:.1f}%)"
+
+         # Show subtle usage line - pause progress display to ensure visibility
+         with progress_display.paused():
+             cache_suffix = f" {cache_indicators}" if cache_indicators else ""
+             rich_print(f"[dim]Last turn: {input_tokens:,} Input, {output_tokens:,} Output{context_info}[/dim]{cache_suffix}")
mcp_agent/core/enhanced_prompt.py CHANGED
@@ -58,6 +58,7 @@ class AgentCompleter(Completer):
              "prompts": "List and select MCP prompts", # Changed description
              "prompt": "Apply a specific prompt by name (/prompt <name>)", # New command
              "agents": "List available agents",
+             "usage": "Show current usage statistics",
              "clear": "Clear the screen",
              "STOP": "Stop this prompting session and move to next workflow step",
              "EXIT": "Exit fast-agent, terminating any running workflows",
@@ -67,6 +68,7 @@
              self.commands.pop("agents")
              self.commands.pop("prompts") # Remove prompts command in human input mode
              self.commands.pop("prompt", None) # Remove prompt command in human input mode
+             self.commands.pop("usage", None) # Remove usage command in human input mode
          self.agent_types = agent_types or {}

      def get_completions(self, document, complete_event):
@@ -390,6 +392,8 @@ async def get_enhanced_input(
              return "CLEAR"
          elif cmd == "agents":
              return "LIST_AGENTS"
+         elif cmd == "usage":
+             return "SHOW_USAGE"
          elif cmd == "prompts":
              # Return a dictionary with select_prompt action instead of a string
              # This way it will match what the command handler expects
@@ -566,6 +570,7 @@ async def handle_special_commands(command, agent_app=None):
          rich_print(" /agents - List available agents")
          rich_print(" /prompts - List and select MCP prompts")
          rich_print(" /prompt <name> - Apply a specific prompt by name")
+         rich_print(" /usage - Show current usage statistics")
          rich_print(" @agent_name - Switch to agent")
          rich_print(" STOP - Return control back to the workflow")
          rich_print(" EXIT - Exit fast-agent, terminating any running workflows")
@@ -594,6 +599,10 @@
              rich_print("[yellow]No agents available[/yellow]")
          return True

+     elif command == "SHOW_USAGE":
+         # Return a dictionary to signal that usage should be shown
+         return {"show_usage": True}
+
      elif command == "SELECT_PROMPT" or (
          isinstance(command, str) and command.startswith("SELECT_PROMPT:")
      ):
mcp_agent/core/fastagent.py CHANGED
@@ -54,6 +54,7 @@ from mcp_agent.core.exceptions import (
      ServerConfigError,
      ServerInitializationError,
  )
+ from mcp_agent.core.usage_display import display_usage_report
  from mcp_agent.core.validation import (
      validate_server_references,
      validate_workflow_references,
@@ -392,6 +393,10 @@ class FastAgent:

              yield wrapper

+         except PromptExitError as e:
+             # User requested exit - not an error, show usage report
+             self._handle_error(e)
+             raise SystemExit(0)
          except (
              ServerConfigError,
              ProviderKeyError,
@@ -399,15 +404,18 @@
              ServerInitializationError,
              ModelConfigError,
              CircularDependencyError,
-             PromptExitError,
          ) as e:
              had_error = True
              self._handle_error(e)
              raise SystemExit(1)

          finally:
-             # Clean up any active agents
+             # Print usage report before cleanup (show for user exits too)
              if active_agents and not had_error:
+                 self._print_usage_report(active_agents)
+
+             # Clean up any active agents (always cleanup, even on errors)
+             if active_agents:
                  for agent in active_agents.values():
                      try:
                          await agent.shutdown()
@@ -472,6 +480,10 @@
          else:
              handle_error(e, error_type or "Error", "An unexpected error occurred.")

+     def _print_usage_report(self, active_agents: dict) -> None:
+         """Print a formatted table of token usage for all agents."""
+         display_usage_report(active_agents, show_if_progress_disabled=False, subdued_colors=True)
+
      async def start_server(
          self,
          transport: str = "sse",
mcp_agent/core/interactive_prompt.py CHANGED
@@ -28,6 +28,7 @@ from mcp_agent.core.enhanced_prompt import (
      get_selection_input,
      handle_special_commands,
  )
+ from mcp_agent.core.usage_display import collect_agents_from_provider, display_usage_report
  from mcp_agent.mcp.mcp_aggregator import SEP # Import SEP once at the top
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
  from mcp_agent.progress_display import progress_display
@@ -35,15 +36,26 @@ from mcp_agent.progress_display import progress_display
  # Type alias for the send function
  SendFunc = Callable[[Union[str, PromptMessage, PromptMessageMultipart], str], Awaitable[str]]

+ # Type alias for the agent getter function
+ AgentGetter = Callable[[str], Optional[object]]
+

  class PromptProvider(Protocol):
      """Protocol for objects that can provide prompt functionality."""
-
-     async def list_prompts(self, server_name: Optional[str] = None, agent_name: Optional[str] = None) -> Mapping[str, List[Prompt]]:
+
+     async def list_prompts(
+         self, server_name: Optional[str] = None, agent_name: Optional[str] = None
+     ) -> Mapping[str, List[Prompt]]:
          """List available prompts."""
          ...
-
-     async def apply_prompt(self, prompt_name: str, arguments: Optional[Dict[str, str]] = None, agent_name: Optional[str] = None, **kwargs) -> str:
+
+     async def apply_prompt(
+         self,
+         prompt_name: str,
+         arguments: Optional[Dict[str, str]] = None,
+         agent_name: Optional[str] = None,
+         **kwargs,
+     ) -> str:
          """Apply a prompt."""
          ...

@@ -160,9 +172,11 @@
                          await self._list_prompts(prompt_provider, agent)
                      else:
                          # Use the name-based selection
-                         await self._select_prompt(
-                             prompt_provider, agent, prompt_name
-                         )
+                         await self._select_prompt(prompt_provider, agent, prompt_name)
+                     continue
+                 elif "show_usage" in command_result:
+                     # Handle usage display
+                     await self._show_usage(prompt_provider, agent)
                      continue

              # Skip further processing if:
@@ -170,7 +184,11 @@
              # 2. The original input was a dictionary (special command like /prompt)
              # 3. The command result itself is a dictionary (special command handling result)
              # This fixes the issue where /prompt without arguments gets sent to the LLM
-             if command_result or isinstance(user_input, dict) or isinstance(command_result, dict):
+             if (
+                 command_result
+                 or isinstance(user_input, dict)
+                 or isinstance(command_result, dict)
+             ):
                  continue

              if user_input.upper() == "STOP":
@@ -183,7 +201,9 @@

          return result

-     async def _get_all_prompts(self, prompt_provider: PromptProvider, agent_name: Optional[str] = None):
+     async def _get_all_prompts(
+         self, prompt_provider: PromptProvider, agent_name: Optional[str] = None
+     ):
          """
          Get a list of all available prompts.

@@ -196,8 +216,10 @@
          """
          try:
              # Call list_prompts on the provider
-             prompt_servers = await prompt_provider.list_prompts(server_name=None, agent_name=agent_name)
-
+             prompt_servers = await prompt_provider.list_prompts(
+                 server_name=None, agent_name=agent_name
+             )
+
              all_prompts = []

              # Process the returned prompt servers
@@ -326,9 +348,11 @@
          try:
              # Get all available prompts directly from the prompt provider
              rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
-
+
              # Call list_prompts on the provider
-             prompt_servers = await prompt_provider.list_prompts(server_name=None, agent_name=agent_name)
+             prompt_servers = await prompt_provider.list_prompts(
+                 server_name=None, agent_name=agent_name
+             )

              if not prompt_servers:
                  rich_print("[yellow]No prompts available for this agent[/yellow]")
@@ -557,3 +581,25 @@

              rich_print(f"[red]Error selecting or applying prompt: {e}[/red]")
              rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
+     async def _show_usage(self, prompt_provider: PromptProvider, agent_name: str) -> None:
+         """
+         Show usage statistics for the current agent(s) in a colorful table format.
+
+         Args:
+             prompt_provider: Provider that has access to agents
+             agent_name: Name of the current agent
+         """
+         try:
+             # Collect all agents from the prompt provider
+             agents_to_show = collect_agents_from_provider(prompt_provider, agent_name)
+
+             if not agents_to_show:
+                 rich_print("[yellow]No usage data available[/yellow]")
+                 return
+
+             # Use the shared display utility
+             display_usage_report(agents_to_show, show_if_progress_disabled=True)
+
+         except Exception as e:
+             rich_print(f"[red]Error showing usage: {e}[/red]")