fast-agent-mcp 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/METADATA +1 -1
- {fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/RECORD +8 -8
- mcp_agent/resources/examples/workflows/agent_build.py +2 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +25 -23
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +14 -2
- {fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/RECORD

@@ -65,7 +65,7 @@ mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=2_VXZneckR6zk6RWzzL-smV_oWmgg4uSkLWqZv8jF0I,1995
 mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
-mcp_agent/resources/examples/workflows/agent_build.py,sha256=
+mcp_agent/resources/examples/workflows/agent_build.py,sha256=ioG4X8IbR8wwja8Zdncsk8YAu0VD2Xt1Vhr7saNJCZQ,2855
 mcp_agent/resources/examples/workflows/chaining.py,sha256=1G_0XBcFkSJCOXb6N_iXWlSc_oGAlhENR0k_CN1vJKI,1208
 mcp_agent/resources/examples/workflows/evaluator.py,sha256=3XmW1mjImlaWb0c5FWHYS9yP8nVGTbEdJySAoWXwrDg,3109
 mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=k2AiapOcK42uqG2nWDVvnSLqN4okQIQZK0FTbZufBpY,809
@@ -98,9 +98,9 @@ mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu
 mcp_agent/workflows/llm/model_factory.py,sha256=7zTJrO2ReHa_6dfh_gY6xO8dTySqGFCKlOG9-AMJ-i8,6920
 mcp_agent/workflows/llm/prompt_utils.py,sha256=EY3eddqnmc_YDUQJFysPnpTH6hr4r2HneeEmX76P8TQ,4948
 mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/orchestrator/orchestrator.py,sha256=
+mcp_agent/workflows/orchestrator/orchestrator.py,sha256=Cu8cfDoTpT_FhGJp-T4NnCVvjkyDO1sbEJ7oKamK47k,26021
 mcp_agent/workflows/orchestrator/orchestrator_models.py,sha256=1ldku1fYA_hu2F6K4l2C96mAdds05VibtSzSQrGm3yw,7321
-mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=
+mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
 mcp_agent/workflows/parallel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/parallel/fan_in.py,sha256=EivpUL5-qftctws-tlfwmYS1QeSwr07POIbBUbwvwOk,13184
 mcp_agent/workflows/parallel/fan_out.py,sha256=J-yezgjzAWxfueW_Qcgwoet4PFDRIh0h4m48lIbFA4c,7023
@@ -115,8 +115,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
+fast_agent_mcp-0.1.2.dist-info/METADATA,sha256=qcYR5D0SlhnnqX7er7yFF_0nEOmt4J74hbWiftzw6iI,27861
+fast_agent_mcp-0.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.1.2.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.1.2.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.1.2.dist-info/RECORD,,
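The hash fields above follow the standard wheel RECORD format: an urlsafe-base64 SHA-256 digest with the trailing '=' padding stripped, followed by the file size in bytes. A short sketch for reproducing an entry locally (the path argument is whichever file from the wheel you want to check):

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a wheel RECORD line: <path>,sha256=<urlsafe-b64 digest>,<size>."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Example: verify the orchestrator module against the 0.1.2 RECORD line above
print(record_entry("mcp_agent/workflows/orchestrator/orchestrator.py"))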
mcp_agent/resources/examples/workflows/agent_build.py

@@ -33,6 +33,8 @@ is expected to be adjusted and refined later.
 
 If you are unsure about how to proceed, request input from the Human.
 
+Use the filesystem tools to save your completed fastagent program, in an appropriately named '.py' file.
+
 """,
     servers=["filesystem", "fetch"],
     request_params=RequestParams(maxTokens=8192),
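For orientation, the lines above sit inside the instruction string of a fast-agent agent declaration in this example file. A minimal sketch of that surrounding shape, assuming the usual FastAgent decorator pattern (the import paths, application name, agent name, and prompt are assumptions, not copied from agent_build.py):

import asyncio

from mcp_agent.core.fastagent import FastAgent  # import path assumed for 0.1.x
from mcp_agent.workflows.llm.augmented_llm import RequestParams  # import path assumed

fast = FastAgent("Agent Builder")  # hypothetical application name

@fast.agent(
    name="agent_expert",  # hypothetical agent name
    instruction="""
...
If you are unsure about how to proceed, request input from the Human.

Use the filesystem tools to save your completed fastagent program, in an appropriately named '.py' file.
""",
    servers=["filesystem", "fetch"],
    request_params=RequestParams(maxTokens=8192),
)
async def main() -> None:
    async with fast.run() as agent:  # runner pattern as used in the shipped examples
        await agent("Build me a two-step research agent")  # hypothetical request

if __name__ == "__main__":
    asyncio.run(main())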
mcp_agent/workflows/orchestrator/orchestrator.py

@@ -91,7 +91,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         # Initialize with orchestrator-specific defaults
         orchestrator_params = RequestParams(
             use_history=False,  # Orchestrator doesn't support history
-            max_iterations=
+            max_iterations=5,  # Reduced from 10 to prevent excessive iterations
             maxTokens=8192,  # Higher default for planning
             parallel_tool_calls=True,
         )
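The orchestrator seeds its own RequestParams before merging in anything the caller supplies. A minimal standalone sketch of those defaults, assuming RequestParams is the same pydantic model used elsewhere in the package (import path assumed):

from mcp_agent.workflows.llm.augmented_llm import RequestParams  # import path assumed

# The orchestrator-specific defaults from the hunk above
orchestrator_params = RequestParams(
    use_history=False,        # the orchestrator does not support history
    max_iterations=5,         # reduced from 10 in 0.1.2 to prevent excessive iterations
    maxTokens=8192,           # higher default for planning
    parallel_tool_calls=True,
)

print(orchestrator_params.max_iterations)  # -> 5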
@@ -262,26 +262,22 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             plan_result.plan = plan
 
             if plan.is_complete:
-                #
-
-                plan_result.is_complete = True
-
-                # Synthesize final result into a single message
-                # Use the structured XML format for better context
-                synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
-                    plan_result=format_plan_result(plan_result)
-                )
+                # Modified: Remove the requirement for steps to be executed
+                plan_result.is_complete = True
 
-
-
-
-
-
+                # Synthesize final result into a single message
+                # Use the structured XML format for better context
+                synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
+                    plan_result=format_plan_result(plan_result)
+                )
 
-
-
-
-
+                # Use planner directly - planner already has PLANNING verb
+                plan_result.result = await self.planner.generate_str(
+                    message=synthesis_prompt,
+                    request_params=params.model_copy(update={"max_iterations": 1}),
+                )
+
+                return plan_result
 
             # Execute each step, collecting results
             # Note that in iterative mode this will only be a single step
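The single-shot synthesis call above derives a one-iteration copy of the active parameters with pydantic's model_copy, so the planner cannot loop while producing the final summary. A small illustration of that idiom (the field values are made up):

from mcp_agent.workflows.llm.augmented_llm import RequestParams  # import path assumed

params = RequestParams(maxTokens=8192, max_iterations=5)

# model_copy(update=...) is standard pydantic v2: it returns a copy with the
# given fields replaced and leaves the original object untouched.
synthesis_params = params.model_copy(update={"max_iterations": 1})

assert params.max_iterations == 5
assert synthesis_params.max_iterations == 1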
@@ -311,6 +307,14 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             logger.debug(
                 f"Iteration {iterations}: Intermediate plan result:", data=plan_result
             )
+
+            # Check for diminishing returns
+            if iterations > 2 and len(plan.steps) <= 1:
+                # If plan has 0-1 steps after multiple iterations, might be done
+                self.logger.info("Minimal new steps detected, marking plan as complete")
+                plan_result.is_complete = True
+                break
+
             iterations += 1
 
         # If we get here, we've hit the iteration limit without completing
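The new guard stops planning once the planner keeps producing near-empty plans. The same heuristic, pulled out into a standalone function for illustration (the argument names are simplified stand-ins for the orchestrator's own state):

def should_stop_planning(iterations: int, steps_in_latest_plan: int) -> bool:
    """Mirror of the 0.1.2 guard: after more than two iterations, a plan with
    at most one step suggests diminishing returns, so mark it complete."""
    return iterations > 2 and steps_in_latest_plan <= 1

# Third-plus iteration that only produced a single step -> stop and synthesize
assert should_stop_planning(iterations=3, steps_in_latest_plan=1) is True
# Early iterations keep going even if the plan is still small
assert should_stop_planning(iterations=1, steps_in_latest_plan=0) is False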
@@ -457,10 +461,8 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         # Fix the iteration counting display
         max_iterations = params.max_iterations
-        #
-        current_iteration = len(plan_result.step_results)
-            1 if self.plan_type == "iterative" else len(plan_result.step_results) or 1
-        )
+        # Simplified iteration counting logic
+        current_iteration = len(plan_result.step_results)
         current_iteration = min(current_iteration, max_iterations - 1)  # Cap at max-1
         iterations_remaining = max(
             0, max_iterations - current_iteration - 1
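A quick worked illustration of the simplified counting with the new default of max_iterations=5 (the step counts below are made-up inputs):

def iteration_display(step_results_len: int, max_iterations: int = 5) -> tuple[int, int]:
    """Replicates the simplified display logic from the hunk above."""
    current_iteration = step_results_len
    current_iteration = min(current_iteration, max_iterations - 1)  # cap at max-1
    iterations_remaining = max(0, max_iterations - current_iteration - 1)
    return current_iteration, iterations_remaining

print(iteration_display(0))  # -> (0, 4): nothing executed yet, four iterations left
print(iteration_display(3))  # -> (3, 1)
print(iteration_display(9))  # -> (4, 0): capped once the step count exceeds the budget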
mcp_agent/workflows/orchestrator/orchestrator_prompts.py

@@ -78,7 +78,13 @@ Return your response in the following JSON structure:
     "is_complete": false
 }}
 
-Set "is_complete" to true
+Set "is_complete" to true when ANY of these conditions are met:
+1. The objective has been achieved in full or substantively
+2. The remaining work is minor or trivial compared to what's been accomplished
+3. Additional steps provide minimal value toward the core objective
+4. The plan has gathered sufficient information to answer the original request
+
+Be decisive - avoid excessive planning steps that add little value. It's better to complete a plan early than to continue with marginal improvements. Focus on the core intent of the objective, not perfection.
 
 You must respond with valid JSON only, with no triple backticks. No markdown formatting.
 No extra text. Do not wrap in ```json code fences.
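The prompt insists on bare JSON with no code fences, but a defensive parse on the consumer side is still cheap. A minimal sketch (this helper is illustrative and not part of the package):

import json

def parse_plan_reply(reply: str) -> dict:
    """Parse the planner's JSON reply, tolerating accidental ```json fences."""
    text = reply.strip()
    if text.startswith("```"):
        # Drop the opening fence (with its optional "json" tag) and the closing fence.
        text = text.split("\n", 1)[1].rsplit("```", 1)[0]
    return json.loads(text)

plan = parse_plan_reply('{"steps": [], "is_complete": true}')
assert plan["is_complete"] is True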
@@ -136,7 +142,13 @@ Return your response in the following JSON structure:
|
|
136
142
|
"is_complete": false
|
137
143
|
}}
|
138
144
|
|
139
|
-
Set "is_complete" to true
|
145
|
+
Set "is_complete" to true when ANY of these conditions are met:
|
146
|
+
1. The objective has been achieved in full or substantively
|
147
|
+
2. The remaining work is minor or trivial compared to what's been accomplished
|
148
|
+
3. Additional steps provide minimal value toward the core objective
|
149
|
+
4. The plan has gathered sufficient information to answer the original request
|
150
|
+
|
151
|
+
Be decisive - avoid excessive planning steps that add little value. It's better to complete a plan early than to continue with marginal improvements. Focus on the core intent of the objective, not perfection.
|
140
152
|
|
141
153
|
You must respond with valid JSON only, with no triple backticks. No markdown formatting.
|
142
154
|
No extra text. Do not wrap in ```json code fences.
|
{fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/WHEEL: file without changes
{fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/entry_points.txt: file without changes
{fast_agent_mcp-0.1.1.dist-info → fast_agent_mcp-0.1.2.dist-info}/licenses/LICENSE: file without changes