fast-agent-mcp 0.0.15__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/METADATA +121 -21
  2. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/RECORD +27 -25
  3. mcp_agent/cli/__main__.py +3 -0
  4. mcp_agent/cli/commands/bootstrap.py +1 -1
  5. mcp_agent/cli/commands/setup.py +4 -1
  6. mcp_agent/cli/main.py +13 -3
  7. mcp_agent/config.py +19 -11
  8. mcp_agent/core/agent_app.py +1 -1
  9. mcp_agent/core/enhanced_prompt.py +13 -5
  10. mcp_agent/core/fastagent.py +87 -49
  11. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +188 -0
  12. mcp_agent/resources/examples/data-analysis/analysis.py +26 -0
  13. mcp_agent/resources/examples/workflows/evaluator.py +3 -3
  14. mcp_agent/resources/examples/workflows/orchestrator.py +1 -1
  15. mcp_agent/resources/examples/workflows/parallel.py +0 -4
  16. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +229 -91
  17. mcp_agent/workflows/llm/augmented_llm_anthropic.py +16 -2
  18. mcp_agent/workflows/llm/augmented_llm_openai.py +13 -1
  19. mcp_agent/workflows/llm/prompt_utils.py +137 -0
  20. mcp_agent/workflows/orchestrator/orchestrator.py +252 -50
  21. mcp_agent/workflows/orchestrator/orchestrator_models.py +81 -9
  22. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +112 -42
  23. mcp_agent/workflows/router/router_base.py +113 -21
  24. mcp_agent/workflows/router/router_llm.py +19 -5
  25. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/WHEEL +0 -0
  26. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/entry_points.txt +0 -0
  27. {fast_agent_mcp-0.0.15.dist-info → fast_agent_mcp-0.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -89,21 +89,23 @@ class FastAgent(ContextDependent):
89
89
  help="Specify the agent to send a message to (used with --message)",
90
90
  )
91
91
  parser.add_argument(
92
- "-m", "--message",
92
+ "-m",
93
+ "--message",
93
94
  help="Message to send to the specified agent (requires --agent)",
94
95
  )
95
96
  parser.add_argument(
96
- "--quiet", action="store_true",
97
+ "--quiet",
98
+ action="store_true",
97
99
  help="Disable progress display, tool and message logging for cleaner output",
98
100
  )
99
101
  self.args = parser.parse_args()
100
-
102
+
101
103
  # Quiet mode will be handled in _load_config()
102
104
 
103
105
  self.name = name
104
106
  self.config_path = config_path
105
107
  self._load_config()
106
-
108
+
107
109
  # Create the MCPApp with the config
108
110
  self.app = MCPApp(
109
111
  name=name,
@@ -244,10 +246,24 @@ class FastAgent(ContextDependent):
244
246
  if child_data["type"] == AgentType.BASIC.value:
245
247
  # For basic agents, we'll validate LLM config during creation
246
248
  continue
247
- elif not isinstance(child_data["func"], AugmentedLLM):
249
+ # Check if it's a workflow type or has LLM capability
250
+ # Workflows like EvaluatorOptimizer and Parallel are valid for orchestrator
251
+ func = child_data["func"]
252
+ workflow_types = [
253
+ AgentType.EVALUATOR_OPTIMIZER.value,
254
+ AgentType.PARALLEL.value,
255
+ AgentType.ROUTER.value,
256
+ AgentType.CHAIN.value,
257
+ ]
258
+
259
+ if not (
260
+ isinstance(func, AugmentedLLM)
261
+ or child_data["type"] in workflow_types
262
+ or (hasattr(func, "_llm") and func._llm is not None)
263
+ ):
248
264
  raise AgentConfigError(
249
265
  f"Agent '{agent_name}' used by orchestrator '{name}' lacks LLM capability",
250
- "All agents used by orchestrators must be LLM-capable",
266
+ "All agents used by orchestrators must be LLM-capable (either an AugmentedLLM or have an _llm property)",
251
267
  )
252
268
 
253
269
  elif agent_type == AgentType.ROUTER.value:
@@ -536,7 +552,7 @@ class FastAgent(ContextDependent):
536
552
  Args:
537
553
  name: Name of the parallel executing agent
538
554
  fan_out: List of parallel execution agents
539
- fan_in: Optional name of collecting agent. If not provided, a passthrough agent
555
+ fan_in: Optional name of collecting agent. If not provided, a passthrough agent
540
556
  will be created automatically with the name "{name}_fan_in"
541
557
  instruction: Optional instruction for the parallel agent
542
558
  model: Model specification string
@@ -547,7 +563,7 @@ class FastAgent(ContextDependent):
547
563
  # If fan_in is not provided, create a passthrough agent with a derived name
548
564
  if fan_in is None:
549
565
  passthrough_name = f"{name}_fan_in"
550
-
566
+
551
567
  # Register the passthrough agent directly in self.agents
552
568
  self.agents[passthrough_name] = {
553
569
  "config": AgentConfig(
@@ -559,10 +575,10 @@ class FastAgent(ContextDependent):
559
575
  "type": AgentType.BASIC.value, # Using BASIC type since we're just attaching a PassthroughLLM
560
576
  "func": lambda x: x, # Simple passthrough function (never actually called)
561
577
  }
562
-
578
+
563
579
  # Use this passthrough as the fan-in
564
580
  fan_in = passthrough_name
565
-
581
+
566
582
  decorator = self._create_decorator(
567
583
  AgentType.PARALLEL,
568
584
  default_instruction="",
@@ -589,6 +605,7 @@ class FastAgent(ContextDependent):
589
605
  max_refinements: int = 3,
590
606
  use_history: bool = True,
591
607
  request_params: Optional[Dict] = None,
608
+ instruction: Optional[str] = None,
592
609
  ) -> Callable:
593
610
  """
594
611
  Decorator to create and register an evaluator-optimizer workflow.
@@ -601,10 +618,11 @@ class FastAgent(ContextDependent):
601
618
  max_refinements: Maximum number of refinement iterations
602
619
  use_history: Whether to maintain conversation history
603
620
  request_params: Additional request parameters for the LLM
621
+ instruction: Optional instruction for the workflow (if not provided, uses generator's instruction)
604
622
  """
605
623
  decorator = self._create_decorator(
606
624
  AgentType.EVALUATOR_OPTIMIZER,
607
- default_instruction="",
625
+ default_instruction="", # We'll get instruction from generator or override
608
626
  default_servers=[],
609
627
  default_use_history=True,
610
628
  wrapper_needed=True,
@@ -616,6 +634,7 @@ class FastAgent(ContextDependent):
616
634
  max_refinements=max_refinements,
617
635
  use_history=use_history,
618
636
  request_params=request_params,
637
+ instruction=instruction, # Pass through any custom instruction
619
638
  )
620
639
  return decorator
621
640
 
@@ -645,7 +664,7 @@ class FastAgent(ContextDependent):
645
664
  AgentType.ROUTER,
646
665
  default_instruction="",
647
666
  default_servers=[],
648
- default_use_history=True,
667
+ default_use_history=False,
649
668
  wrapper_needed=True,
650
669
  )(
651
670
  name=name,
@@ -708,17 +727,14 @@ class FastAgent(ContextDependent):
708
727
  continue_with_final=continue_with_final,
709
728
  )
710
729
  return decorator
711
-
730
+
712
731
  def passthrough(
713
- self,
714
- name: str = "Passthrough",
715
- use_history: bool = True,
716
- **kwargs
732
+ self, name: str = "Passthrough", use_history: bool = True, **kwargs
717
733
  ) -> Callable:
718
734
  """
719
735
  Decorator to create and register a passthrough agent.
720
736
  A passthrough agent simply returns any input message without modification.
721
-
737
+
722
738
  This is useful for parallel workflows where no fan-in aggregation is needed
723
739
  (the fan-in agent can be a passthrough that simply returns the combined outputs).
724
740
 
@@ -774,15 +790,17 @@ class FastAgent(ContextDependent):
774
790
  if agent_type == AgentType.BASIC:
775
791
  # Get the agent name for special handling
776
792
  agent_name = agent_data["config"].name
777
-
793
+
778
794
  # Check if this is an agent that should use the PassthroughLLM
779
- if agent_name.endswith("_fan_in") or agent_name.startswith("passthrough"):
795
+ if agent_name.endswith("_fan_in") or agent_name.startswith(
796
+ "passthrough"
797
+ ):
780
798
  # Import here to avoid circular imports
781
799
  from mcp_agent.workflows.llm.augmented_llm import PassthroughLLM
782
-
800
+
783
801
  # Create basic agent with configuration
784
802
  agent = Agent(config=config, context=agent_app.context)
785
-
803
+
786
804
  # Set up a PassthroughLLM directly
787
805
  async with agent:
788
806
  agent._llm = PassthroughLLM(
@@ -791,13 +809,13 @@ class FastAgent(ContextDependent):
791
809
  agent=agent,
792
810
  default_request_params=config.default_request_params,
793
811
  )
794
-
812
+
795
813
  # Store the agent
796
814
  instance = agent
797
815
  else:
798
816
  # Standard basic agent with LLM
799
817
  agent = Agent(config=config, context=agent_app.context)
800
-
818
+
801
819
  # Set up LLM with proper configuration
802
820
  async with agent:
803
821
  llm_factory = self._get_model_factory(
@@ -805,7 +823,7 @@ class FastAgent(ContextDependent):
805
823
  request_params=config.default_request_params,
806
824
  )
807
825
  agent._llm = await agent.attach_llm(llm_factory)
808
-
826
+
809
827
  # Store the agent
810
828
  instance = agent
811
829
 
@@ -885,18 +903,19 @@ class FastAgent(ContextDependent):
885
903
  f"evaluator={agent_data['evaluator']}"
886
904
  )
887
905
 
888
- # TODO: Remove legacy - factory usage is only needed for str evaluators
889
- # Later this should only be passed when evaluator is a string
890
906
  optimizer_model = (
891
907
  generator.config.model if isinstance(generator, Agent) else None
892
908
  )
909
+
893
910
  instance = EvaluatorOptimizerLLM(
911
+ name=config.name, # Pass name from config
894
912
  generator=generator,
895
913
  evaluator=evaluator,
896
914
  min_rating=QualityRating[agent_data["min_rating"]],
897
915
  max_refinements=agent_data["max_refinements"],
898
916
  llm_factory=self._get_model_factory(model=optimizer_model),
899
917
  context=agent_app.context,
918
+ instruction=config.instruction, # Pass any custom instruction
900
919
  )
901
920
 
902
921
  elif agent_type == AgentType.ROUTER:
@@ -1249,77 +1268,96 @@ class FastAgent(ContextDependent):
1249
1268
  """
1250
1269
  active_agents = {}
1251
1270
  had_error = False
1252
-
1271
+
1253
1272
  # Handle quiet mode by disabling logger settings after initialization
1254
1273
  quiet_mode = hasattr(self, "args") and self.args.quiet
1255
-
1274
+
1256
1275
  try:
1257
1276
  async with self.app.run() as agent_app:
1258
1277
  # Apply quiet mode directly to the context's config if needed
1259
- if quiet_mode and hasattr(agent_app.context, "config") and hasattr(agent_app.context.config, "logger"):
1278
+ if (
1279
+ quiet_mode
1280
+ and hasattr(agent_app.context, "config")
1281
+ and hasattr(agent_app.context.config, "logger")
1282
+ ):
1260
1283
  # Apply after initialization but before agents are created
1261
1284
  agent_app.context.config.logger.progress_display = False
1262
1285
  agent_app.context.config.logger.show_chat = False
1263
1286
  agent_app.context.config.logger.show_tools = False
1264
-
1287
+
1265
1288
  # Directly disable the progress display singleton
1266
1289
  from mcp_agent.progress_display import progress_display
1290
+
1267
1291
  progress_display.stop() # This will stop and hide the display
1268
-
1292
+
1269
1293
  # Pre-flight validation
1270
1294
  self._validate_server_references()
1271
1295
  self._validate_workflow_references()
1272
1296
 
1273
1297
  # Create all types of agents in dependency order
1298
+ # First create basic agents
1274
1299
  active_agents = await self._create_basic_agents(agent_app)
1275
1300
 
1276
- orchestrators = await self._create_orchestrators(
1301
+ # Create workflow types that don't depend on other workflows first
1302
+ evaluator_optimizers = await self._create_evaluator_optimizers(
1277
1303
  agent_app, active_agents
1278
1304
  )
1305
+ active_agents.update(evaluator_optimizers)
1306
+
1307
+ # Create parallel agents next as they might be dependencies
1279
1308
  parallel_agents = await self._create_parallel_agents(
1280
1309
  agent_app, active_agents
1281
1310
  )
1282
- evaluator_optimizers = await self._create_evaluator_optimizers(
1283
- agent_app, active_agents
1284
- )
1311
+ active_agents.update(parallel_agents)
1312
+
1313
+ # Create routers next
1285
1314
  routers = await self._create_routers(agent_app, active_agents)
1315
+ active_agents.update(routers)
1316
+
1317
+ # Create chains next
1286
1318
  chains = await self._create_agents_in_dependency_order(
1287
1319
  agent_app, active_agents, AgentType.CHAIN
1288
1320
  )
1321
+ active_agents.update(chains)
1289
1322
 
1290
- # Merge all agents into active_agents
1323
+ # Create orchestrators last as they might depend on any other agent type
1324
+ orchestrators = await self._create_orchestrators(
1325
+ agent_app, active_agents
1326
+ )
1327
+
1328
+ # Add orchestrators to active_agents (other types were already added)
1291
1329
  active_agents.update(orchestrators)
1292
- active_agents.update(parallel_agents)
1293
- active_agents.update(evaluator_optimizers)
1294
- active_agents.update(routers)
1295
- active_agents.update(chains)
1296
1330
 
1297
1331
  # Create wrapper with all agents
1298
1332
  wrapper = AgentApp(agent_app, active_agents)
1299
-
1333
+
1300
1334
  # Handle direct message sending if --agent and --message are provided
1301
1335
  if self.args.agent and self.args.message:
1302
1336
  agent_name = self.args.agent
1303
1337
  message = self.args.message
1304
-
1338
+
1305
1339
  if agent_name not in active_agents:
1306
1340
  available_agents = ", ".join(active_agents.keys())
1307
- print(f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}")
1341
+ print(
1342
+ f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
1343
+ )
1308
1344
  raise SystemExit(1)
1309
-
1345
+
1310
1346
  try:
1311
1347
  # Get response
1312
1348
  response = await wrapper[agent_name].send(message)
1313
-
1349
+
1314
1350
  # Only print the response in quiet mode
1315
1351
  if self.args.quiet:
1316
1352
  print(f"{response}")
1317
-
1353
+
1318
1354
  raise SystemExit(0)
1319
1355
  except Exception as e:
1320
- print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
1356
+ print(
1357
+ f"\n\nError sending message to agent '{agent_name}': {str(e)}"
1358
+ )
1321
1359
  raise SystemExit(1)
1322
-
1360
+
1323
1361
  yield wrapper
1324
1362
 
1325
1363
  except ServerConfigError as e:
@@ -0,0 +1,188 @@
1
+ import asyncio
2
+
3
+ from mcp_agent.core.fastagent import FastAgent
4
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
5
+
6
+ # Create the application
7
+ fast = FastAgent("Data Analysis & Campaign Generator")
8
+
9
+
10
+ # Original data analysis components
11
+ @fast.agent(
12
+ name="data_analysis",
13
+ instruction="""
14
+ You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
15
+ Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
16
+ You can add further packages if needed.
17
+ Data files are accessible from the /mnt/data/ directory (this is the current working directory).
18
+ Visualisations should be saved as .png files in the current working directory.
19
+ Extract key insights that would be compelling for a social media campaign.
20
+ """,
21
+ servers=["interpreter"],
22
+ request_params=RequestParams(maxTokens=8192),
23
+ model="sonnet",
24
+ )
25
+ @fast.agent(
26
+ "evaluator",
27
+ """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
28
+ You must make sure that the tool has:
29
+ - Considered the best way for a Human to interpret the data
30
+ - Produced insightful visualisations.
31
+ - Provided a high level summary report for the Human.
32
+ - Has had its findings challenged, and justified
33
+ - Extracted compelling insights suitable for social media promotion
34
+ """,
35
+ request_params=RequestParams(maxTokens=8192),
36
+ model="gpt-4o",
37
+ )
38
+ @fast.evaluator_optimizer(
39
+ "analysis_tool",
40
+ generator="data_analysis",
41
+ evaluator="evaluator",
42
+ max_refinements=3,
43
+ min_rating="EXCELLENT",
44
+ )
45
+ # Research component using Brave search
46
+ @fast.agent(
47
+ "context_researcher",
48
+ """You are a research specialist who provides cultural context for different regions.
49
+ For any given data insight and target language/region, research:
50
+ 1. Cultural sensitivities related to presenting this type of data
51
+ 2. Local social media trends and preferences
52
+ 3. Region-specific considerations for marketing campaigns
53
+
54
+ Always provide actionable recommendations for adapting content to each culture.
55
+ """,
56
+ servers=["fetch", "brave"], # Using the fetch MCP server for Brave search
57
+ request_params=RequestParams(temperature=0.3),
58
+ model="gpt-4o",
59
+ )
60
+ # Social media content generator
61
+ @fast.agent(
62
+ "campaign_generator",
63
+ """Generate engaging social media content based on data insights.
64
+ Create compelling, shareable content that:
65
+ - Highlights key research findings in an accessible way
66
+ - Uses appropriate tone for the platform (Twitter/X, LinkedIn, Instagram, etc.)
67
+ - Is concise and impactful
68
+ - Includes suggested hashtags and posting schedule
69
+
70
+ Format your response with clear sections for each platform.
71
+ Save different campaign elements as separate files in the current directory.
72
+ """,
73
+ servers=["filesystem"], # Using filesystem MCP server to save files
74
+ request_params=RequestParams(temperature=0.7),
75
+ model="sonnet",
76
+ use_history=False,
77
+ )
78
+ # Translation agents with cultural adaptation
79
+ @fast.agent(
80
+ "translate_fr",
81
+ """Translate social media content to French with cultural adaptation.
82
+ Consider French cultural norms, expressions, and social media preferences.
83
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
84
+ Save the translated content to a file with appropriate naming.
85
+ """,
86
+ model="haiku",
87
+ use_history=False,
88
+ servers=["filesystem"],
89
+ )
90
+ @fast.agent(
91
+ "translate_es",
92
+ """Translate social media content to Spanish with cultural adaptation.
93
+ Consider Spanish-speaking cultural contexts, expressions, and social media preferences.
94
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
95
+ Save the translated content to a file with appropriate naming.
96
+ """,
97
+ model="haiku",
98
+ use_history=False,
99
+ servers=["filesystem"],
100
+ )
101
+ @fast.agent(
102
+ "translate_de",
103
+ """Translate social media content to German with cultural adaptation.
104
+ Consider German cultural norms, expressions, and social media preferences.
105
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
106
+ Save the translated content to a file with appropriate naming.
107
+ """,
108
+ model="haiku",
109
+ use_history=False,
110
+ servers=["filesystem"],
111
+ )
112
+ @fast.agent(
113
+ "translate_ja",
114
+ """Translate social media content to Japanese with cultural adaptation.
115
+ Consider Japanese cultural norms, expressions, and social media preferences.
116
+ Ensure the translation maintains the impact of the original while being culturally appropriate.
117
+ Save the translated content to a file with appropriate naming.
118
+ """,
119
+ model="haiku",
120
+ use_history=False,
121
+ servers=["filesystem"],
122
+ )
123
+ # Parallel workflow for translations
124
+ @fast.parallel(
125
+ "translate_campaign",
126
+ instruction="Translates content to French, Spanish, German and Japanese. Supply the content to translate, translations will be saved to the filesystem.",
127
+ fan_out=["translate_fr", "translate_es", "translate_de", "translate_ja"],
128
+ include_request=True,
129
+ )
130
+ # Cultural sensitivity review agent
131
+ @fast.agent(
132
+ "cultural_reviewer",
133
+ """Review all translated content for cultural sensitivity and appropriateness.
134
+ For each language version, evaluate:
135
+ - Cultural appropriateness
136
+ - Potential misunderstandings or sensitivities
137
+ - Effectiveness for the target culture
138
+
139
+ Provide specific recommendations for any needed adjustments and save a review report.
140
+ """,
141
+ servers=["filesystem"],
142
+ request_params=RequestParams(temperature=0.2),
143
+ )
144
+ # Campaign optimization workflow
145
+ @fast.evaluator_optimizer(
146
+ "campaign_optimizer",
147
+ generator="campaign_generator",
148
+ evaluator="cultural_reviewer",
149
+ max_refinements=2,
150
+ min_rating="EXCELLENT",
151
+ )
152
+ # Main workflow orchestration
153
+ @fast.orchestrator(
154
+ "research_campaign_creator",
155
+ instruction="""
156
+ Create a complete multi-lingual social media campaign based on data analysis results.
157
+ The workflow will:
158
+ 1. Analyze the provided data and extract key insights
159
+ 2. Research cultural contexts for target languages
160
+ 3. Generate appropriate social media content
161
+ 4. Translate and culturally adapt the content
162
+ 5. Review and optimize all materials
163
+ 6. Save all campaign elements to files
164
+ """,
165
+ agents=[
166
+ "analysis_tool",
167
+ "context_researcher",
168
+ "campaign_optimizer",
169
+ "translate_campaign",
170
+ ],
171
+ model="sonnet", # Using a more capable model for orchestration
172
+ request_params=RequestParams(maxTokens=8192),
173
+ plan_type="full",
174
+ )
175
+ async def main():
176
+ # Use the app's context manager
177
+ print(
178
+ "WARNING: This workflow will likely run for >10 minutes and consume a lot of tokens. Press Enter to accept the default prompt and proceed"
179
+ )
180
+
181
+ async with fast.run() as agent:
182
+ await agent.research_campaign_creator.prompt(
183
+ default_prompt="Analyze the CSV file in the current directory and create a comprehensive multi-lingual social media campaign based on the findings. Save all campaign elements as separate files."
184
+ )
185
+
186
+
187
+ if __name__ == "__main__":
188
+ asyncio.run(main())
@@ -1,6 +1,7 @@
1
1
  import asyncio
2
2
 
3
3
  from mcp_agent.core.fastagent import FastAgent
4
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
4
5
 
5
6
  # Create the application
6
7
  fast = FastAgent("Data Analysis (Roots)")
@@ -16,6 +17,7 @@ Data files are accessible from the /mnt/data/ directory (this is the current wor
16
17
  Visualisations should be saved as .png files in the current working directory.
17
18
  """,
18
19
  servers=["interpreter"],
20
+ request_params=RequestParams(maxTokens=8192),
19
21
  )
20
22
  async def main():
21
23
  # Use the app's context manager
@@ -29,7 +31,31 @@ async def main():
29
31
  "Use MatPlotLib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
30
32
  "Produce a summary with major insights to the data",
31
33
  )
34
+ await agent()
32
35
 
33
36
 
34
37
  if __name__ == "__main__":
35
38
  asyncio.run(main())
39
+
40
+
41
+ ############################################################################################################
42
+ # Example of evaluator/optimizer flow
43
+ ############################################################################################################
44
+ # @fast.agent(
45
+ # "evaluator",
46
+ # """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
47
+ # You must make sure that the tool has:
48
+ # - Considered the best way for a Human to interpret the data
49
+ # - Produced insightful visualasions.
50
+ # - Provided a high level summary report for the Human.
51
+ # - Has had its findings challenged, and justified
52
+ # """,
53
+ # request_params=RequestParams(maxTokens=8192),
54
+ # )
55
+ # @fast.evaluator_optimizer(
56
+ # "analysis_tool",
57
+ # generator="data_analysis",
58
+ # evaluator="evaluator",
59
+ # max_refinements=3,
60
+ # min_rating="EXCELLENT",
61
+ # )
@@ -17,7 +17,8 @@ fast = FastAgent("Evaluator-Optimizer")
17
17
  candidate details, and company information. Tailor the response to the company and job requirements.
18
18
  """,
19
19
  servers=["fetch"],
20
- model="gpt-4o-mini",
20
+ model="haiku3",
21
+ use_history=True,
21
22
  )
22
23
  # Define evaluator agent
23
24
  @fast.agent(
@@ -38,8 +39,7 @@ fast = FastAgent("Evaluator-Optimizer")
38
39
  Summarize your evaluation as a structured response with:
39
40
  - Overall quality rating.
40
41
  - Specific feedback and areas for improvement.""",
41
- # instructor doesn't seem to work for sonnet37
42
- # model="sonnet35",
42
+ model="gpt-4o",
43
43
  )
44
44
  # Define the evaluator-optimizer workflow
45
45
  @fast.evaluator_optimizer(
@@ -49,7 +49,6 @@ fast = FastAgent("Orchestrator-Workers")
49
49
  )
50
50
  async def main():
51
51
  async with fast.run() as agent:
52
-
53
52
  await agent.author(
54
53
  "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
55
54
  )
@@ -68,5 +67,6 @@ async def main():
68
67
  await agent.orchestrate(task)
69
68
  await agent()
70
69
 
70
+
71
71
  if __name__ == "__main__":
72
72
  asyncio.run(main())
@@ -60,10 +60,6 @@ and whispers of a hidden agenda linger among the villagers.
60
60
  and give an overall grade based on the feedback.""",
61
61
  model="o3-mini.low",
62
62
  )
63
- @fast.agent(
64
- name="cats-to-dogs",
65
- instruction="you should take any text, and change references about cats to dogs",
66
- )
67
63
  @fast.parallel(
68
64
  fan_out=["proofreader", "fact_checker", "style_enforcer"],
69
65
  fan_in="grader",