fast-agent-mcp 0.0.16__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
@@ -0,0 +1,188 @@
+ import asyncio
+
+ from mcp_agent.core.fastagent import FastAgent
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
+
+ # Create the application
+ fast = FastAgent("Data Analysis & Campaign Generator")
+
+
+ # Original data analysis components
+ @fast.agent(
+     name="data_analysis",
+     instruction="""
+ You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
+ Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
+ You can add further packages if needed.
+ Data files are accessible from the /mnt/data/ directory (this is the current working directory).
+ Visualisations should be saved as .png files in the current working directory.
+ Extract key insights that would be compelling for a social media campaign.
+ """,
+     servers=["interpreter"],
+     request_params=RequestParams(maxTokens=8192),
+     model="sonnet",
+ )
+ @fast.agent(
+     "evaluator",
+     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
+     You must make sure that the tool has:
+     - Considered the best way for a Human to interpret the data
+     - Produced insightful visualisations.
+     - Provided a high level summary report for the Human.
+     - Has had its findings challenged, and justified
+     - Extracted compelling insights suitable for social media promotion
+     """,
+     request_params=RequestParams(maxTokens=8192),
+     model="gpt-4o",
+ )
+ @fast.evaluator_optimizer(
+     "analysis_tool",
+     generator="data_analysis",
+     evaluator="evaluator",
+     max_refinements=3,
+     min_rating="EXCELLENT",
+ )
+ # Research component using Brave search
+ @fast.agent(
+     "context_researcher",
+     """You are a research specialist who provides cultural context for different regions.
+     For any given data insight and target language/region, research:
+     1. Cultural sensitivities related to presenting this type of data
+     2. Local social media trends and preferences
+     3. Region-specific considerations for marketing campaigns
+
+     Always provide actionable recommendations for adapting content to each culture.
+     """,
+     servers=["fetch", "brave"],  # Using the fetch MCP server for Brave search
+     request_params=RequestParams(temperature=0.3),
+     model="gpt-4o",
+ )
+ # Social media content generator
+ @fast.agent(
+     "campaign_generator",
+     """Generate engaging social media content based on data insights.
+     Create compelling, shareable content that:
+     - Highlights key research findings in an accessible way
+     - Uses appropriate tone for the platform (Twitter/X, LinkedIn, Instagram, etc.)
+     - Is concise and impactful
+     - Includes suggested hashtags and posting schedule
+
+     Format your response with clear sections for each platform.
+     Save different campaign elements as separate files in the current directory.
+     """,
+     servers=["filesystem"],  # Using filesystem MCP server to save files
+     request_params=RequestParams(temperature=0.7),
+     model="sonnet",
+     use_history=False,
+ )
+ # Translation agents with cultural adaptation
+ @fast.agent(
+     "translate_fr",
+     """Translate social media content to French with cultural adaptation.
+     Consider French cultural norms, expressions, and social media preferences.
+     Ensure the translation maintains the impact of the original while being culturally appropriate.
+     Save the translated content to a file with appropriate naming.
+     """,
+     model="haiku",
+     use_history=False,
+     servers=["filesystem"],
+ )
+ @fast.agent(
+     "translate_es",
+     """Translate social media content to Spanish with cultural adaptation.
+     Consider Spanish-speaking cultural contexts, expressions, and social media preferences.
+     Ensure the translation maintains the impact of the original while being culturally appropriate.
+     Save the translated content to a file with appropriate naming.
+     """,
+     model="haiku",
+     use_history=False,
+     servers=["filesystem"],
+ )
+ @fast.agent(
+     "translate_de",
+     """Translate social media content to German with cultural adaptation.
+     Consider German cultural norms, expressions, and social media preferences.
+     Ensure the translation maintains the impact of the original while being culturally appropriate.
+     Save the translated content to a file with appropriate naming.
+     """,
+     model="haiku",
+     use_history=False,
+     servers=["filesystem"],
+ )
+ @fast.agent(
+     "translate_ja",
+     """Translate social media content to Japanese with cultural adaptation.
+     Consider Japanese cultural norms, expressions, and social media preferences.
+     Ensure the translation maintains the impact of the original while being culturally appropriate.
+     Save the translated content to a file with appropriate naming.
+     """,
+     model="haiku",
+     use_history=False,
+     servers=["filesystem"],
+ )
+ # Parallel workflow for translations
+ @fast.parallel(
+     "translate_campaign",
+     instruction="Translates content to French, Spanish, German and Japanese. Supply the content to translate; translations will be saved to the filesystem.",
+     fan_out=["translate_fr", "translate_es", "translate_de", "translate_ja"],
+     include_request=True,
+ )
+ # Cultural sensitivity review agent
+ @fast.agent(
+     "cultural_reviewer",
+     """Review all translated content for cultural sensitivity and appropriateness.
+     For each language version, evaluate:
+     - Cultural appropriateness
+     - Potential misunderstandings or sensitivities
+     - Effectiveness for the target culture
+
+     Provide specific recommendations for any needed adjustments and save a review report.
+     """,
+     servers=["filesystem"],
+     request_params=RequestParams(temperature=0.2),
+ )
+ # Campaign optimization workflow
+ @fast.evaluator_optimizer(
+     "campaign_optimizer",
+     generator="campaign_generator",
+     evaluator="cultural_reviewer",
+     max_refinements=2,
+     min_rating="EXCELLENT",
+ )
+ # Main workflow orchestration
+ @fast.orchestrator(
+     "research_campaign_creator",
+     instruction="""
+     Create a complete multi-lingual social media campaign based on data analysis results.
+     The workflow will:
+     1. Analyze the provided data and extract key insights
+     2. Research cultural contexts for target languages
+     3. Generate appropriate social media content
+     4. Translate and culturally adapt the content
+     5. Review and optimize all materials
+     6. Save all campaign elements to files
+     """,
+     agents=[
+         "analysis_tool",
+         "context_researcher",
+         "campaign_optimizer",
+         "translate_campaign",
+     ],
+     model="sonnet",  # Using a more capable model for orchestration
+     request_params=RequestParams(maxTokens=8192),
+     plan_type="full",
+ )
+ async def main():
+     # Use the app's context manager
+     print(
+         "WARNING: This workflow will likely run for >10 minutes and consume a lot of tokens. Press Enter to accept the default prompt and proceed."
+     )
+
+     async with fast.run() as agent:
+         await agent.research_campaign_creator.prompt(
+             default_prompt="Analyze the CSV file in the current directory and create a comprehensive multi-lingual social media campaign based on the findings. Save all campaign elements as separate files."
+         )
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
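Each decorator above binds a workflow to a name on the `agent` handle, so individual components can also be exercised on their own. A minimal sketch, assuming the attribute-style access used elsewhere in this diff (`agent.author`, `agent.analysis_tool`) and a hypothetical insight string:

    import asyncio

    async def translate_only():
        # Run just the fan-out translation workflow declared above.
        async with fast.run() as agent:
            await agent.translate_campaign(
                "Key insight: engagement doubled after the Q3 campaign."  # hypothetical content
            )

    asyncio.run(translate_only())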
@@ -19,43 +19,43 @@ Visualisations should be saved as .png files in the current working directory.
      servers=["interpreter"],
      request_params=RequestParams(maxTokens=8192),
  )
- @fast.agent(
-     "evaluator",
-     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
-     You must make sure that the tool has:
-     - Considered the best way for a Human to interpret the data
-     - Produced insightful visualisations.
-     - Provided a high level summary report for the Human.
-     - Has had its findings challenged, and justified
-     """,
-     request_params=RequestParams(maxTokens=8192),
- )
- @fast.evaluator_optimizer(
-     "analysis_tool",
-     generator="data_analysis",
-     evaluator="evaluator",
-     max_refinements=3,
-     min_rating="EXCELLENT",
- )
- @fast.passthrough(
-     "sample",
- )
  async def main():
      # Use the app's context manager
      async with fast.run() as agent:
-         # await agent(
-         #     "There is a csv file in the current directory. "
-         #     "Analyse the file, produce a detailed description of the data, and any patterns it contains.",
-         # )
-         # await agent(
-         #     "Consider the data, and how to usefully group it for presentation to a Human. Find insights, using the Python Interpreter as needed.\n"
-         #     "Use Matplotlib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
-         #     "Produce a summary with major insights to the data",
-         # )
-         await agent.analysis_tool.prompt(
-             "Analyse the CSV File in the working directory"
+         await agent(
+             "There is a csv file in the current directory. "
+             "Analyse the file, produce a detailed description of the data, and any patterns it contains.",
+         )
+         await agent(
+             "Consider the data, and how to usefully group it for presentation to a Human. Find insights, using the Python Interpreter as needed.\n"
+             "Use Matplotlib to produce insightful visualisations. Save them as '.png' files in the current directory. Be sure to run the code and save the files.\n"
+             "Produce a summary with major insights to the data",
          )
+         await agent()


  if __name__ == "__main__":
      asyncio.run(main())
+
+
+ ############################################################################################################
+ # Example of evaluator/optimizer flow
+ ############################################################################################################
+ # @fast.agent(
+ #     "evaluator",
+ #     """You are collaborating with a Data Analysis tool that has the capability to analyse data and produce visualisations.
+ #     You must make sure that the tool has:
+ #     - Considered the best way for a Human to interpret the data
+ #     - Produced insightful visualisations.
+ #     - Provided a high level summary report for the Human.
+ #     - Has had its findings challenged, and justified
+ #     """,
+ #     request_params=RequestParams(maxTokens=8192),
+ # )
+ # @fast.evaluator_optimizer(
+ #     "analysis_tool",
+ #     generator="data_analysis",
+ #     evaluator="evaluator",
+ #     max_refinements=3,
+ #     min_rating="EXCELLENT",
+ # )
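With the evaluator/optimizer flow moved into comments, the example now drives the default agent directly and then drops into an interactive session via `await agent()`. A named agent can still be addressed in the same session; a minimal sketch, assuming the attribute-style access used elsewhere in this diff and a hypothetical follow-up prompt:

    # Inside the same `async with fast.run() as agent:` block as above
    await agent.data_analysis(
        "Re-run the analysis, but aggregate the data by month before plotting."  # hypothetical prompt
    )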
@@ -17,7 +17,8 @@ fast = FastAgent("Evaluator-Optimizer")
  candidate details, and company information. Tailor the response to the company and job requirements.
  """,
      servers=["fetch"],
-     model="gpt-4o-mini",
+     model="haiku3",
+     use_history=True,
  )
  # Define evaluator agent
  @fast.agent(
@@ -38,6 +39,7 @@ fast = FastAgent("Evaluator-Optimizer")
      Summarize your evaluation as a structured response with:
      - Overall quality rating.
      - Specific feedback and areas for improvement.""",
+     model="gpt-4o",
  )
  # Define the evaluator-optimizer workflow
  @fast.evaluator_optimizer(
@@ -49,7 +49,6 @@ fast = FastAgent("Orchestrator-Workers")
  )
  async def main():
      async with fast.run() as agent:
-
          await agent.author(
              "write a 250 word short story about kittens discovering a castle, and save it to short_story.md"
          )
@@ -68,5 +67,6 @@ async def main():
          await agent.orchestrate(task)
          await agent()

+
  if __name__ == "__main__":
      asyncio.run(main())
@@ -10,7 +10,7 @@ from mcp_agent.workflows.llm.augmented_llm import (
      ModelT,
      RequestParams,
  )
- from mcp_agent.agents.agent import Agent
+ from mcp_agent.agents.agent import Agent, AgentConfig
  from mcp_agent.logging.logger import get_logger

  if TYPE_CHECKING:
@@ -64,6 +64,25 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
      - Document writing requiring multiple revisions
      """

+     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+         """Initialize default parameters using the workflow's settings."""
+         return RequestParams(
+             modelPreferences=self.model_preferences,
+             systemPrompt=self.instruction,
+             parallel_tool_calls=True,
+             max_iterations=10,
+             use_history=self.generator_use_history,  # Use generator's history setting
+         )
+
+     def _init_request_params(self):
+         """Initialize request parameters for both generator and evaluator components."""
+         # Set up workflow's default params based on generator's history setting
+         self.default_request_params = self._initialize_default_params({})
+
+         # Ensure evaluator's request params have history disabled
+         if hasattr(self.evaluator_llm, "default_request_params"):
+             self.evaluator_llm.default_request_params.use_history = False
+
      def __init__(
          self,
          generator: Agent | AugmentedLLM,
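The two helpers above feed `get_request_params`, which a later hunk calls to merge per-call overrides with these defaults. A minimal sketch of the intended behaviour, assuming `get_request_params` favours caller-supplied fields over the defaults (an assumption; the merge itself lives in `AugmentedLLM`):

    # Hypothetical: the defaults carry the generator's history setting...
    defaults = workflow._initialize_default_params({})
    assert defaults.use_history == workflow.generator_use_history

    # ...while a per-call RequestParams can override individual fields.
    params = workflow.get_request_params(RequestParams(maxTokens=8192))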
@@ -73,6 +92,8 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
          llm_factory: Callable[[Agent], AugmentedLLM]
          | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
          context: Optional["Context"] = None,
+         name: Optional[str] = None,  # Allow overriding the name
+         instruction: Optional[str] = None,  # Allow overriding the instruction
      ):
          """
          Initialize the evaluator-optimizer workflow.
@@ -87,16 +108,50 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              min_rating: Minimum acceptable quality rating
              max_refinements: Maximum refinement iterations
              llm_factory: Optional factory to create LLMs from agents
+             name: Optional name for the workflow (defaults to generator's name)
+             instruction: Optional instruction (defaults to generator's instruction)
+
+         Note on History Management:
+             This workflow manages two distinct history contexts:
+             1. Generator History: Controlled by the generator's use_history setting. When False,
+                each refinement iteration starts fresh without previous context.
+             2. Evaluator History: Always disabled as each evaluation should be independent
+                and based solely on the current response.
          """
-         super().__init__(context=context)
-
-         # Set up the optimizer
-         self.name = generator.name
+         # Set up initial instance attributes - allow name override
+         self.name = name or generator.name
          self.llm_factory = llm_factory
          self.generator = generator
          self.evaluator = evaluator
+         self.min_rating = min_rating
+         self.max_refinements = max_refinements
+
+         # Determine generator's history setting before super().__init__
+         if isinstance(generator, Agent):
+             self.generator_use_history = generator.config.use_history
+         elif isinstance(generator, AugmentedLLM):
+             if hasattr(generator, "aggregator") and isinstance(
+                 generator.aggregator, Agent
+             ):
+                 self.generator_use_history = generator.aggregator.config.use_history
+             else:
+                 self.generator_use_history = getattr(
+                     generator,
+                     "use_history",
+                     getattr(generator.default_request_params, "use_history", False),
+                 )
+         else:
+             raise ValueError(f"Unsupported optimizer type: {type(generator)}")
+
+         # Now we can call super().__init__ which will use generator_use_history
+         super().__init__(context=context, name=name or generator.name)
+
+         # Add a PassthroughLLM as _llm property for compatibility with Orchestrator
+         from mcp_agent.workflows.llm.augmented_llm import PassthroughLLM

-         # TODO: Remove legacy - optimizer should always be an AugmentedLLM, no conversion needed
+         self._llm = PassthroughLLM(name=f"{self.name}_passthrough", context=context)
+
+         # Set up the generator
          if isinstance(generator, Agent):
              if not llm_factory:
                  raise ValueError("llm_factory is required when using an Agent")
@@ -109,9 +164,12 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):

              self.aggregator = generator
              self.instruction = (
-                 generator.instruction
-                 if isinstance(generator.instruction, str)
-                 else None
+                 instruction  # Use provided instruction if any
+                 or (
+                     generator.instruction
+                     if isinstance(generator.instruction, str)
+                     else None
+                 )  # Fallback to generator's
              )

          elif isinstance(generator, AugmentedLLM):
@@ -119,46 +177,58 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              self.aggregator = generator.aggregator
              self.instruction = generator.instruction

-         else:
-             raise ValueError(f"Unsupported optimizer type: {type(generator)}")
-
-         self.history = self.generator_llm.history
-
-         # Set up the evaluator
+         # Set up the evaluator - evaluations should be independent, so history is always disabled
          if isinstance(evaluator, AugmentedLLM):
              self.evaluator_llm = evaluator
-             # TODO: Remove legacy - evaluator should be either AugmentedLLM or str
+             # Override evaluator's history setting
+             if hasattr(evaluator, "default_request_params"):
+                 evaluator.default_request_params.use_history = False
          elif isinstance(evaluator, Agent):
              if not llm_factory:
                  raise ValueError(
                      "llm_factory is required when using an Agent evaluator"
                  )

-             # Only create new LLM if agent doesn't have one
+             # Create evaluator with history disabled
              if hasattr(evaluator, "_llm") and evaluator._llm:
                  self.evaluator_llm = evaluator._llm
+                 if hasattr(self.evaluator_llm, "default_request_params"):
+                     self.evaluator_llm.default_request_params.use_history = False
              else:
+                 # Force history off in config before creating LLM
+                 evaluator.config.use_history = False
                  self.evaluator_llm = llm_factory(agent=evaluator)
          elif isinstance(evaluator, str):
-             # If a string is passed as the evaluator, we use it as the evaluation criteria
-             # and create an evaluator agent with that instruction
              if not llm_factory:
                  raise ValueError(
                      "llm_factory is required when using a string evaluator"
                  )

-             self.evaluator_llm = llm_factory(
-                 agent=Agent(name="Evaluator", instruction=evaluator)
+             # Create evaluator agent with history disabled
+             evaluator_agent = Agent(
+                 name="Evaluator",
+                 instruction=evaluator,
+                 config=AgentConfig(
+                     name="Evaluator",
+                     instruction=evaluator,
+                     servers=[],
+                     use_history=False,  # Force history off for evaluator
+                 ),
              )
+             self.evaluator_llm = llm_factory(agent=evaluator_agent)
          else:
              raise ValueError(f"Unsupported evaluator type: {type(evaluator)}")

-         self.min_rating = min_rating
-         self.max_refinements = max_refinements
-
-         # Track iteration history
+         # Track iteration history (for the workflow itself)
          self.refinement_history = []

+         # Set up workflow's default params based on generator's history setting
+         self.default_request_params = self._initialize_default_params({})
+
+         # Ensure evaluator's request params have history disabled
+         if hasattr(self.evaluator_llm, "default_request_params"):
+             self.evaluator_llm.default_request_params.use_history = False
+
      async def generate(
          self,
          message: str | MessageParamT | List[MessageParamT],
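Whatever form the evaluator takes, the constructor ends by forcing its history off. A minimal sketch of the observable effect, assuming `generator_llm` and `evaluator_llm` are pre-built AugmentedLLM instances with the `default_request_params` attribute the hasattr guards expect:

    # Hypothetical: the evaluator arrives with history enabled...
    evaluator_llm.default_request_params.use_history = True
    workflow = EvaluatorOptimizerLLM(generator=generator_llm, evaluator=evaluator_llm)
    # ...and the constructor forces it off so each evaluation stands alone.
    assert evaluator_llm.default_request_params.use_history is False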
@@ -171,6 +241,9 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
          best_rating = QualityRating.POOR
          self.refinement_history = []

+         # Get request params with proper use_history setting
+         params = self.get_request_params(request_params)
+
          # Use a single AsyncExitStack for the entire method to maintain connections
          async with contextlib.AsyncExitStack() as stack:
              # Enter all agent contexts once at the beginning
@@ -180,22 +253,20 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                  await stack.enter_async_context(self.evaluator)

              # Initial generation
-             response = await self.generator_llm.generate(
+             response = await self.generator_llm.generate_str(
                  message=message,
-                 request_params=request_params,
+                 request_params=params,  # Pass params which may override use_history
              )

              best_response = response

              while refinement_count < self.max_refinements:
-                 logger.debug("Optimizer result:", data=response)
+                 logger.debug("Generator result:", data=response)

                  # Evaluate current response
                  eval_prompt = self._build_eval_prompt(
                      original_request=str(message),
-                     current_response="\n".join(str(r) for r in response)
-                     if isinstance(response, list)
-                     else str(response),
+                     current_response=response,  # response is already a string
                      iteration=refinement_count,
                  )

@@ -244,22 +315,23 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
                  # Generate refined response
                  refinement_prompt = self._build_refinement_prompt(
                      original_request=str(message),
-                     current_response="\n".join(str(r) for r in response)
-                     if isinstance(response, list)
-                     else str(response),
+                     current_response=response,
                      feedback=evaluation_result,
                      iteration=refinement_count,
+                     use_history=self.generator_use_history,  # Use the generator's history setting
                  )

-                 # No nested AsyncExitStack here either
-                 response = await self.generator_llm.generate(
+                 response = await self.generator_llm.generate_str(
                      message=refinement_prompt,
-                     request_params=request_params,
+                     request_params=params,  # Pass params which may override use_history
                  )

                  refinement_count += 1

-         return best_response
+         # Return the best response as a list with a single string element
+         # This makes it consistent with other AugmentedLLM implementations
+         # that return List[MessageT]
+         return [best_response]

      async def generate_str(
          self,
@@ -271,28 +343,8 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              message=message,
              request_params=request_params,
          )
-
-         # Handle case where response is a single message
-         if not isinstance(response, list):
-             return str(response)
-
-         # Convert all messages to strings, handling different message types
-         result_strings = []
-         for r in response:
-             if hasattr(r, "text"):
-                 result_strings.append(r.text)
-             elif hasattr(r, "content"):
-                 # Handle ToolUseBlock and similar
-                 if isinstance(r.content, list):
-                     # Typically content is a list of blocks
-                     result_strings.extend(str(block) for block in r.content)
-                 else:
-                     result_strings.append(str(r.content))
-             else:
-                 # Fallback to string representation
-                 result_strings.append(str(r))
-
-         return "\n".join(result_strings)
+         # Since generate now returns [best_response], just return the first element
+         return str(response[0])

      async def generate_structured(
          self,
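After this change, `generate()` always returns a single-element list and `generate_str()` simply unwraps it. A minimal call-site sketch, assuming a workflow constructed as in the earlier hunks and run inside an async function:

    # Hypothetical call site
    responses = await workflow.generate("Summarise the quarterly data.")
    assert len(responses) == 1
    text = await workflow.generate_str("Summarise the quarterly data.")  # same content, unwrapped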
@@ -367,9 +419,14 @@ Be concrete and actionable in your recommendations.
          current_response: str,
          feedback: EvaluationResult,
          iteration: int,
+         use_history: bool = None,
      ) -> str:
          """Build the refinement prompt for the optimizer"""
-         history_enabled = hasattr(self, "history") and self.history
+         # Get the correct history setting - use param if provided, otherwise class default
+         if use_history is None:
+             use_history = (
+                 self.generator_use_history
+             )  # Use generator's setting as default

          # Start with clear non-delimited instructions
          prompt = f"""
@@ -391,7 +448,7 @@ Your goal is to address all feedback points while maintaining accuracy and relev
  """

          # Only include previous response if history is not enabled
-         if not history_enabled:
+         if not use_history:
              prompt += f"""
  <fastagent:previous-response>
  {current_response}
@@ -409,7 +466,7 @@ Your goal is to address all feedback points while maintaining accuracy and relev
  """

          # Customize instruction based on history availability
-         if not history_enabled:
+         if not use_history:
              prompt += """
  <fastagent:instruction>
  Create an improved version of the response that: