opik-optimizer 1.0.6__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (56)
  1. opik_optimizer/__init__.py +4 -0
  2. opik_optimizer/_throttle.py +2 -1
  3. opik_optimizer/base_optimizer.py +402 -28
  4. opik_optimizer/data/context7_eval.jsonl +3 -0
  5. opik_optimizer/datasets/context7_eval.py +90 -0
  6. opik_optimizer/datasets/tiny_test.py +33 -34
  7. opik_optimizer/datasets/truthful_qa.py +2 -2
  8. opik_optimizer/evolutionary_optimizer/crossover_ops.py +194 -0
  9. opik_optimizer/evolutionary_optimizer/evaluation_ops.py +136 -0
  10. opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py +289 -966
  11. opik_optimizer/evolutionary_optimizer/helpers.py +10 -0
  12. opik_optimizer/evolutionary_optimizer/llm_support.py +136 -0
  13. opik_optimizer/evolutionary_optimizer/mcp.py +249 -0
  14. opik_optimizer/evolutionary_optimizer/mutation_ops.py +306 -0
  15. opik_optimizer/evolutionary_optimizer/population_ops.py +228 -0
  16. opik_optimizer/evolutionary_optimizer/prompts.py +352 -0
  17. opik_optimizer/evolutionary_optimizer/reporting.py +28 -4
  18. opik_optimizer/evolutionary_optimizer/style_ops.py +86 -0
  19. opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py +90 -81
  20. opik_optimizer/few_shot_bayesian_optimizer/reporting.py +12 -5
  21. opik_optimizer/gepa_optimizer/__init__.py +3 -0
  22. opik_optimizer/gepa_optimizer/adapter.py +154 -0
  23. opik_optimizer/gepa_optimizer/gepa_optimizer.py +653 -0
  24. opik_optimizer/gepa_optimizer/reporting.py +181 -0
  25. opik_optimizer/logging_config.py +42 -7
  26. opik_optimizer/mcp_utils/__init__.py +22 -0
  27. opik_optimizer/mcp_utils/mcp.py +541 -0
  28. opik_optimizer/mcp_utils/mcp_second_pass.py +152 -0
  29. opik_optimizer/mcp_utils/mcp_simulator.py +116 -0
  30. opik_optimizer/mcp_utils/mcp_workflow.py +547 -0
  31. opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py +470 -134
  32. opik_optimizer/meta_prompt_optimizer/reporting.py +16 -2
  33. opik_optimizer/mipro_optimizer/_lm.py +30 -23
  34. opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py +52 -51
  35. opik_optimizer/mipro_optimizer/mipro_optimizer.py +126 -46
  36. opik_optimizer/mipro_optimizer/utils.py +2 -4
  37. opik_optimizer/optimizable_agent.py +21 -16
  38. opik_optimizer/optimization_config/chat_prompt.py +44 -23
  39. opik_optimizer/optimization_config/configs.py +3 -3
  40. opik_optimizer/optimization_config/mappers.py +9 -8
  41. opik_optimizer/optimization_result.py +22 -14
  42. opik_optimizer/reporting_utils.py +61 -10
  43. opik_optimizer/task_evaluator.py +9 -8
  44. opik_optimizer/utils/__init__.py +15 -0
  45. opik_optimizer/utils/colbert.py +236 -0
  46. opik_optimizer/{utils.py → utils/core.py} +160 -33
  47. opik_optimizer/utils/dataset_utils.py +49 -0
  48. opik_optimizer/utils/prompt_segments.py +186 -0
  49. opik_optimizer-2.0.0.dist-info/METADATA +345 -0
  50. opik_optimizer-2.0.0.dist-info/RECORD +74 -0
  51. opik_optimizer-2.0.0.dist-info/licenses/LICENSE +203 -0
  52. opik_optimizer-1.0.6.dist-info/METADATA +0 -181
  53. opik_optimizer-1.0.6.dist-info/RECORD +0 -50
  54. opik_optimizer-1.0.6.dist-info/licenses/LICENSE +0 -21
  55. {opik_optimizer-1.0.6.dist-info → opik_optimizer-2.0.0.dist-info}/WHEEL +0 -0
  56. {opik_optimizer-1.0.6.dist-info → opik_optimizer-2.0.0.dist-info}/top_level.txt +0 -0
opik_optimizer/evolutionary_optimizer/mutation_ops.py (new file)
@@ -0,0 +1,306 @@
+ from typing import Any, TYPE_CHECKING
+ from collections.abc import Callable
+
+ import json
+ import logging
+ import random
+
+ from . import prompts as evo_prompts
+ from .mcp import EvolutionaryMCPContext, tool_description_mutation
+ from ..optimization_config import chat_prompt
+ from .. import utils
+ from . import reporting
+
+
+ logger = logging.getLogger(__name__)
+
+
+ class MutationOps:
+     if TYPE_CHECKING:
+         _calculate_population_diversity: Any
+         DEFAULT_DIVERSITY_THRESHOLD: float
+         verbose: int
+         output_style_guidance: str
+         _get_task_description_for_llm: Any
+         _call_model: Any
+         _mcp_context: EvolutionaryMCPContext | None
+         _update_individual_with_prompt: Callable[[Any, chat_prompt.ChatPrompt], Any]
+
+     def _deap_mutation(
+         self, individual: Any, initial_prompt: chat_prompt.ChatPrompt
+     ) -> Any:
+         """Enhanced mutation operation with multiple strategies."""
+         prompt = chat_prompt.ChatPrompt(messages=individual)
+
+         mcp_context = getattr(self, "_mcp_context", None)
+         if mcp_context is not None:
+             mutated_prompt = tool_description_mutation(self, prompt, mcp_context)
+             if mutated_prompt is not None:
+                 reporting.display_success(
+                     " Mutation successful, tool description updated (MCP mutation).",
+                     verbose=self.verbose,
+                 )
+                 return self._update_individual_with_prompt(individual, mutated_prompt)
+
+         # Choose mutation strategy based on current diversity
+         diversity = self._calculate_population_diversity()
+
+         # Determine thresholds based on diversity
+         if diversity < self.DEFAULT_DIVERSITY_THRESHOLD:
+             # Low diversity - use more aggressive mutations (higher chance for semantic)
+             semantic_threshold = 0.5
+             structural_threshold = 0.8  # semantic_threshold + 0.3
+         else:
+             # Good diversity - use more conservative mutations (higher chance for word_level)
+             semantic_threshold = 0.4
+             structural_threshold = 0.7  # semantic_threshold + 0.3
+
+         mutation_choice = random.random()
+
+         if mutation_choice > structural_threshold:
+             mutated_prompt = self._word_level_mutation_prompt(prompt)
+             reporting.display_success(
+                 " Mutation successful, prompt has been edited by randomizing words (word-level mutation).",
+                 verbose=self.verbose,
+             )
+             return self._update_individual_with_prompt(individual, mutated_prompt)
+         elif mutation_choice > semantic_threshold:
+             mutated_prompt = self._structural_mutation(prompt)
+             reporting.display_success(
+                 " Mutation successful, prompt has been edited by reordering, combining, or splitting sentences (structural mutation).",
+                 verbose=self.verbose,
+             )
+             return self._update_individual_with_prompt(individual, mutated_prompt)
+         else:
+             mutated_prompt = self._semantic_mutation(prompt, initial_prompt)
+             reporting.display_success(
+                 " Mutation successful, prompt has been edited using an LLM (semantic mutation).",
+                 verbose=self.verbose,
+             )
+             return self._update_individual_with_prompt(individual, mutated_prompt)
+
+     def _semantic_mutation(
+         self, prompt: chat_prompt.ChatPrompt, initial_prompt: chat_prompt.ChatPrompt
+     ) -> chat_prompt.ChatPrompt:
+         """Enhanced semantic mutation with multiple strategies."""
+         current_output_style_guidance = self.output_style_guidance
+         if random.random() < 0.1:
+             return self._radical_innovation_mutation(prompt, initial_prompt)
+
+         try:
+             strategy = random.choice(
+                 [
+                     "rephrase",
+                     "simplify",
+                     "elaborate",
+                     "restructure",
+                     "focus",
+                     "increase_complexity_and_detail",
+                 ]
+             )
+
+             strategy_prompts = evo_prompts.mutation_strategy_prompts(
+                 current_output_style_guidance
+             )
+             user_prompt_for_semantic_mutation = (
+                 evo_prompts.semantic_mutation_user_prompt(
+                     prompt.get_messages(),
+                     self._get_task_description_for_llm(initial_prompt),
+                     current_output_style_guidance,
+                     strategy_prompts[strategy],
+                 )
+             )
+             response = self._call_model(
+                 messages=[
+                     {
+                         "role": "system",
+                         "content": evo_prompts.semantic_mutation_system_prompt(
+                             current_output_style_guidance
+                         ),
+                     },
+                     {"role": "user", "content": user_prompt_for_semantic_mutation},
+                 ],
+                 is_reasoning=True,
+             )
+
+             try:
+                 messages = utils.json_to_dict(response.strip())
+             except Exception as parse_exc:
+                 raise RuntimeError(
+                     f"Error parsing semantic mutation response as JSON. "
+                     f"Response: {response!r}\nOriginal error: {parse_exc}"
+                 ) from parse_exc
+             return chat_prompt.ChatPrompt(messages=messages)
+         except Exception as e:
+             reporting.display_error(
+                 f" Error in semantic mutation, this is usually a parsing error: {e}",
+                 verbose=self.verbose,
+             )
+             return prompt
+
+     def _structural_mutation(
+         self, prompt: chat_prompt.ChatPrompt
+     ) -> chat_prompt.ChatPrompt:
+         """Perform structural mutation (reordering, combining, splitting)."""
+         mutated_messages: list[dict[str, str]] = []
+
+         for message in prompt.get_messages():
+             content = message["content"]
+             role = message["role"]
+
+             sentences = [s.strip() for s in content.split(".") if s.strip()]
+             if len(sentences) <= 1:
+                 mutated_messages.append(
+                     {"role": role, "content": self._word_level_mutation(content)}
+                 )
+                 continue
+
+             mutation_type = random.random()
+             if mutation_type < 0.3:
+                 random.shuffle(sentences)
+                 mutated_messages.append(
+                     {"role": role, "content": ". ".join(sentences) + "."}
+                 )
+                 continue
+             elif mutation_type < 0.6:
+                 if len(sentences) >= 2:
+                     idx = random.randint(0, len(sentences) - 2)
+                     combined = sentences[idx] + " and " + sentences[idx + 1]
+                     sentences[idx : idx + 2] = [combined]
+                     mutated_messages.append(
+                         {"role": role, "content": ". ".join(sentences) + "."}
+                     )
+                     continue
+             else:
+                 idx = random.randint(0, len(sentences) - 1)
+                 words = sentences[idx].split()
+                 if len(words) > 3:
+                     split_point = random.randint(2, len(words) - 2)
+                     sentences[idx : idx + 1] = [
+                         " ".join(words[:split_point]),
+                         " ".join(words[split_point:]),
+                     ]
+                     mutated_messages.append(
+                         {"role": role, "content": ". ".join(sentences) + "."}
+                     )
+                     continue
+                 else:
+                     mutated_messages.append({"role": role, "content": content})
+
+         return chat_prompt.ChatPrompt(messages=mutated_messages)
+
+     def _word_level_mutation_prompt(
+         self, prompt: chat_prompt.ChatPrompt
+     ) -> chat_prompt.ChatPrompt:
+         mutated_messages: list[dict[str, str]] = []
+         for message in prompt.get_messages():
+             mutated_messages.append(
+                 {
+                     "role": message["role"],
+                     "content": self._word_level_mutation(message["content"]),
+                 }
+             )
+         return chat_prompt.ChatPrompt(messages=mutated_messages)
+
+     def _word_level_mutation(self, msg_content: str) -> str:
+         """Perform word-level mutation."""
+         words = msg_content.split()
+         if len(words) <= 1:
+             return msg_content
+
+         mutation_type = random.random()
+         if mutation_type < 0.3:
+             idx = random.randint(0, len(words) - 1)
+             words[idx] = self._get_synonym(words[idx])
+         elif mutation_type < 0.6:
+             if len(words) > 2:
+                 i, j = random.sample(range(len(words)), 2)
+                 words[i], words[j] = words[j], words[i]
+         else:
+             idx = random.randint(0, len(words) - 1)
+             words[idx] = self._modify_phrase(words[idx])
+
+         return " ".join(words)
+
+     def _get_synonym(self, word: str) -> str:
+         """Get a synonym for a word using LLM."""
+         try:
+             response = self._call_model(
+                 messages=[
+                     {"role": "system", "content": evo_prompts.synonyms_system_prompt()},
+                     {
+                         "role": "user",
+                         "content": (
+                             f"Give me a single synonym for the word '{word}'. Return only the synonym, nothing else."
+                         ),
+                     },
+                 ],
+                 is_reasoning=True,
+             )
+             return response.strip()
+         except Exception as e:
+             logger.warning(f"Error getting synonym for '{word}': {e}")
+             return word
+
+     def _modify_phrase(self, phrase: str) -> str:
+         """Modify a phrase while preserving meaning using LLM."""
+         try:
+             response = self._call_model(
+                 messages=[
+                     {"role": "system", "content": evo_prompts.rephrase_system_prompt()},
+                     {
+                         "role": "user",
+                         "content": (
+                             f"Modify this phrase while keeping the same meaning: '{phrase}'. Return only the modified phrase, nothing else."
+                         ),
+                     },
+                 ],
+                 is_reasoning=True,
+             )
+             return response.strip()
+         except Exception as e:
+             logger.warning(f"Error modifying phrase '{phrase}': {e}")
+             return phrase
+
+     def _radical_innovation_mutation(
+         self, prompt: chat_prompt.ChatPrompt, initial_prompt: chat_prompt.ChatPrompt
+     ) -> chat_prompt.ChatPrompt:
+         """Attempts to generate a significantly improved and potentially very different prompt using an LLM."""
+         logger.debug(
+             f"Attempting radical innovation for prompt: {json.dumps(prompt.get_messages())[:70]}..."
+         )
+         task_desc_for_llm = self._get_task_description_for_llm(initial_prompt)
+         current_output_style_guidance = self.output_style_guidance
+
+         user_prompt_for_radical_innovation = evo_prompts.radical_innovation_user_prompt(
+             task_desc_for_llm, current_output_style_guidance, prompt.get_messages()
+         )
+         try:
+             new_prompt_str = self._call_model(
+                 messages=[
+                     {
+                         "role": "system",
+                         "content": evo_prompts.radical_innovation_system_prompt(
+                             current_output_style_guidance
+                         ),
+                     },
+                     {"role": "user", "content": user_prompt_for_radical_innovation},
+                 ],
+                 is_reasoning=True,
+             )
+             logger.info(
+                 f"Radical innovation LLM result (truncated): {new_prompt_str[:200]}"
+             )
+             try:
+                 new_messages = utils.json_to_dict(new_prompt_str)
+             except Exception as parse_exc:
+                 logger.warning(
+                     f"Failed to parse LLM output in radical innovation mutation for prompt '{json.dumps(prompt.get_messages())[:50]}...'. Output: {new_prompt_str[:200]}. Error: {parse_exc}. Returning original."
+                 )
+                 return prompt
+             return chat_prompt.ChatPrompt(messages=new_messages)
+         except Exception as e:
+             logger.warning(
+                 f"Radical innovation mutation failed for prompt '{json.dumps(prompt.get_messages())[:50]}...': {e}. Returning original."
+             )
+             return prompt
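
Note on the hunk above: the strategy dispatch in `_deap_mutation` partitions a single `random.random()` draw with two diversity-dependent cutoffs, so semantic mutation is chosen with probability 0.5 when population diversity is low and 0.4 otherwise, structural mutation with 0.3 in both regimes, and word-level mutation gets the remainder. A minimal standalone sketch of that dispatch (the `pick_strategy` helper and the 0.7 diversity cutoff are illustrative placeholders, not names from the package):

import random
from collections import Counter

DIVERSITY_CUTOFF = 0.7  # placeholder standing in for DEFAULT_DIVERSITY_THRESHOLD

def pick_strategy(diversity: float) -> str:
    # Mirrors the threshold logic in MutationOps._deap_mutation.
    if diversity < DIVERSITY_CUTOFF:
        semantic, structural = 0.5, 0.8  # low diversity: favor aggressive semantic edits
    else:
        semantic, structural = 0.4, 0.7  # healthy diversity: favor cheap edits
    draw = random.random()
    if draw > structural:
        return "word_level"
    elif draw > semantic:
        return "structural"
    return "semantic"

print(Counter(pick_strategy(0.2) for _ in range(10_000)))
# Roughly 50% semantic, 30% structural, 20% word_level in the low-diversity regime.
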
opik_optimizer/evolutionary_optimizer/population_ops.py (new file)
@@ -0,0 +1,228 @@
+ from typing import Any, TYPE_CHECKING
+
+ import json
+ import logging
+
+ from deap import tools
+ from deap import creator as _creator
+
+ from . import prompts as evo_prompts
+ from . import reporting
+ from .mcp import EvolutionaryMCPContext, initialize_population_mcp
+ from ..optimization_config import chat_prompt
+ from .. import utils
+
+
+ logger = logging.getLogger(__name__)
+ creator = _creator
+
+
+ class PopulationOps:
+     if TYPE_CHECKING:
+         _get_task_description_for_llm: Any
+         output_style_guidance: str
+         _call_model: Any
+         toolbox: Any
+         _mcp_context: EvolutionaryMCPContext | None
+         # Hints for mixin attributes provided by the primary optimizer class
+         _gens_since_pop_improvement: int
+         _best_primary_score_history: list[float]
+         DEFAULT_RESTART_THRESHOLD: float
+         DEFAULT_RESTART_GENERATIONS: int
+         enable_moo: bool
+         elitism_size: int
+         population_size: int
+         verbose: int
+
+     def _initialize_population(
+         self, prompt: chat_prompt.ChatPrompt
+     ) -> list[chat_prompt.ChatPrompt]:
+         """Initialize the population with diverse variations of the initial prompt,
+         including some 'fresh start' prompts based purely on task description.
+         All generated prompts should aim to elicit answers matching self.output_style_guidance.
+         """
+         mcp_context = getattr(self, "_mcp_context", None)
+         if mcp_context is not None:
+             return initialize_population_mcp(self, prompt, mcp_context)
+         with reporting.initializing_population(verbose=self.verbose) as init_pop_report:
+             init_pop_report.start(self.population_size)
+
+             population = [prompt]
+             if self.population_size <= 1:
+                 return population
+
+             num_to_generate_total = self.population_size - 1
+             num_fresh_starts = max(1, int(num_to_generate_total * 0.2))
+             num_variations_on_initial = num_to_generate_total - num_fresh_starts
+
+             task_desc_for_llm = self._get_task_description_for_llm(prompt)
+             current_output_style_guidance = self.output_style_guidance
+
+             # Fresh starts
+             if num_fresh_starts > 0:
+                 init_pop_report.start_fresh_prompts(num_fresh_starts)
+                 fresh_start_user_prompt = evo_prompts.fresh_start_user_prompt(
+                     task_desc_for_llm, current_output_style_guidance, num_fresh_starts
+                 )
+                 try:
+                     response_content = self._call_model(
+                         messages=[
+                             {
+                                 "role": "system",
+                                 "content": evo_prompts.fresh_start_system_prompt(
+                                     current_output_style_guidance
+                                 ),
+                             },
+                             {"role": "user", "content": fresh_start_user_prompt},
+                         ],
+                         is_reasoning=True,
+                     )
+
+                     logger.debug(
+                         f"Raw LLM response for fresh start prompts: {response_content}"
+                     )
+
+                     fresh_prompts = utils.json_to_dict(response_content)
+                     if isinstance(fresh_prompts, list):
+                         if all(isinstance(p, dict) for p in fresh_prompts) and all(
+                             p.get("role") is not None for p in fresh_prompts
+                         ):
+                             population.append(
+                                 chat_prompt.ChatPrompt(messages=fresh_prompts)
+                             )
+                             init_pop_report.success_fresh_prompts(1)
+                         elif all(isinstance(p, list) for p in fresh_prompts):
+                             population.extend(
+                                 [
+                                     chat_prompt.ChatPrompt(messages=p)
+                                     for p in fresh_prompts[:num_fresh_starts]
+                                 ]
+                             )
+                             init_pop_report.success_fresh_prompts(
+                                 len(fresh_prompts[:num_fresh_starts])
+                             )
+                     else:
+                         init_pop_report.failed_fresh_prompts(
+                             num_fresh_starts,
+                             f"LLM response for fresh starts was not a valid list of strings or was empty: {response_content}. Skipping fresh start prompts.",
+                         )
+                 except json.JSONDecodeError as e_json:
+                     init_pop_report.failed_fresh_prompts(
+                         num_fresh_starts,
+                         f"JSONDecodeError generating fresh start prompts: {e_json}. LLM response: '{response_content}'. Skipping fresh start prompts.",
+                     )
+                 except Exception as e:
+                     init_pop_report.failed_fresh_prompts(
+                         num_fresh_starts,
+                         f"Error generating fresh start prompts: {e}. Skipping fresh start prompts.",
+                     )
+
+             # Variations on the initial prompt
+             if num_variations_on_initial > 0:
+                 init_pop_report.start_variations(num_variations_on_initial)
+                 user_prompt_for_variation = evo_prompts.variation_user_prompt(
+                     prompt.get_messages(),
+                     task_desc_for_llm,
+                     current_output_style_guidance,
+                     num_variations_on_initial,
+                 )
+                 try:
+                     response_content_variations = self._call_model(
+                         messages=[
+                             {
+                                 "role": "system",
+                                 "content": evo_prompts.variation_system_prompt(
+                                     current_output_style_guidance
+                                 ),
+                             },
+                             {"role": "user", "content": user_prompt_for_variation},
+                         ],
+                         is_reasoning=True,
+                     )
+                     logger.debug(
+                         f"Raw response for population variations: {response_content_variations}"
+                     )
+                     json_response_variations = json.loads(response_content_variations)
+                     generated_prompts_variations = [
+                         p["prompt"]
+                         for p in json_response_variations.get("prompts", [])
+                         if isinstance(p, dict) and "prompt" in p
+                     ]
+
+                     if generated_prompts_variations:
+                         init_pop_report.success_variations(
+                             len(
+                                 generated_prompts_variations[:num_variations_on_initial]
+                             )
+                         )
+                         population.extend(
+                             [
+                                 chat_prompt.ChatPrompt(messages=p)
+                                 for p in generated_prompts_variations[
+                                     :num_variations_on_initial
+                                 ]
+                             ]
+                         )
+                     else:
+                         init_pop_report.failed_variations(
+                             num_variations_on_initial,
+                             "Could not parse 'prompts' list for variations. Skipping variations.",
+                         )
+                 except Exception as e:
+                     init_pop_report.failed_variations(
+                         num_variations_on_initial,
+                         f"Error calling LLM for initial population variations: {e}",
+                     )
+
+             # Ensure population is of the required size using unique prompts
+             final_population_set: set[str] = set()
+             final_population_list: list[chat_prompt.ChatPrompt] = []
+             for p in population:
+                 if json.dumps(p.get_messages()) not in final_population_set:
+                     final_population_set.add(json.dumps(p.get_messages()))
+                     final_population_list.append(p)
+
+             init_pop_report.end(final_population_list)
+             return final_population_list[: self.population_size]
+
+     def _should_restart_population(self, curr_best: float) -> bool:
+         """Update internal counters and decide if we should trigger a population restart."""
+         if self._best_primary_score_history:
+             threshold = self._best_primary_score_history[-1] * (
+                 1 + self.DEFAULT_RESTART_THRESHOLD
+             )
+             if curr_best < threshold:
+                 self._gens_since_pop_improvement += 1  # type: ignore[attr-defined]
+             else:
+                 self._gens_since_pop_improvement = 0  # type: ignore[attr-defined]
+         self._best_primary_score_history.append(curr_best)
+         return self._gens_since_pop_improvement >= self.DEFAULT_RESTART_GENERATIONS  # type: ignore[attr-defined]
+
+     def _restart_population(
+         self,
+         hof: tools.HallOfFame,
+         population: list[Any],
+         best_prompt_so_far: chat_prompt.ChatPrompt,
+     ) -> list[Any]:
+         """Return a fresh, evaluated population seeded by elites."""
+         if self.enable_moo:
+             elites = list(hof)
+         else:
+             elites = tools.selBest(population, self.elitism_size)
+
+         seed_prompt = (
+             chat_prompt.ChatPrompt(
+                 messages=max(elites, key=lambda x: x.fitness.values[0])
+             )
+             if elites
+             else best_prompt_so_far
+         )
+
+         prompt_variants = self._initialize_population(seed_prompt)
+         new_pop = [creator.Individual(p.get_messages()) for p in prompt_variants]
+
+         for ind, fit in zip(new_pop, map(self.toolbox.evaluate, new_pop)):
+             ind.fitness.values = fit
+
+         self._gens_since_pop_improvement = 0  # type: ignore[attr-defined]
+         return new_pop
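
Note on the hunk above: `_should_restart_population` counts consecutive generations whose best score fails to beat the previous best by a relative margin, and signals a restart once the count reaches a patience limit. A minimal self-contained sketch of the same bookkeeping (the 0.05 margin and patience of 3 are illustrative stand-ins for `DEFAULT_RESTART_THRESHOLD` and `DEFAULT_RESTART_GENERATIONS`; `RestartTracker` is not a name from the package):

class RestartTracker:
    # Standalone mirror of the logic in PopulationOps._should_restart_population.
    def __init__(self, margin: float = 0.05, patience: int = 3) -> None:
        self.margin = margin
        self.patience = patience
        self.gens_since_improvement = 0
        self.history: list[float] = []

    def should_restart(self, curr_best: float) -> bool:
        if self.history:
            # An improvement only counts if it clears the relative margin.
            if curr_best < self.history[-1] * (1 + self.margin):
                self.gens_since_improvement += 1
            else:
                self.gens_since_improvement = 0
        self.history.append(curr_best)
        return self.gens_since_improvement >= self.patience

tracker = RestartTracker()
for score in [0.50, 0.51, 0.51, 0.51, 0.60]:
    print(score, tracker.should_restart(score))
# The fourth call returns True: three straight generations missed the margin;
# the fifth (0.60) clears it and resets the counter.
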