deepeval-3.7.3-py3-none-any.whl → deepeval-3.7.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/cli/test.py +1 -1
- deepeval/config/settings.py +102 -13
- deepeval/evaluate/configs.py +1 -1
- deepeval/evaluate/execute.py +4 -1
- deepeval/metrics/answer_relevancy/template.py +4 -4
- deepeval/metrics/argument_correctness/template.py +2 -2
- deepeval/metrics/bias/template.py +3 -3
- deepeval/metrics/contextual_precision/template.py +6 -6
- deepeval/metrics/contextual_recall/template.py +2 -2
- deepeval/metrics/contextual_relevancy/template.py +3 -3
- deepeval/metrics/conversation_completeness/template.py +2 -2
- deepeval/metrics/conversational_dag/templates.py +4 -4
- deepeval/metrics/conversational_g_eval/template.py +4 -3
- deepeval/metrics/dag/templates.py +4 -4
- deepeval/metrics/faithfulness/template.py +4 -4
- deepeval/metrics/hallucination/template.py +4 -4
- deepeval/metrics/misuse/template.py +2 -2
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/template.py +7 -7
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/template.py +6 -6
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/template.py +2 -2
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/template.py +3 -3
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/template.py +9 -9
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/template.py +4 -4
- deepeval/metrics/non_advice/template.py +2 -2
- deepeval/metrics/pii_leakage/template.py +2 -2
- deepeval/metrics/prompt_alignment/template.py +4 -4
- deepeval/metrics/role_violation/template.py +2 -2
- deepeval/metrics/step_efficiency/step_efficiency.py +1 -1
- deepeval/metrics/toxicity/template.py +4 -4
- deepeval/metrics/turn_relevancy/template.py +2 -2
- deepeval/models/embedding_models/azure_embedding_model.py +28 -15
- deepeval/models/embedding_models/local_embedding_model.py +23 -10
- deepeval/models/embedding_models/ollama_embedding_model.py +8 -6
- deepeval/models/embedding_models/openai_embedding_model.py +18 -2
- deepeval/models/llms/anthropic_model.py +17 -5
- deepeval/models/llms/azure_model.py +30 -18
- deepeval/models/llms/deepseek_model.py +22 -12
- deepeval/models/llms/gemini_model.py +120 -87
- deepeval/models/llms/grok_model.py +23 -16
- deepeval/models/llms/kimi_model.py +23 -12
- deepeval/models/llms/litellm_model.py +63 -25
- deepeval/models/llms/local_model.py +26 -18
- deepeval/models/llms/ollama_model.py +17 -7
- deepeval/models/llms/openai_model.py +22 -17
- deepeval/models/llms/portkey_model.py +132 -0
- deepeval/models/mlllms/azure_model.py +28 -19
- deepeval/models/mlllms/gemini_model.py +102 -73
- deepeval/models/mlllms/ollama_model.py +40 -9
- deepeval/models/mlllms/openai_model.py +65 -14
- deepeval/models/utils.py +48 -3
- deepeval/optimization/__init__.py +13 -0
- deepeval/optimization/adapters/__init__.py +2 -0
- deepeval/optimization/adapters/deepeval_scoring_adapter.py +588 -0
- deepeval/optimization/aggregates.py +14 -0
- deepeval/optimization/configs.py +34 -0
- deepeval/optimization/copro/configs.py +31 -0
- deepeval/optimization/copro/loop.py +837 -0
- deepeval/optimization/gepa/__init__.py +7 -0
- deepeval/optimization/gepa/configs.py +115 -0
- deepeval/optimization/gepa/loop.py +677 -0
- deepeval/optimization/miprov2/configs.py +134 -0
- deepeval/optimization/miprov2/loop.py +785 -0
- deepeval/optimization/mutations/__init__.py +0 -0
- deepeval/optimization/mutations/prompt_rewriter.py +458 -0
- deepeval/optimization/policies/__init__.py +16 -0
- deepeval/optimization/policies/selection.py +166 -0
- deepeval/optimization/policies/tie_breaker.py +67 -0
- deepeval/optimization/prompt_optimizer.py +462 -0
- deepeval/optimization/simba/__init__.py +0 -0
- deepeval/optimization/simba/configs.py +33 -0
- deepeval/optimization/simba/loop.py +983 -0
- deepeval/optimization/simba/types.py +15 -0
- deepeval/optimization/types.py +361 -0
- deepeval/optimization/utils.py +598 -0
- deepeval/prompt/prompt.py +10 -5
- deepeval/test_run/cache.py +2 -0
- deepeval/test_run/test_run.py +6 -1
- deepeval/utils.py +24 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.4.dist-info}/METADATA +1 -1
- {deepeval-3.7.3.dist-info → deepeval-3.7.4.dist-info}/RECORD +84 -59
- {deepeval-3.7.3.dist-info → deepeval-3.7.4.dist-info}/LICENSE.md +0 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.4.dist-info}/WHEEL +0 -0
- {deepeval-3.7.3.dist-info → deepeval-3.7.4.dist-info}/entry_points.txt +0 -0
deepeval/optimization/copro/loop.py (new file)
@@ -0,0 +1,837 @@
# - COPRO cooperative 0-shot variant:
# - Works on a single set of goldens (no D_pareto split).
# - Maintains a bounded population of candidate prompts
#   (size controlled by `population_size`).
# - At each iteration:
#   - Select a parent via epsilon-greedy on mean minibatch score.
#   - Sample a minibatch of goldens for scoring.
#   - Compute feedback once for the parent + minibatch.
#   - Propose multiple child prompts cooperatively from the same parent
#     (up to `proposals_per_step` children).
#   - For each child, accept it if its minibatch score improves on the
#     parent by at least `min_delta`, add it to the pool, and prune
#     low-scoring candidates if the population exceeds `population_size`.
# - Uses `full_eval_every` (if set) to periodically re-score the current
#   best candidate on the full golden set.

from __future__ import annotations

import random
import time
import uuid
from typing import (
    TYPE_CHECKING,
    Awaitable,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Union,
)

from deepeval.errors import DeepEvalError
from deepeval.optimization.aggregates import Aggregator, mean_of_all
from deepeval.optimization.types import (
    AcceptedIterationDict,
    ModuleId,
    OptimizationResult,
    PromptConfiguration,
    PromptConfigurationId,
    RunnerStatusCallbackProtocol,
    RunnerStatusType,
    ScoreTable,
    ScoringAdapter,
)
from deepeval.optimization.utils import (
    build_prompt_config_snapshots,
)
from deepeval.prompt.api import PromptType
from deepeval.prompt.prompt import Prompt
from deepeval.optimization.mutations.prompt_rewriter import PromptRewriter

from .configs import COPROConfig

if TYPE_CHECKING:  # pragma: no cover - type-checking only
    from deepeval.dataset.golden import ConversationalGolden, Golden


class COPRORunner:
    """
    COPRO style cooperative prompt optimization loop with sync/async execution.

    This runner is intentionally low level and does not know about metrics,
    models, or async configs. It relies on a preconfigured ScoringAdapter and
    PromptRewriter, which are typically constructed by PromptOptimizer.

    - Optimizes a single Prompt (instruction) against a list of Goldens.
    - Uses mini-batches of goldens for trial scoring and epsilon-greedy
      selection over prompt candidates based on mean minibatch scores,
      extended with cooperative proposals:
        - At each iteration, a parent candidate is selected.
        - A shared feedback string is computed on a minibatch.
        - Multiple child prompts are proposed from that parent using the
          same feedback but different LLM samples.
        - Any child whose minibatch score improves over the parent by at
          least ``min_delta`` is added to the candidate pool.
    """

    SINGLE_MODULE_ID: ModuleId = "__module__"

    def __init__(
        self,
        *,
        config: COPROConfig,
        aggregate_instances: Aggregator = mean_of_all,
        scoring_adapter: Optional[ScoringAdapter] = None,
    ) -> None:
        self.config = config
        self.aggregate_instances = aggregate_instances
        self.scoring_adapter = scoring_adapter

        # Random seeded from config is used for minibatch sampling and
        # epsilon-greedy candidate selection.
        self.random_state = random.Random(config.random_seed)

        self.random_state = random.Random(config.random_seed)

        # Runtime state to be reset between runs
        self.reset_state()

        # Status callback set by PromptOptimizer:
        # (kind, step_index, total_steps, detail) -> None
        self.status_callback: Optional[RunnerStatusCallbackProtocol] = None

        # Model callback used by the rewriter set by PromptOptimizer.
        self.model_callback: Optional[
            Callable[
                ...,
                Union[
                    str,
                    Dict,
                    Tuple[Union[str, Dict], float],
                ],
            ]
        ] = None

        # Lazy-loaded PromptRewriter set by PromptOptimizer
        self._rewriter: Optional[PromptRewriter] = None

    ##############
    # Public API #
    ##############

    def execute(
        self,
        *,
        prompt: Prompt,
        goldens: Union[List["Golden"], List["ConversationalGolden"]],
    ) -> Tuple[Prompt, Dict]:
        """
        Synchronous COPRO run from a full list of goldens.

        The full goldens set is used both for mini-batched scoring during
        optimization and for a final full evaluation of the best candidate.
        """
        total_goldens = len(goldens)
        if total_goldens < 1:
            raise DeepEvalError(
                "COPRO prompt optimization requires at least 1 golden, but "
                f"received {total_goldens}. Provide at least one golden to run "
                "the optimizer."
            )

        self._ensure_scoring_adapter()
        self._ensure_rewriter()
        self.reset_state()

        # Seed candidate pool with the root prompt configuration.
        seed_prompts_by_module = {self.SINGLE_MODULE_ID: prompt}
        root_prompt_configuration = PromptConfiguration.new(
            prompts=dict(seed_prompts_by_module)
        )
        # Add root candidate to the pool, but defer its first minibatch
        # evaluation until the first iteration so that any long running
        # model calls happen under the main loop (with progress updates).
        self._add_prompt_configuration(root_prompt_configuration)

        accepted_iterations: List[Dict] = []
        self.trial_index = 0

        def _one_iteration() -> bool:
            nonlocal accepted_iterations

            if not goldens:
                return False

            # Lazily seed with a minibatch score for the root
            # candidate on the first iteration.
            if not self._minibatch_score_counts:
                seed_minibatch = self._draw_minibatch(goldens)
                root_score = self.scoring_adapter.minibatch_score(
                    root_prompt_configuration, seed_minibatch
                )
                self._record_minibatch_score(
                    root_prompt_configuration.id, root_score
                )

            # 1. Choose which candidate prompt to mutate.
            parent_prompt_configuration = self._select_candidate()
            selected_module_id: ModuleId = self.SINGLE_MODULE_ID

            minibatch = self._draw_minibatch(goldens)

            # Compute shared feedback for this parent/minibatch that will be
            # used by all cooperative child proposals.
            feedback_text = self.scoring_adapter.minibatch_feedback(
                parent_prompt_configuration, selected_module_id, minibatch
            )

            before_mean = self._mean_minibatch_score(
                parent_prompt_configuration.id
            )
            jitter = 1e-6
            min_delta = max(self.config.min_delta, jitter)

            # 2. Generate multiple cooperative child prompts and evaluate them.
            num_proposals = int(self.config.proposals_per_step)
            for _ in range(num_proposals):
                child_prompt = self._generate_child_prompt(
                    selected_module_id,
                    parent_prompt_configuration,
                    feedback_text,
                )
                if child_prompt is None:
                    # No child, nothing more to do this iteration
                    continue

                child_prompt_configuration = self._make_child(
                    selected_module_id,
                    parent_prompt_configuration,
                    child_prompt,
                )

                child_score = self.scoring_adapter.minibatch_score(
                    child_prompt_configuration, minibatch
                )

                # 3. Evaluate & decide whether to accept the child.
                if child_score >= before_mean + min_delta:
                    # Accept: add to pool, update surrogate stats, and record iteration.
                    self._add_prompt_configuration(child_prompt_configuration)
                    self._record_minibatch_score(
                        child_prompt_configuration.id, child_score
                    )

                    accepted_iterations.append(
                        AcceptedIterationDict(
                            parent=parent_prompt_configuration.id,
                            child=child_prompt_configuration.id,
                            module=selected_module_id,
                            before=before_mean,
                            after=child_score,
                        )
                    )
                # else: reject; do not add child to the candidate pool.

            self.trial_index += 1
            if (
                self.config.full_eval_every is not None
                and self.trial_index % self.config.full_eval_every == 0
            ):
                self._full_evaluate_best(goldens)

            return True

        self._run_loop_iteration(_one_iteration)

        # Ensure at least one candidate has been fully evaluated.
        if not self.pareto_score_table:
            self._full_evaluate_best(goldens)

        best = self._best_by_aggregate()
        prompt_config_snapshots = build_prompt_config_snapshots(
            self.prompt_configurations_by_id
        )
        report = OptimizationResult(
            optimization_id=self.optimization_id,
            best_id=best.id,
            accepted_iterations=accepted_iterations,
            pareto_scores=self.pareto_score_table,
            parents=self.parents_by_id,
            prompt_configurations=prompt_config_snapshots,
        )
        return best.prompts[self.SINGLE_MODULE_ID], report.as_dict()

    async def a_execute(
        self,
        *,
        prompt: Prompt,
        goldens: Union[List["Golden"], List["ConversationalGolden"]],
    ) -> Tuple[Prompt, Dict]:
        """
        Asynchronous twin of execute().
        """
        total_goldens = len(goldens)
        if total_goldens < 1:
            raise DeepEvalError(
                "COPRO prompt optimization requires at least 1 golden, but "
                f"received {total_goldens}. Provide at least one golden to run "
                "the optimizer."
            )

        self._ensure_scoring_adapter()
        self._ensure_rewriter()
        self.reset_state()

        seed_prompts_by_module = {self.SINGLE_MODULE_ID: prompt}
        root_prompt_configuration = PromptConfiguration.new(
            prompts=dict(seed_prompts_by_module)
        )
        # Add root candidate to the pool, but defer its first minibatch
        # evaluation until the first iteration so that any long running
        # model calls happen under the main loop (with progress updates).
        self._add_prompt_configuration(root_prompt_configuration)

        accepted_iterations: List[Dict] = []
        self.trial_index = 0

        async def _one_iteration() -> bool:
            nonlocal accepted_iterations

            if not goldens:
                return False

            # Lazily seed with a minibatch score for the root
            # candidate on the first iteration.
            if not self._minibatch_score_counts:
                seed_minibatch = self._draw_minibatch(goldens)
                root_score = await self.scoring_adapter.a_minibatch_score(
                    root_prompt_configuration, seed_minibatch
                )
                self._record_minibatch_score(
                    root_prompt_configuration.id, root_score
                )

            parent_prompt_configuration = self._select_candidate()
            selected_module_id: ModuleId = self.SINGLE_MODULE_ID

            minibatch = self._draw_minibatch(goldens)

            feedback_text = await self.scoring_adapter.a_minibatch_feedback(
                parent_prompt_configuration, selected_module_id, minibatch
            )

            before_mean = self._mean_minibatch_score(
                parent_prompt_configuration.id
            )
            jitter = 1e-6
            min_delta = max(self.config.min_delta, jitter)

            num_proposals = int(self.config.proposals_per_step)
            for _ in range(num_proposals):
                child_prompt = await self._a_generate_child_prompt(
                    selected_module_id,
                    parent_prompt_configuration,
                    feedback_text,
                )
                if child_prompt is None:
                    continue

                child_prompt_configuration = self._make_child(
                    selected_module_id,
                    parent_prompt_configuration,
                    child_prompt,
                )

                child_score = await self.scoring_adapter.a_minibatch_score(
                    child_prompt_configuration, minibatch
                )

                if child_score >= before_mean + min_delta:
                    self._add_prompt_configuration(child_prompt_configuration)
                    self._record_minibatch_score(
                        child_prompt_configuration.id, child_score
                    )

                    accepted_iterations.append(
                        AcceptedIterationDict(
                            parent=parent_prompt_configuration.id,
                            child=child_prompt_configuration.id,
                            module=selected_module_id,
                            before=before_mean,
                            after=child_score,
                        )
                    )

            self.trial_index += 1
            if (
                self.config.full_eval_every is not None
                and self.trial_index % self.config.full_eval_every == 0
            ):
                await self._a_full_evaluate_best(goldens)

            return True

        await self._a_run_loop_iteration(_one_iteration)

        if not self.pareto_score_table:
            await self._a_full_evaluate_best(goldens)

        best = self._best_by_aggregate()
        prompt_config_snapshots = build_prompt_config_snapshots(
            self.prompt_configurations_by_id
        )
        report = OptimizationResult(
            optimization_id=self.optimization_id,
            best_id=best.id,
            accepted_iterations=accepted_iterations,
            pareto_scores=self.pareto_score_table,
            parents=self.parents_by_id,
            prompt_configurations=prompt_config_snapshots,
        )
        return best.prompts[self.SINGLE_MODULE_ID], report.as_dict()

    ###################
    # State & helpers #
    ###################

    def reset_state(self) -> None:
        self.optimization_id = str(uuid.uuid4())
        self.prompt_configurations_by_id: Dict[
            PromptConfigurationId, PromptConfiguration
        ] = {}
        self.parents_by_id: Dict[
            PromptConfigurationId, Optional[PromptConfigurationId]
        ] = {}
        # For COPRO we reuse the same field name as GEPA for full evaluation scores.
        self.pareto_score_table: ScoreTable = {}

        # Surrogate stats: running mean minibatch scores per candidate.
        self._minibatch_score_sums: Dict[PromptConfigurationId, float] = {}
        self._minibatch_score_counts: Dict[PromptConfigurationId, int] = {}

        # Trial counter (used for full_eval_every).
        self.trial_index: int = 0

    def _ensure_scoring_adapter(self) -> None:
        if self.scoring_adapter is None:
            raise DeepEvalError(
                "COPRORunner requires a `scoring_adapter`. "
                "Construct one (for example, DeepEvalScoringAdapter) in "
                "PromptOptimizer and assign it to `runner.scoring_adapter`."
            )

    def _ensure_rewriter(self) -> None:
        if self._rewriter is not None:
            return

        # Default basic PromptRewriter; PromptOptimizer can override this and
        # pass a configured instance (e.g. with list-mutation config).
        self._rewriter = PromptRewriter(
            max_chars=self.config.rewrite_instruction_max_chars,
            random_state=self.random_state,
        )

    def _prompts_equivalent(
        self,
        old_prompt: Prompt,
        new_prompt: Prompt,
    ) -> bool:
        """
        Compare two Prompts for optimization purposes.

        We treat a child as "no change" if:
        - The types differ, or
        - For TEXT: trimmed text_template matches.
        - For LIST: messages_template length, roles, and trimmed content match.
        """

        if new_prompt.type == PromptType.LIST:
            old_msgs = old_prompt.messages_template
            new_msgs = new_prompt.messages_template
            if len(old_msgs) != len(new_msgs):
                return False

            for old_msg, new_msg in zip(old_msgs, new_msgs):
                if old_msg.role != new_msg.role:
                    return False
                if (old_msg.content or "").strip() != (
                    new_msg.content or ""
                ).strip():
                    return False

            return True

        old_txt = (old_prompt.text_template or "").strip()
        new_txt = (new_prompt.text_template or "").strip()
        return new_txt == old_txt

    def _add_prompt_configuration(
        self,
        prompt_configuration: PromptConfiguration,
    ) -> None:
        """
        Add a candidate to the active pool and, if a population limit is set,
        prune the worst-scoring candidates to enforce it.
        """
        self.prompt_configurations_by_id[prompt_configuration.id] = (
            prompt_configuration
        )
        self.parents_by_id[prompt_configuration.id] = (
            prompt_configuration.parent
        )

        # If we exceed the population size, iteratively prune the worst
        # (by mean minibatch score), never removing the current best.
        while (
            len(self.prompt_configurations_by_id) > self.config.population_size
        ):
            best_id: Optional[PromptConfigurationId] = None
            best_score = float("-inf")
            for cand_id in self.prompt_configurations_by_id.keys():
                mean_score = self._mean_minibatch_score(cand_id)
                if mean_score > best_score:
                    best_score = mean_score
                    best_id = cand_id

            worst_id: Optional[PromptConfigurationId] = None
            worst_score = float("inf")
            for cand_id in self.prompt_configurations_by_id.keys():
                if cand_id == best_id:
                    continue
                mean_score = self._mean_minibatch_score(cand_id)
                if mean_score < worst_score:
                    worst_score = mean_score
                    worst_id = cand_id

            if worst_id is None or worst_id == best_id:
                break

            # Prune the chosen worst candidate from all bookkeeping tables.
            self.prompt_configurations_by_id.pop(worst_id, None)
            self.parents_by_id.pop(worst_id, None)
            self._minibatch_score_sums.pop(worst_id, None)
            self._minibatch_score_counts.pop(worst_id, None)
            self.pareto_score_table.pop(worst_id, None)

    def _record_minibatch_score(
        self,
        prompt_configuration_id: PromptConfigurationId,
        score: float,
    ) -> None:
        self._minibatch_score_sums[prompt_configuration_id] = (
            self._minibatch_score_sums.get(prompt_configuration_id, 0.0)
            + float(score)
        )
        self._minibatch_score_counts[prompt_configuration_id] = (
            self._minibatch_score_counts.get(prompt_configuration_id, 0) + 1
        )

    def _mean_minibatch_score(
        self,
        prompt_configuration_id: PromptConfigurationId,
    ) -> float:
        total = self._minibatch_score_sums.get(prompt_configuration_id, 0.0)
        count = self._minibatch_score_counts.get(prompt_configuration_id, 0)
        if count <= 0:
            # Use a sentinel that will not dominate selection if a scored
            # candidate exists. Root is seeded explicitly in the first iteration.
            return float("-inf")
        return total / count

    def _best_by_minibatch(self) -> PromptConfiguration:
        """
        Return the candidate with the highest mean minibatch score.
        """
        if not self.prompt_configurations_by_id:
            raise DeepEvalError(
                "COPRORunner has no prompt configurations; this should not happen."
            )

        best_id: Optional[PromptConfigurationId] = None
        best_score = float("-inf")

        for cand_id in self.prompt_configurations_by_id.keys():
            mean_score = self._mean_minibatch_score(cand_id)
            if mean_score > best_score:
                best_score = mean_score
                best_id = cand_id

        if best_id is None:
            # Fallback to the first candidate if all means are -inf.
            best_id = next(iter(self.prompt_configurations_by_id.keys()))

        return self.prompt_configurations_by_id[best_id]

    def _best_by_aggregate(self) -> PromptConfiguration:
        """
        Return the best candidate based on full-eval scores.

        If no full evaluation scores are available (should be rare, but possible if
        full_eval_every is very large and the loop exits early), fall back to
        best-by-minibatch.
        """
        if not self.pareto_score_table:
            return self._best_by_minibatch()

        totals = {
            prompt_configuration_id: self.aggregate_instances(vector)
            for prompt_configuration_id, vector in self.pareto_score_table.items()
        }

        best_ids: List[PromptConfigurationId] = []
        best_val = float("-inf")

        for cand_id, aggregate in totals.items():
            if aggregate > best_val + 1e-12:
                best_val = aggregate
                best_ids = [cand_id]
            elif abs(aggregate - best_val) <= 1e-12:
                best_ids.append(cand_id)

        chosen_id = self.random_state.choice(best_ids)
        return self.prompt_configurations_by_id[chosen_id]

    def _select_candidate(self) -> PromptConfiguration:
        """
        Epsilon-greedy candidate selection:

        - With probability ``exploration_probability``, pick a random candidate.
        - Otherwise, pick the candidate with the highest mean minibatch score.
        """
        if not self.prompt_configurations_by_id:
            raise DeepEvalError(
                "COPRORunner has no prompt configurations to select from."
            )

        candidate_ids = list(self.prompt_configurations_by_id.keys())
        if not candidate_ids:
            raise DeepEvalError(
                "COPRORunner has an empty candidate pool; this should not happen."
            )

        eps = float(self.config.exploration_probability)
        if eps > 0.0 and self.random_state.random() < eps:
            chosen_id = self.random_state.choice(candidate_ids)
        else:
            chosen_id = self._best_by_minibatch().id

        return self.prompt_configurations_by_id[chosen_id]

    def _draw_minibatch(
        self,
        goldens: Union[List["Golden"], List["ConversationalGolden"]],
    ) -> Union[List["Golden"], List["ConversationalGolden"]]:
        """
        Determine effective minibatch size from COPROConfig, bounded by the
        available goldens, and sample with replacement.
        """
        n = len(goldens)
        if n <= 0:
            return []

        if self.config.minibatch_size is not None:
            size = self.config.minibatch_size
        else:
            dynamic = max(1, int(round(n * self.config.minibatch_ratio)))
            size = max(
                self.config.minibatch_min_size,
                min(dynamic, self.config.minibatch_max_size),
            )

        size = max(1, min(size, n))

        return [goldens[self.random_state.randrange(0, n)] for _ in range(size)]

    async def _a_full_evaluate_best(
        self,
        goldens: Union[List["Golden"], List["ConversationalGolden"]],
    ) -> None:
        if not self.prompt_configurations_by_id:
            return

        best = self._best_by_minibatch()
        if best.id in self.pareto_score_table:
            return

        scores = await self.scoring_adapter.a_score_on_pareto(best, goldens)
        self.pareto_score_table[best.id] = scores

    def _full_evaluate_best(
        self,
        goldens: Union[List["Golden"], List["ConversationalGolden"]],
    ) -> None:
        if not self.prompt_configurations_by_id:
            return

        best = self._best_by_minibatch()
        if best.id in self.pareto_score_table:
            return

        scores = self.scoring_adapter.score_on_pareto(best, goldens)
        self.pareto_score_table[best.id] = scores

    async def _a_generate_child_prompt(
        self,
        selected_module_id: ModuleId,
        parent_prompt_configuration: PromptConfiguration,
        feedback_text: str,
    ) -> Optional[Prompt]:
        try:
            old_prompt = parent_prompt_configuration.prompts[selected_module_id]
        except KeyError as exc:
            raise DeepEvalError(
                "COPRORunner expected a prompt for module_id "
                f"{selected_module_id!r} but none was found in the "
                "current prompt configuration."
            ) from exc

        new_prompt = await self._rewriter.a_rewrite(
            model_callback=self.model_callback,
            module_id=selected_module_id,
            old_prompt=old_prompt,
            feedback_text=feedback_text,
        )

        if old_prompt.type != new_prompt.type or self._prompts_equivalent(
            old_prompt, new_prompt
        ):
            # Don't accept if new prompt is the same as parent, or if type changed.
            return None
        return new_prompt

    def _generate_child_prompt(
        self,
        selected_module_id: ModuleId,
        parent_prompt_configuration: PromptConfiguration,
        feedback_text: str,
    ) -> Optional[Prompt]:
        try:
            old_prompt = parent_prompt_configuration.prompts[selected_module_id]
        except KeyError as exc:
            # This should never happen in normal operation.
            raise DeepEvalError(
                "COPRORunner expected a prompt for module_id "
                f"{selected_module_id!r} but none was found in the "
                "current prompt configuration."
            ) from exc

        new_prompt = self._rewriter.rewrite(
            model_callback=self.model_callback,
            module_id=selected_module_id,
            old_prompt=old_prompt,
            feedback_text=feedback_text,
        )

        if old_prompt.type != new_prompt.type or self._prompts_equivalent(
            old_prompt, new_prompt
        ):
            # Don't accept if new prompt is the same as parent, or if type changed.
            return None
        return new_prompt

    def _make_child(
        self,
        selected_module_id: ModuleId,
        parent_prompt_configuration: PromptConfiguration,
        child_prompt: Prompt,
    ) -> PromptConfiguration:
        child_prompt_configuration = PromptConfiguration.new(
            prompts=dict(parent_prompt_configuration.prompts),
            parent=parent_prompt_configuration.id,
        )
        child_prompt_configuration.prompts[selected_module_id] = child_prompt
        return child_prompt_configuration

    def _update_progress(
        self,
        total_iterations: int,
        iteration: int,
        remaining_iterations: int,
        elapsed: float,
    ) -> None:
        if self.status_callback is not None:
            detail = (
                f"(iterations={total_iterations}) "
                f"• iteration {iteration}/{total_iterations} "
                f"• {elapsed:.2f}s • remaining={remaining_iterations}"
            )
            self.status_callback(
                RunnerStatusType.PROGRESS,
                step_index=iteration,
                total_steps=total_iterations,
                detail=detail,
            )

    def _update_error(
        self,
        total_iterations: int,
        iteration: int,
        exc: Exception,
    ) -> None:
        # Report a user-facing error event.
        if self.status_callback is not None:
            detail = (
                f"(iterations={total_iterations}) "
                f"• error {exc.__class__.__name__}: {exc} "
                f"• halted at iteration {iteration}"
            )
            self.status_callback(
                RunnerStatusType.ERROR,
                step_index=iteration,
                total_steps=total_iterations,
                detail=detail,
            )

    def _run_loop_iteration(
        self,
        copro_iteration: Callable[[], bool],
    ) -> None:
        total_iterations = self.config.iterations
        remaining_iterations = total_iterations
        iteration = 0
        self._update_progress(
            total_iterations, iteration, remaining_iterations, 0.0
        )
        while remaining_iterations > 0:
            iteration += 1
            start_time = time.perf_counter()
            try:
                ok = copro_iteration()
            except Exception as exc:
                self._update_error(total_iterations, iteration, exc)
                break
            elapsed = time.perf_counter() - start_time
            if not ok:
                break
            remaining_iterations -= 1
            self._update_progress(
                total_iterations, iteration, remaining_iterations, elapsed
            )

    async def _a_run_loop_iteration(
        self,
        a_copro_iteration: Callable[[], Awaitable[bool]],
    ) -> None:
        total_iterations = self.config.iterations
        remaining_iterations = total_iterations
        iteration = 0
        self._update_progress(
            total_iterations, iteration, remaining_iterations, 0.0
        )
        while remaining_iterations > 0:
            iteration += 1
            start_time = time.perf_counter()
            try:
                ok = await a_copro_iteration()
            except Exception as exc:
                self._update_error(total_iterations, iteration, exc)
                break
            elapsed = time.perf_counter() - start_time
            if not ok:
                break
            remaining_iterations -= 1
            self._update_progress(
                total_iterations, iteration, remaining_iterations, elapsed
            )
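For orientation, here is a minimal sketch of how this new runner could be driven directly; in normal use PromptOptimizer constructs the scoring adapter, rewriter, and callbacks. The stub adapter below only implements the methods the synchronous path above calls (minibatch_score, minibatch_feedback, score_on_pareto); its scoring logic, the bare COPROConfig() call, and the placeholder model callback are illustrative assumptions, not part of the published API.

# Hypothetical sketch only: driving COPRORunner with a stub scoring adapter.
# The adapter methods mirror what loop.py above calls; the stub's behavior,
# the assumption that COPROConfig() has usable defaults, and the lambda model
# callback are invented for illustration.
from deepeval.optimization.copro.configs import COPROConfig
from deepeval.optimization.copro.loop import COPRORunner


class StubScoringAdapter:
    # Only the methods the synchronous loop above uses.
    def minibatch_score(self, prompt_configuration, goldens) -> float:
        return 0.5  # placeholder score

    def minibatch_feedback(self, prompt_configuration, module_id, goldens) -> str:
        return "Be more specific about the expected output format."

    def score_on_pareto(self, prompt_configuration, goldens):
        return [0.5 for _ in goldens]  # one score per golden for the full eval


runner = COPRORunner(
    config=COPROConfig(),  # assumes default field values exist in configs.py
    scoring_adapter=StubScoringAdapter(),
)
# The PromptRewriter consumes this callback to produce rewritten prompt text;
# its exact call signature lives in prompt_rewriter.py (not shown here).
runner.model_callback = lambda *args, **kwargs: "rewritten prompt text"

# best_prompt, report = runner.execute(prompt=seed_prompt, goldens=goldens)
# `report` is the OptimizationResult dict: accepted iterations, parents, and
# full-eval scores for the best candidate.

As a worked example of the sizing rule in _draw_minibatch: with 40 goldens and illustrative config values minibatch_ratio=0.25, minibatch_min_size=3, minibatch_max_size=8 (and minibatch_size left unset), the size is max(3, min(round(40 * 0.25), 8)) = 8, so 8 goldens are sampled with replacement per trial.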