opik-optimizer 1.0.6__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opik_optimizer/__init__.py +4 -0
- opik_optimizer/_throttle.py +2 -1
- opik_optimizer/base_optimizer.py +402 -28
- opik_optimizer/data/context7_eval.jsonl +3 -0
- opik_optimizer/datasets/context7_eval.py +90 -0
- opik_optimizer/datasets/tiny_test.py +33 -34
- opik_optimizer/datasets/truthful_qa.py +2 -2
- opik_optimizer/evolutionary_optimizer/crossover_ops.py +194 -0
- opik_optimizer/evolutionary_optimizer/evaluation_ops.py +136 -0
- opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py +289 -966
- opik_optimizer/evolutionary_optimizer/helpers.py +10 -0
- opik_optimizer/evolutionary_optimizer/llm_support.py +136 -0
- opik_optimizer/evolutionary_optimizer/mcp.py +249 -0
- opik_optimizer/evolutionary_optimizer/mutation_ops.py +306 -0
- opik_optimizer/evolutionary_optimizer/population_ops.py +228 -0
- opik_optimizer/evolutionary_optimizer/prompts.py +352 -0
- opik_optimizer/evolutionary_optimizer/reporting.py +28 -4
- opik_optimizer/evolutionary_optimizer/style_ops.py +86 -0
- opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py +90 -81
- opik_optimizer/few_shot_bayesian_optimizer/reporting.py +12 -5
- opik_optimizer/gepa_optimizer/__init__.py +3 -0
- opik_optimizer/gepa_optimizer/adapter.py +154 -0
- opik_optimizer/gepa_optimizer/gepa_optimizer.py +653 -0
- opik_optimizer/gepa_optimizer/reporting.py +181 -0
- opik_optimizer/logging_config.py +42 -7
- opik_optimizer/mcp_utils/__init__.py +22 -0
- opik_optimizer/mcp_utils/mcp.py +541 -0
- opik_optimizer/mcp_utils/mcp_second_pass.py +152 -0
- opik_optimizer/mcp_utils/mcp_simulator.py +116 -0
- opik_optimizer/mcp_utils/mcp_workflow.py +547 -0
- opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py +470 -134
- opik_optimizer/meta_prompt_optimizer/reporting.py +16 -2
- opik_optimizer/mipro_optimizer/_lm.py +30 -23
- opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py +52 -51
- opik_optimizer/mipro_optimizer/mipro_optimizer.py +126 -46
- opik_optimizer/mipro_optimizer/utils.py +2 -4
- opik_optimizer/optimizable_agent.py +21 -16
- opik_optimizer/optimization_config/chat_prompt.py +44 -23
- opik_optimizer/optimization_config/configs.py +3 -3
- opik_optimizer/optimization_config/mappers.py +9 -8
- opik_optimizer/optimization_result.py +22 -14
- opik_optimizer/reporting_utils.py +61 -10
- opik_optimizer/task_evaluator.py +9 -8
- opik_optimizer/utils/__init__.py +15 -0
- opik_optimizer/utils/colbert.py +236 -0
- opik_optimizer/{utils.py → utils/core.py} +160 -33
- opik_optimizer/utils/dataset_utils.py +49 -0
- opik_optimizer/utils/prompt_segments.py +186 -0
- opik_optimizer-2.0.0.dist-info/METADATA +345 -0
- opik_optimizer-2.0.0.dist-info/RECORD +74 -0
- opik_optimizer-2.0.0.dist-info/licenses/LICENSE +203 -0
- opik_optimizer-1.0.6.dist-info/METADATA +0 -181
- opik_optimizer-1.0.6.dist-info/RECORD +0 -50
- opik_optimizer-1.0.6.dist-info/licenses/LICENSE +0 -21
- {opik_optimizer-1.0.6.dist-info → opik_optimizer-2.0.0.dist-info}/WHEEL +0 -0
- {opik_optimizer-1.0.6.dist-info → opik_optimizer-2.0.0.dist-info}/top_level.txt +0 -0
opik_optimizer/evolutionary_optimizer/prompts.py (new file)

@@ -0,0 +1,352 @@

````python
# Centralized prompt templates used by EvolutionaryOptimizer. This file contains
# only string builders and constants; it has no side effects.


INFER_STYLE_SYSTEM_PROMPT = """You are an expert in linguistic analysis and prompt engineering. Your task is to analyze a few input-output examples from a dataset and provide a concise, actionable description of the desired output style. This description will be used to guide other LLMs in generating and refining prompts.

Focus on characteristics like:
- **Length**: (e.g., single word, short phrase, one sentence, multiple sentences, a paragraph)
- **Tone**: (e.g., factual, formal, informal, conversational, academic)
- **Structure**: (e.g., direct answer first, explanation then answer, list, yes/no then explanation)
- **Content Details**: (e.g., includes only the answer, includes reasoning, provides examples, avoids pleasantries)
- **Keywords/Phrasing**: Any recurring keywords or phrasing patterns in the outputs.

Provide a single string that summarizes this style. This summary should be directly usable as an instruction for another LLM.
For example: 'Outputs should be a single, concise proper noun.' OR 'Outputs should be a short paragraph explaining the reasoning, followed by a direct answer, avoiding conversational pleasantries.' OR 'Outputs are typically 1-2 sentences, providing a direct factual answer.'
Return ONLY this descriptive string, with no preamble or extra formatting.
"""


def style_inference_user_prompt(examples_str: str) -> str:
    return f"""Please analyze the following examples from a dataset and provide a concise, actionable description of the REQUIRED output style for the target LLM. Before describing the output style, make sure to understand the dataset content and structure as it can include input, output and metadata fields. This description will be used to guide other LLMs in generating and refining prompts.

{examples_str}

Based on these examples, what is the desired output style description?
Remember to focus on aspects like length, tone, structure, content details, and any recurring keywords or phrasing patterns in the outputs.
The description should be a single string that can be directly used as an instruction for another LLM.
Return ONLY this descriptive string.
"""


def semantic_mutation_system_prompt(output_style_guidance: str | None) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return (
        "You are a prompt engineering expert. Your goal is to modify prompts to improve their "
        f"effectiveness in eliciting specific types of answers, particularly matching the style: '{style}'. "
        "Follow the specific modification instruction provided."
    )


def synonyms_system_prompt() -> str:
    return (
        "You are a helpful assistant that provides synonyms. Return only the synonym word, "
        "no explanation or additional text."
    )


def rephrase_system_prompt() -> str:
    return (
        "You are a helpful assistant that rephrases text. Return only the modified phrase, "
        "no explanation or additional text."
    )


def fresh_start_system_prompt(output_style_guidance: str | None) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return (
        "You are an expert prompt engineer. Your task is to generate novel, effective prompts from scratch "
        "based on a task description, specifically aiming for prompts that elicit answers in the style: "
        f"'{style}'. Output ONLY a raw JSON list of strings."
    )


def variation_system_prompt(output_style_guidance: str | None) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""You are an expert prompt engineer specializing in creating diverse and effective prompts. Given an initial prompt, your task is to generate a diverse set of alternative prompts.

For each prompt variation, consider:
1. Different levels of specificity and detail, including significantly more detailed and longer versions.
2. Various ways to structure the instruction, exploring more complex sentence structures and phrasings.
3. Alternative phrasings that maintain the core intent but vary in style and complexity.
4. Different emphasis on key components, potentially elaborating on them.
5. Various ways to express constraints or requirements.
6. Different approaches to clarity and conciseness, but also explore more verbose and explanatory styles.
7. Alternative ways to guide the model's response format.
8. Consider variations that are substantially longer and more descriptive than the original.

The generated prompts should guide a target LLM to produce outputs in the following style: '{style}'

Return a JSON array of prompts with the following structure:
{{
    "prompts": [
        {{
            "prompt": "alternative prompt 1",
            "strategy": "brief description of the variation strategy used, e.g., 'focused on eliciting specific output style'"
        }},
        {{
            "prompt": "alternative prompt 2",
            "strategy": "brief description of the variation strategy used"
        }}
    ]
}}
Each prompt variation should aim to get the target LLM to produce answers matching the desired style: '{style}'.
"""


def llm_crossover_system_prompt(output_style_guidance: str | None) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""You are an expert prompt engineer specializing in creating novel prompts by intelligently blending existing ones.
Given two parent prompts, your task is to generate one or two new child prompts that effectively combine the strengths, styles, or core ideas of both parents.
The children should be coherent and aim to explore a potentially more effective region of the prompt design space, with a key goal of eliciting responses from the target language model in the following style: '{style}'.

Consider the following when generating children:
- Identify the key instructions, constraints, and desired output formats in each parent, paying attention to any hints about desired output style.
- Explore ways to merge these elements such that the resulting prompt strongly guides the target LLM towards the desired output style.
- You can create a child that is a direct blend, or one that takes a primary structure from one parent and incorporates specific elements from the other, always optimizing for clear instruction towards the desired output style.
- If generating two children, try to make them distinct from each other and from the parents, perhaps by emphasizing different aspects of the parental combination that could lead to the desired output style.

All generated prompts must aim for eliciting answers in the style: '{style}'.

Return a JSON object that is a list of both child prompts. Each child prompt is a list of LLM messages. Example:
[
    {{"role": "<role>", "content": "<content>"}},
    {{"role": "<role>", "content": "<content>"}}
]


"""


def radical_innovation_system_prompt(output_style_guidance: str | None) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""You are an expert prompt engineer and a creative problem solver.
Given a task description and an existing prompt for that task (which might be underperforming), your goal is to generate a new, significantly improved, and potentially very different prompt.
Do not just make minor edits. Think about alternative approaches, structures, and phrasings that could lead to better performance.
Consider clarity, specificity, constraints, and how to best guide the language model for the described task TO PRODUCE OUTPUTS IN THE FOLLOWING STYLE: '{style}'.
Return only the new prompt string, with no preamble or explanation.
"""


def llm_crossover_user_prompt(
    parent1_messages: list[dict[str, str]],
    parent2_messages: list[dict[str, str]],
    output_style_guidance: str | None,
) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""Parent Prompt 1:
'''{parent1_messages}'''

Parent Prompt 2:
'''{parent2_messages}'''

Desired output style from target LLM for children prompts: '{style}'

Please generate TWO child prompts by intelligently blending the ideas, styles, or structures from these two parents, ensuring the children aim to elicit the desired output style.
Follow the instructions provided in the system prompt regarding the JSON output format:
[
    {{"role": "<role>", "content": "<content>"}}, {{"role": "<role>", "content": "<content>"}}
]
"""


def mutation_strategy_prompts(output_style_guidance: str | None) -> dict[str, str]:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return {
        "rephrase": (
            "Create a different way to express the same instruction, possibly with a different "
            "length or structure, ensuring it still aims for an answer from the target LLM in the style of: "
            f"'{style}'."
        ),
        "simplify": (
            "Simplify the instruction while maintaining its core meaning, potentially making it more concise, "
            "to elicit an answer in the style of: "
            f"'{style}'."
        ),
        "elaborate": (
            "Add more relevant detail and specificity to the instruction, potentially increasing its length, "
            "but only if it helps achieve a more accurate answer from the target LLM in the style of: "
            f"'{style}'."
        ),
        "restructure": (
            "Change the structure of the instruction (e.g., reorder sentences, combine/split ideas) while keeping its intent, ensuring the new structure strongly guides towards an output in the style of: "
            f"'{style}'."
        ),
        "focus": (
            "Emphasize the key aspects of the instruction, perhaps by rephrasing or adding clarifying statements, "
            "to better elicit an answer in the style of: "
            f"'{style}'."
        ),
        "increase_complexity_and_detail": (
            "Significantly elaborate on this instruction. Add more details, examples, context, or constraints to make it more comprehensive. "
            "The goal of this elaboration is to make the prompt itself more detailed, so that it VERY CLEARLY guides the target LLM to produce a highly accurate final answer in the style of: "
            f"'{style}'. The prompt can be long if needed to achieve this output style."
        ),
    }


# ---------------------------------------------------------------------------
# MCP prompts
# ---------------------------------------------------------------------------


def mcp_tool_rewrite_system_prompt() -> str:
    return (
        "You are an expert prompt engineer tasked with refining MCP tool descriptions. "
        "Always respond with strictly valid JSON matching the requested schema."
    )


def mcp_tool_rewrite_user_prompt(
    *,
    tool_name: str,
    current_description: str,
    tool_metadata_json: str,
    num_variations: int,
) -> str:
    current_description = current_description.strip() or "(no description provided)"
    return f"""You are improving the description of the MCP tool `{tool_name}`.

Current description:
---
{current_description}
---

Tool metadata (JSON):
{tool_metadata_json}

Generate {num_variations} improved descriptions for this tool. Each description should:
- Clarify expected arguments and their semantics.
- Explain how the tool output should be used in the final response.
- Avoid changing the tool name or introducing unsupported behaviour.

Respond strictly as JSON of the form:
{{
  "prompts": [
    {{
      "tool_description": "...",
      "improvement_focus": "..."
    }}
  ]
}}
"""


def semantic_mutation_user_prompt(
    prompt_messages: list[dict[str, str]],
    task_description: str,
    output_style_guidance: str | None,
    strategy_instruction: str,
) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""Given this prompt: '{prompt_messages}'
Task context: {task_description}
Desired output style from target LLM: '{style}'
Instruction for this modification: {strategy_instruction}.
Return only the modified prompt message list, nothing else. Make sure to return a valid JSON object.
"""


def radical_innovation_user_prompt(
    task_description: str,
    output_style_guidance: str | None,
    existing_prompt_messages: list[dict[str, str]],
) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""Task Context:
{task_description}
Desired output style from target LLM: '{style}'

Existing Prompt (which may be underperforming):
'''{existing_prompt_messages}'''

Please generate a new, significantly improved, and potentially very different prompt for this task.
Focus on alternative approaches, better clarity, or more effective guidance for the language model, aiming for the desired output style.
Return only the new prompt list object.
"""


def fresh_start_user_prompt(
    task_description: str,
    output_style_guidance: str | None,
    num_to_generate: int,
) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""Here is a description of a task: ```{task_description}```

The goal is to generate prompts that will make a target LLM produce responses in the following style: ```{style}```.

Please generate {num_to_generate} diverse and effective prompt(s) for a language model to accomplish this task, ensuring they guide towards this specific output style.
Focus on clarity, completeness, and guiding the model effectively towards the desired style. Explore different structural approaches.

Example of valid response: [
    ["role": "<role>", "content": "<Prompt targeting specified style.>"],
    ["role": "<role>", "content": "<Another prompt designed for the output style.>"]
]

Your response MUST be a valid JSON list of AI messages. Do NOT include any other text, explanations, or Markdown formatting like ```json ... ``` around the list.
"""


def variation_user_prompt(
    initial_prompt_messages: list[dict[str, str]],
    task_description: str,
    output_style_guidance: str | None,
    num_variations: int,
) -> str:
    style = (
        output_style_guidance
        or "Produce clear, effective, and high-quality responses suitable for the task."
    )
    return f"""Initial prompt:'''{initial_prompt_messages}'''
Task context: ```{task_description}```
Desired output style from target LLM: '{style}'

Generate {num_variations} diverse alternative prompts based on the initial prompt above, keeping the task context and desired output style in mind.
All generated prompt variations should strongly aim to elicit answers from the target LLM matching the style: '{style}'.
For each variation, consider how to best achieve this style, e.g., by adjusting specificity, structure, phrasing, constraints, or by explicitly requesting it.

Return a JSON array of prompts with the following structure:
{{
    "prompts": [
        {{
            "prompt": [{{"role": "<role>", "content": "<content>"}}],
            "strategy": "brief description of the variation strategy used, e.g., 'direct instruction for target style'"
        }}
        // ... more prompts if num_variations > 1
    ]
}}
Ensure a good mix of variations, all targeting the specified output style from the end LLM.

Return a valid JSON object that is correctly escaped. Return nothing else, do not include any additional text or Markdown formatting.
"""
````
opik_optimizer/evolutionary_optimizer/reporting.py

```diff
@@ -1,6 +1,6 @@
 from contextlib import contextmanager
 from io import StringIO
-from typing import Any
+from typing import Any
 
 from rich.panel import Panel
 from rich.text import Text
@@ -20,6 +20,18 @@ PANEL_WIDTH = 70
 console = get_console()
 
 
+def display_tool_description(description: str, title: str, style: str) -> None:
+    panel = Panel(
+        Text(description),
+        title=title,
+        title_align="left",
+        border_style=style,
+        width=PANEL_WIDTH,
+        padding=(1, 2),
+    )
+    console.print(panel)
+
+
 @contextmanager
 def infer_output_style(verbose: int = 1) -> Any:
     class Reporter:
@@ -117,6 +129,16 @@ def initializing_population(verbose: int = 1) -> Any:
                     f"│ Generating {num_fresh_starts} fresh prompts based on the task description."
                 )
 
+        def failed_fresh_prompts(self, num_fresh_starts: int, error: str) -> None:
+            if verbose >= 1:
+                console.print(
+                    Text(
+                        f"│ Failed to generate {num_fresh_starts} fresh prompts: {error}",
+                        style="dim red",
+                    )
+                )
+                console.print("│")
+
         def success_fresh_prompts(self, num_fresh_starts: int) -> None:
             if verbose >= 1:
                 console.print(
@@ -155,7 +177,7 @@ def initializing_population(verbose: int = 1) -> Any:
                 )
                 console.print("│")
 
-        def end(self, population_prompts:
+        def end(self, population_prompts: list[chat_prompt.ChatPrompt]) -> None:
             if verbose >= 1:
                 console.print(
                     f"│ Successfully initialized population with {len(population_prompts)} prompts."
@@ -205,11 +227,13 @@ def evaluate_initial_population(verbose: int = 1) -> Any:
         if verbose >= 1:
             if score >= baseline_score:
                 console.print(
-                    Text(
+                    Text(
+                        f"\r Prompt {index + 1} score was: {score}.", style="green"
+                    )
                 )
             else:
                 console.print(
-                    Text(f"\r Prompt {index+1} score was: {score}.", style="dim")
+                    Text(f"\r Prompt {index + 1} score was: {score}.", style="dim")
                 )
 
     # Use our log suppression context manager and yield the reporter
```
opik_optimizer/evolutionary_optimizer/style_ops.py (new file)

@@ -0,0 +1,86 @@

```python
from typing import TYPE_CHECKING, Any

import logging

import opik

from . import prompts as evo_prompts
from . import reporting
from ..optimization_config import chat_prompt


logger = logging.getLogger(__name__)


class StyleOps:
    if TYPE_CHECKING:
        verbose: int
        _call_model: Any

    def _infer_output_style_from_dataset(
        self, dataset: opik.Dataset, prompt: chat_prompt.ChatPrompt, n_examples: int = 5
    ) -> str | None:
        """Analyzes dataset examples to infer the desired output style using the LLM."""
        with reporting.infer_output_style(
            verbose=self.verbose
        ) as report_infer_output_style:
            report_infer_output_style.start_style_inference()

            try:
                items_to_process = dataset.get_items(n_examples)
            except Exception as e:
                report_infer_output_style.error(
                    f"Failed to get items from dataset '{dataset.name}': {e}"
                )
                return None

            if not items_to_process:
                report_infer_output_style.error(
                    f"Dataset '{dataset.name}' is empty. Cannot infer output style."
                )
                return None

            if len(items_to_process) < min(n_examples, 2):
                report_infer_output_style.error(
                    f"Not enough dataset items (found {len(items_to_process)}) to reliably infer output style. Need at least {min(n_examples, 2)}."
                )
                return None

            examples_str = ""
            for i, item_content in enumerate(items_to_process):
                filtered_content: dict[str, str] = {
                    x: y for x, y in item_content.items() if x != "id"
                }
                examples_str += (
                    f"Example {i + 1}:\nDataset Item:\n{filtered_content}\n---\n"
                )

            user_prompt_for_style_inference = evo_prompts.style_inference_user_prompt(
                examples_str
            )

            try:
                inferred_style = self._call_model(
                    messages=[
                        {
                            "role": "system",
                            "content": evo_prompts.INFER_STYLE_SYSTEM_PROMPT,
                        },
                        {"role": "user", "content": user_prompt_for_style_inference},
                    ],
                    is_reasoning=True,
                )
                inferred_style = inferred_style.strip()
                if inferred_style:
                    report_infer_output_style.success(inferred_style)
                    return inferred_style
                else:
                    report_infer_output_style.error(
                        "LLM returned empty string for inferred output style."
                    )
                    return None
            except Exception as e:
                report_infer_output_style.error(
                    f"Error during output style inference: {e}"
                )
                return None
```