opik-optimizer 0.8.0__py3-none-any.whl → 0.9.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. opik_optimizer/__init__.py +15 -26
  2. opik_optimizer/base_optimizer.py +28 -44
  3. opik_optimizer/data/hotpot-500.json +501 -1001
  4. opik_optimizer/datasets/__init__.py +6 -7
  5. opik_optimizer/datasets/hotpot_qa.py +2 -1
  6. opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py +742 -726
  7. opik_optimizer/evolutionary_optimizer/reporting.py +246 -0
  8. opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py +296 -194
  9. opik_optimizer/few_shot_bayesian_optimizer/reporting.py +119 -0
  10. opik_optimizer/meta_prompt_optimizer/__init__.py +5 -0
  11. opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py +816 -0
  12. opik_optimizer/meta_prompt_optimizer/reporting.py +140 -0
  13. opik_optimizer/mipro_optimizer/__init__.py +1 -1
  14. opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py +12 -20
  15. opik_optimizer/mipro_optimizer/mipro_optimizer.py +32 -52
  16. opik_optimizer/mipro_optimizer/utils.py +1 -23
  17. opik_optimizer/optimization_config/chat_prompt.py +106 -0
  18. opik_optimizer/optimization_config/configs.py +2 -21
  19. opik_optimizer/optimization_config/mappers.py +1 -1
  20. opik_optimizer/optimization_result.py +57 -85
  21. opik_optimizer/reporting_utils.py +180 -0
  22. opik_optimizer/task_evaluator.py +33 -25
  23. opik_optimizer/utils.py +187 -3
  24. {opik_optimizer-0.8.0.dist-info → opik_optimizer-0.9.0rc0.dist-info}/METADATA +15 -31
  25. opik_optimizer-0.9.0rc0.dist-info/RECORD +48 -0
  26. {opik_optimizer-0.8.0.dist-info → opik_optimizer-0.9.0rc0.dist-info}/WHEEL +1 -1
  27. opik_optimizer/few_shot_bayesian_optimizer/prompt_parameter.py +0 -91
  28. opik_optimizer/few_shot_bayesian_optimizer/prompt_templates.py +0 -80
  29. opik_optimizer/integrations/__init__.py +0 -0
  30. opik_optimizer/meta_prompt_optimizer.py +0 -1151
  31. opik_optimizer-0.8.0.dist-info/RECORD +0 -45
  32. {opik_optimizer-0.8.0.dist-info → opik_optimizer-0.9.0rc0.dist-info}/licenses/LICENSE +0 -0
  33. {opik_optimizer-0.8.0.dist-info → opik_optimizer-0.9.0rc0.dist-info}/top_level.txt +0 -0
opik_optimizer/utils.py CHANGED
@@ -1,20 +1,100 @@
1
1
  """Utility functions and constants for the optimizer package."""
2
2
 
3
+ from typing import Dict, Any, Optional, TYPE_CHECKING, Type, Literal, Final
4
+ from types import TracebackType
5
+
3
6
  import opik
7
+ from opik.api_objects.opik_client import Opik
8
+ from opik.api_objects.optimization import Optimization
9
+
10
+ import json
4
11
  import logging
5
12
  import random
6
13
  import string
7
- from opik.api_objects.opik_client import Opik
8
-
9
- from typing import List, Dict, Any, Optional, Callable, TYPE_CHECKING
14
+ import base64
15
+ import urllib.parse
16
+ from rich import console
10
17
 
11
18
  # Type hint for OptimizationResult without circular import
12
19
  if TYPE_CHECKING:
13
20
  from .optimization_result import OptimizationResult
14
21
 
22
+ ALLOWED_URL_CHARACTERS: Final[str] = ":/&?="
15
23
  logger = logging.getLogger(__name__)
16
24
 
17
25
 
26
+ class OptimizationContextManager:
27
+ """
28
+ Context manager for handling optimization lifecycle.
29
+ Automatically updates optimization status to "completed" or "cancelled" based on context exit.
30
+ """
31
+
32
+ def __init__(
33
+ self,
34
+ client: Opik,
35
+ dataset_name: str,
36
+ objective_name: str,
37
+ name: Optional[str] = None,
38
+ metadata: Optional[Dict[str, Any]] = None,
39
+ ):
40
+ """
41
+ Initialize the optimization context.
42
+
43
+ Args:
44
+ client: The Opik client instance
45
+ dataset_name: Name of the dataset for optimization
46
+ objective_name: Name of the optimization objective
47
+ name: Optional name for the optimization
48
+ metadata: Optional metadata for the optimization
49
+ """
50
+ self.client = client
51
+ self.dataset_name = dataset_name
52
+ self.objective_name = objective_name
53
+ self.name = name
54
+ self.metadata = metadata
55
+ self.optimization: Optional[Optimization] = None
56
+
57
+ def __enter__(self) -> Optional[Optimization]:
58
+ """Create and return the optimization."""
59
+ try:
60
+ self.optimization = self.client.create_optimization(
61
+ dataset_name=self.dataset_name,
62
+ objective_name=self.objective_name,
63
+ name=self.name,
64
+ metadata=self.metadata,
65
+ )
66
+ if self.optimization:
67
+ return self.optimization
68
+ else:
69
+ return None
70
+ except Exception:
71
+ logger.warning(
72
+ "Opik server does not support optimizations. Please upgrade opik."
73
+ )
74
+ logger.warning("Continuing without Opik optimization tracking.")
75
+ return None
76
+
77
+ def __exit__(
78
+ self,
79
+ exc_type: Optional[Type[BaseException]],
80
+ exc_val: Optional[BaseException],
81
+ exc_tb: Optional[TracebackType],
82
+ ) -> Literal[False]:
83
+ """Update optimization status based on context exit."""
84
+ if self.optimization is None:
85
+ return False
86
+
87
+ try:
88
+ if exc_type is None:
89
+ self.optimization.update(status="completed")
90
+ else:
91
+ self.optimization.update(status="cancelled")
92
+ except Exception as e:
93
+ logger.error(f"Failed to update optimization status: {e}")
94
+
95
+ return False
96
+
97
+
18
98
  def format_prompt(prompt: str, **kwargs: Any) -> str:
19
99
  """
20
100
  Format a prompt string with the given keyword arguments.
@@ -76,5 +156,109 @@ def get_random_seed() -> int:
76
156
 
77
157
  return random.randint(0, 2**32 - 1)
78
158
 
159
+
79
160
  def random_chars(n: int) -> str:
80
161
  return "".join(random.choice(string.ascii_letters) for _ in range(n))
162
+
163
+
164
+ def disable_experiment_reporting():
165
+ import opik.evaluation.report
166
+
167
+ opik.evaluation.report._patch_display_experiment_results = opik.evaluation.report.display_experiment_results
168
+ opik.evaluation.report._patch_display_experiment_link = opik.evaluation.report.display_experiment_link
169
+ opik.evaluation.report.display_experiment_results = lambda *args, **kwargs: None
170
+ opik.evaluation.report.display_experiment_link = lambda *args, **kwargs: None
171
+
172
+
173
+ def enable_experiment_reporting():
174
+ import opik.evaluation.report
175
+
176
+ try:
177
+ opik.evaluation.report.display_experiment_results = opik.evaluation.report._patch_display_experiment_results
178
+ opik.evaluation.report.display_experiment_link = opik.evaluation.report._patch_display_experiment_link
179
+ except AttributeError:
180
+ pass
181
+
182
+
183
+ def json_to_dict(json_str: str) -> Any:
184
+ cleaned_json_string = json_str.strip()
185
+
186
+ try:
187
+ return json.loads(cleaned_json_string)
188
+ except json.JSONDecodeError:
189
+ if cleaned_json_string.startswith("```json"):
190
+ cleaned_json_string = cleaned_json_string[7:]
191
+ if cleaned_json_string.endswith("```"):
192
+ cleaned_json_string = cleaned_json_string[:-3]
193
+ elif cleaned_json_string.startswith("```"):
194
+ cleaned_json_string = cleaned_json_string[3:]
195
+ if cleaned_json_string.endswith("```"):
196
+ cleaned_json_string = cleaned_json_string[:-3]
197
+
198
+ try:
199
+ return json.loads(cleaned_json_string)
200
+ except json.JSONDecodeError as e:
201
+ print(f"Failed to parse JSON string: {json_str}")
202
+ logger.debug(f"Failed to parse JSON string: {json_str}")
203
+ raise e
204
+
205
+
206
+ def optimization_context(
207
+ client: Opik,
208
+ dataset_name: str,
209
+ objective_name: str,
210
+ name: Optional[str] = None,
211
+ metadata: Optional[Dict[str, Any]] = None,
212
+ ) -> OptimizationContextManager:
213
+ """
214
+ Create a context manager for handling optimization lifecycle.
215
+ Automatically updates optimization status to "completed" or "cancelled" based on context exit.
216
+
217
+ Args:
218
+ client: The Opik client instance
219
+ dataset_name: Name of the dataset for optimization
220
+ objective_name: Name of the optimization objective
221
+ name: Optional name for the optimization
222
+ metadata: Optional metadata for the optimization
223
+
224
+ Returns:
225
+ OptimizationContextManager: A context manager that handles optimization lifecycle
226
+ """
227
+ return OptimizationContextManager(
228
+ client=client,
229
+ dataset_name=dataset_name,
230
+ objective_name=objective_name,
231
+ name=name,
232
+ metadata=metadata,
233
+ )
234
+
235
+
236
+ def ensure_ending_slash(url: str) -> str:
237
+ return url.rstrip("/") + "/"
238
+
239
+
240
+ def get_optimization_run_url_by_id(
241
+ dataset_id: str, optimization_id: str, url_override: str
242
+ ) -> str:
243
+ encoded_opik_url = base64.b64encode(url_override.encode("utf-8")).decode("utf-8")
244
+
245
+ run_path = urllib.parse.quote(
246
+ f"v1/session/redirect/optimizations/?optimization_id={optimization_id}&dataset_id={dataset_id}&path={encoded_opik_url}",
247
+ safe=ALLOWED_URL_CHARACTERS,
248
+ )
249
+ return urllib.parse.urljoin(ensure_ending_slash(url_override), run_path)
250
+
251
+
252
+ def display_optimization_run_link(
253
+ optimization_id: str, dataset_id: str, url_override: str
254
+ ) -> None:
255
+ console_container = console.Console()
256
+
257
+ optimization_url = get_optimization_run_url_by_id(
258
+ optimization_id=optimization_id,
259
+ dataset_id=dataset_id,
260
+ url_override=url_override,
261
+ )
262
+ console_container.print(
263
+ f"View the optimization run [link={optimization_url}]in your Opik dashboard[/link]."
264
+ )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: opik_optimizer
3
- Version: 0.8.0
3
+ Version: 0.9.0rc0
4
4
  Summary: Agent optimization with Opik
5
5
  Home-page: https://github.com/comet-ml/opik
6
6
  Author: Comet ML
@@ -13,7 +13,7 @@ Requires-Python: >=3.9,<3.13
13
13
  Description-Content-Type: text/markdown
14
14
  License-File: LICENSE
15
15
  Requires-Dist: opik>=1.7.17
16
- Requires-Dist: dspy<3,>=2.6.18
16
+ Requires-Dist: dspy<=2.6.24,>=2.6.18
17
17
  Requires-Dist: litellm
18
18
  Requires-Dist: tqdm
19
19
  Requires-Dist: datasets
@@ -113,24 +113,20 @@ Available sample datasets for testing:
113
113
 
114
114
  ```python
115
115
  from opik.evaluation.metrics import LevenshteinRatio
116
- from opik_optimizer import FewShotBayesianOptimizer
117
- from opik_optimizer.demo import get_or_create_dataset
118
-
119
- from opik_optimizer import (
120
- MetricConfig,
121
- TaskConfig,
122
- from_dataset_field,
123
- from_llm_response_text,
124
- )
116
+ from opik_optimizer import FewShotBayesianOptimizer, ChatPrompt
117
+ from opik_optimizer.datasets import hotpot_300
125
118
 
126
119
  # Load a sample dataset
127
- hot_pot_dataset = get_or_create_dataset("hotpot-300")
120
+ hot_pot_dataset = hotpot_300()
128
121
 
129
122
  # Define the instruction for your chat prompt.
130
123
  # Input parameters from dataset examples will be interpolated into the full prompt.
131
- prompt_instruction = """
132
- Answer the question based on the provided context.
133
- """
124
+ prompt = ChatPrompt(
125
+ messages=[
126
+ {"role": "system", "content": "You are a helpful assistant."},
127
+ {"role": "user", "content": "{question}"}
128
+ ]
129
+ )
134
130
  project_name = "optimize-few-shot-bayesian-hotpot" # For Comet logging
135
131
 
136
132
  optimizer = FewShotBayesianOptimizer(
@@ -142,26 +138,14 @@ optimizer = FewShotBayesianOptimizer(
142
138
  seed=42,
143
139
  )
144
140
 
145
- metric_config = MetricConfig(
146
- metric=LevenshteinRatio(project_name=project_name), # Metric for evaluation
147
- inputs={
148
- "output": from_llm_response_text(), # Get output from LLM
149
- "reference": from_dataset_field(name="answer"), # Get reference from dataset
150
- },
151
- )
152
-
153
- task_config = TaskConfig(
154
- instruction_prompt=prompt_instruction,
155
- input_dataset_fields=["question"], # Fields from dataset to use as input
156
- output_dataset_field="answer", # Field in dataset for reference answer
157
- use_chat_prompt=True, # Use chat-style prompting
158
- )
141
+ def levenshtein_ratio(dataset_item, llm_output):
142
+ return LevenshteinRatio().score(reference=dataset_item["answer"], output=llm_output)
159
143
 
160
144
  # Run the optimization
161
145
  result = optimizer.optimize_prompt(
146
+ prompt=prompt,
162
147
  dataset=hot_pot_dataset,
163
- metric_config=metric_config,
164
- task_config=task_config,
148
+ metric=levenshtein_ratio,
165
149
  n_trials=10, # Number of optimization trials
166
150
  n_samples=150, # Number of dataset samples for evaluation per trial
167
151
  )
@@ -0,0 +1,48 @@
1
+ opik_optimizer/__init__.py,sha256=wAeC8Hy0ENiQnLkoqSBeJfkqaw_vrZRfMka8a6qnX6Y,1089
2
+ opik_optimizer/_throttle.py,sha256=ztub8qlwz4u0GVA2TIoLig0D1Cs0hJ7_o_SnT_C7Nmk,1360
3
+ opik_optimizer/base_optimizer.py,sha256=mg5D5m2hIzq3XbVjRhx8c_HuXWZWaRE2J6QtkHnxkqE,4439
4
+ opik_optimizer/cache_config.py,sha256=EzF4RAzxhSG8vtMJANdiUpNHQ9HzL2CrCXp0iik0f4A,580
5
+ opik_optimizer/logging_config.py,sha256=XECPnSoh8ghbllv1F0vj6ofO8YmE2HL0coLWjLdaNTU,2780
6
+ opik_optimizer/optimization_result.py,sha256=PeDIoNIHaJsi9WsDoKPgO6sW3CkPKZKK7RvY8tmjUN0,7508
7
+ opik_optimizer/reporting_utils.py,sha256=pRcRhE9w1q6PVdTmqrTyqIlUmeMAknwJJFT99FG0tuk,5523
8
+ opik_optimizer/task_evaluator.py,sha256=xqkrnh72_U23QSW_8X48mh83LaFFFooAIgpgFWol8_c,4040
9
+ opik_optimizer/utils.py,sha256=y7I58vESRphuMLA4cfH3sNiSuntyiqG8hwK5UuwVdt4,8239
10
+ opik_optimizer/data/hotpot-500.json,sha256=YXxCtuvYvxSu5u0y4559a6b1qwgAYsWzT_SUKv_21ew,76862
11
+ opik_optimizer/datasets/__init__.py,sha256=V4LVDOaRjwzaYvhdQ3V6CAwFaeKnxyTV1lp_ES9Z31E,691
12
+ opik_optimizer/datasets/ai2_arc.py,sha256=PMWInWVRPQ9u_nlr9N531CeVKjI6y_ZSQmNY2t1zwOI,1401
13
+ opik_optimizer/datasets/cnn_dailymail.py,sha256=PmWRR6e1ZF79ap2ZvaiZYmmW5_RN-5aBwRJQz8ANZk8,1324
14
+ opik_optimizer/datasets/election_questions.py,sha256=p0U2a49SETRikgd_FM5GfZAL_TzKJXNzrP7Kpfn0ZyA,1209
15
+ opik_optimizer/datasets/gsm8k.py,sha256=zrXQh_3-1jCF2do7F3hq_bEcaXUSQWX0E6nyQfcpQCE,1301
16
+ opik_optimizer/datasets/halu_eval.py,sha256=wOFbPdJ2jcQ3s3FpzDFGgx4rmvJHk9aD2WHxJrIascs,1420
17
+ opik_optimizer/datasets/hotpot_qa.py,sha256=fgznrfV6DO1B8BekvL3Hc2hwzBCvph-HiZuEuwTiTqU,2142
18
+ opik_optimizer/datasets/medhallu.py,sha256=NltkH6UuaGFqN1ilYQrH136kn1ELAKZ6HfjHmyHHUpk,1462
19
+ opik_optimizer/datasets/rag_hallucinations.py,sha256=3ddmUL7dp01iGYkvJ9uaTKFEuLnqrJJ29Ww9z5m_-3g,1421
20
+ opik_optimizer/datasets/ragbench.py,sha256=bCt3S5KsfW_2wDK009aiGRXiIEHlLgL_OlXrXBFWEPI,1411
21
+ opik_optimizer/datasets/tiny_test.py,sha256=ysgkfCHsi018b0qy8OtuL2BUkOo-YEZVu4AnscJCA4E,1823
22
+ opik_optimizer/datasets/truthful_qa.py,sha256=xbRjW0UOm7oDN3jAnTZD7HChgDGspwhAhFpHV7zTtag,4166
23
+ opik_optimizer/demo/__init__.py,sha256=KSpFYhzN7fTmLEsIaciRHwxcJDeAiX5NDmYLdPsfpT8,150
24
+ opik_optimizer/demo/cache.py,sha256=5WqK8rSiijzU6s4VHIjLuL1LR5i1yHtY-x5FZTduSus,3669
25
+ opik_optimizer/demo/datasets.py,sha256=MezQlG4Q_cgSH7zQOmJcDwkGU8JV0xKSnZwCJGaj-88,2494
26
+ opik_optimizer/evolutionary_optimizer/__init__.py,sha256=OQ2ART5g-7EVGOISvTGY-AbmEyyDFEJJCsmJBzGJIpw,57
27
+ opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py,sha256=cnpga8ytSjx5lNq2URLBCyV9s5r1s9_eKD4BU7rWW_g,76259
28
+ opik_optimizer/evolutionary_optimizer/reporting.py,sha256=Gl52sH7XaU4GXUhFt_FcfjYFN3MghnDt1ISULATjbP4,9944
29
+ opik_optimizer/few_shot_bayesian_optimizer/__init__.py,sha256=VuH7FOROyGcjMPryejtZC-5Y0QHlVTFLTGUDgNqRAFw,113
30
+ opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py,sha256=Pb0GHUEf5DdnLJaYI5PVnar2F-hhWslEEYwWftnZ0Gs,24332
31
+ opik_optimizer/few_shot_bayesian_optimizer/reporting.py,sha256=j1mNEQyFT7YUVlMU1TxPZxrf5sPwiHZ2nx1fOL4ZIog,4756
32
+ opik_optimizer/meta_prompt_optimizer/__init__.py,sha256=syiN2_fMm5iZDQezZCHYe-ZiGOIPlBkLt49Sa1kuR70,97
33
+ opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py,sha256=LrN8kGoB-qm5Tvncpmcy2qd79vxkcMokei2sMXrv0jw,34404
34
+ opik_optimizer/meta_prompt_optimizer/reporting.py,sha256=4Lju2uxSBkCVYyJ6ZSS-GjDFVnmP14R6XVtr-tEFlL0,5765
35
+ opik_optimizer/mipro_optimizer/__init__.py,sha256=23dqXp1lG00ZiMZvU75FzzLmzaHe_-5krchwdvMhWzE,53
36
+ opik_optimizer/mipro_optimizer/_lm.py,sha256=bcTy2Y5HjSaFQOATIpUaA86eIp3vKHaMuDI2_RvN2ww,16376
37
+ opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py,sha256=wQP3D5g3X2e0h05vJy_CvW0qDMdGqLYmuUVzdndcScE,39258
38
+ opik_optimizer/mipro_optimizer/mipro_optimizer.py,sha256=pfD8toZVCpqSDdGwyOUvAeyORyGyYqrua71JFzVw2GA,23305
39
+ opik_optimizer/mipro_optimizer/utils.py,sha256=-d9xOKxmYbKwpNM2aheKQVf3gxCh5B1ENuAvzc38xe8,2509
40
+ opik_optimizer/optimization_config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
+ opik_optimizer/optimization_config/chat_prompt.py,sha256=1_15vmtIawzp60Eu_lROu7Py2NLjeiRzk0Jhutu7pbU,3485
42
+ opik_optimizer/optimization_config/configs.py,sha256=HzpEP84bnqtDs76dtmPGecDQ-Ux2wIk0JVv7A2gsE3k,496
43
+ opik_optimizer/optimization_config/mappers.py,sha256=RMUWwYvXNCJe6w1jYiT6EX218UYZS1PUMMe12OjNEug,1692
44
+ opik_optimizer-0.9.0rc0.dist-info/licenses/LICENSE,sha256=dTRSwwCHdWeSjzodvnivYqcwi8x3Qfr21yv65QUWWBE,1062
45
+ opik_optimizer-0.9.0rc0.dist-info/METADATA,sha256=ir4vFau8I-2ZMDTojCxnIdy-VUBgLTUjPjSGaqX0HlY,6591
46
+ opik_optimizer-0.9.0rc0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
47
+ opik_optimizer-0.9.0rc0.dist-info/top_level.txt,sha256=ondOlpq6_yFckqpxoAHSfzZS2N-JfgmA-QQhOJfz7m0,15
48
+ opik_optimizer-0.9.0rc0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.8.0)
2
+ Generator: setuptools (80.9.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,91 +0,0 @@
1
- import pydantic
2
- from typing import List, Dict, Literal
3
- import json
4
-
5
- from . import prompt_templates
6
-
7
- ChatItem = Dict[Literal["role", "content"], str]
8
-
9
-
10
- class ChatPromptParameter(pydantic.BaseModel):
11
- name: str
12
- instruction: str
13
- task_input_parameters: List[str]
14
- task_output_parameter: str
15
- demo_examples: List[Dict[str, str]] = pydantic.Field(default_factory=list)
16
-
17
- _few_shot_system_prompt_intro: str = "You are an intelligent assistant that learns from few-shot examples provided earlier in the conversation. Whenever you respond, carefully follow the structure, tone, and format of previous assistant replies, using them as a guide"
18
-
19
- def as_template(self) -> prompt_templates.ChatPromptTemplate:
20
- if not self.demo_examples:
21
- return prompt_templates.ChatPromptTemplate(
22
- chat_template=[
23
- {
24
- "role": "system",
25
- "content": self.instruction
26
- },
27
- {
28
- "role": "user",
29
- "content": json.dumps({param: f"{{{{{param}}}}}" for param in self.task_input_parameters})
30
- }
31
- ]
32
- )
33
-
34
- return prompt_templates.ChatPromptTemplate(
35
- chat_template=[
36
- {
37
- "role": "system",
38
- "content": self.instruction + f"\n\n{self._few_shot_system_prompt_intro}"
39
- },
40
- *self._render_demos(),
41
- {
42
- "role": "user",
43
- "content": json.dumps({param: f"{{{{{param}}}}}" for param in self.task_input_parameters})
44
- }
45
- ]
46
- )
47
-
48
- def _render_demos(self) -> List[ChatItem]:
49
- """
50
- Renders demo examples in the following format:
51
-
52
- [
53
- {
54
- "role": "user",
55
- "content": "\n{\n\"input_field1\": \"value1\",\n\"input_field2\": \"value2\"\n}\n"
56
- },
57
- {
58
- "role": "assistant",
59
- "content": "expected_response_1"
60
- },
61
- {
62
- "role": "user",
63
- "content": "\n{\n\"input_field1\": \"value3\",\n\"input_field2\": \"value4\"\n}\n"
64
- },
65
- {
66
- "role": "assistant",
67
- "content": "expected_response_2"
68
- }
69
- ]
70
- """
71
- chat_items: List[ChatItem] = []
72
-
73
- for example in self.demo_examples:
74
- inputs = {param: example[param] for param in self.task_input_parameters}
75
-
76
- formatted_input = json.dumps(inputs, indent=2)
77
-
78
- user_message: ChatItem = {
79
- "role": "user",
80
- "content": f"\n{formatted_input}\n"
81
- }
82
-
83
- assistant_message: ChatItem = {
84
- "role": "assistant",
85
- "content": example[self.task_output_parameter]
86
- }
87
-
88
- chat_items.append(user_message)
89
- chat_items.append(assistant_message)
90
-
91
- return chat_items
@@ -1,80 +0,0 @@
1
- import abc
2
- from typing import Dict, List, Literal, Any
3
- from typing_extensions import override
4
- import opik
5
- from opik.api_objects.prompt import prompt_template as opik_prompt_template
6
-
7
- ChatItem = Dict[Literal["role", "content"], str]
8
-
9
-
10
- class BaseTemplate(abc.ABC):
11
- @abc.abstractmethod
12
- def format(self, **kwargs: Any) -> Any:
13
- raise NotImplementedError
14
-
15
- class PromptTemplate(BaseTemplate):
16
- """Wrapper for opik PromptTemplate which is a subclass of BaseTemplate."""
17
- def __init__(
18
- self,
19
- template: str,
20
- validate_placeholders: bool = False,
21
- type: opik.PromptType = opik.PromptType.MUSTACHE
22
- ) -> None:
23
- self._opik_prompt_template = opik_prompt_template.PromptTemplate(
24
- template=template,
25
- validate_placeholders=validate_placeholders,
26
- type=type
27
- )
28
-
29
- @override
30
- def format(self, **kwargs: Any) -> str:
31
- return self._opik_prompt_template.format(**kwargs)
32
-
33
-
34
- class ChatItemTemplate(BaseTemplate):
35
- def __init__(
36
- self,
37
- role: str,
38
- prompt_template: PromptTemplate
39
- ) -> None:
40
- self._role = role
41
- self._prompt_template = prompt_template
42
-
43
- @override
44
- def format(self, **kwargs: Any) -> ChatItem:
45
- return {
46
- "role": self._role,
47
- "content": self._prompt_template.format(**kwargs)
48
- }
49
-
50
- class ChatPromptTemplate(BaseTemplate):
51
- def __init__(
52
- self,
53
- chat_template: List[Dict[str, str]],
54
- type: opik.PromptType = opik.PromptType.MUSTACHE,
55
- validate_placeholders: bool = False,
56
- ) -> None:
57
- self._raw_chat_template = chat_template
58
- self._type = type
59
- self._validate_placeholders = validate_placeholders
60
- self._init_chat_template_items()
61
-
62
- def _init_chat_template_items(self) -> None:
63
- self._chat_template_items: List[ChatItemTemplate] = [
64
- ChatItemTemplate(
65
- role=item["role"],
66
- prompt_template=PromptTemplate(
67
- item["content"],
68
- type=self._type,
69
- validate_placeholders=self._validate_placeholders,
70
- )
71
- )
72
- for item in self._raw_chat_template
73
- ]
74
-
75
- @override
76
- def format(self, **kwargs: Any) -> List[ChatItem]:
77
- return [
78
- item.format(**kwargs)
79
- for item in self._chat_template_items
80
- ]
File without changes