opik-optimizer 2.1.3__py3-none-any.whl → 2.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. opik_optimizer/__init__.py +0 -2
  2. opik_optimizer/base_optimizer.py +313 -144
  3. opik_optimizer/evolutionary_optimizer/crossover_ops.py +31 -4
  4. opik_optimizer/evolutionary_optimizer/evaluation_ops.py +23 -3
  5. opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py +122 -95
  6. opik_optimizer/evolutionary_optimizer/mcp.py +11 -6
  7. opik_optimizer/evolutionary_optimizer/mutation_ops.py +25 -5
  8. opik_optimizer/evolutionary_optimizer/population_ops.py +26 -10
  9. opik_optimizer/evolutionary_optimizer/reporting.py +5 -5
  10. opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py +53 -99
  11. opik_optimizer/few_shot_bayesian_optimizer/reporting.py +4 -4
  12. opik_optimizer/gepa_optimizer/gepa_optimizer.py +345 -201
  13. opik_optimizer/gepa_optimizer/reporting.py +291 -22
  14. opik_optimizer/hierarchical_reflective_optimizer/hierarchical_reflective_optimizer.py +90 -167
  15. opik_optimizer/hierarchical_reflective_optimizer/prompts.py +7 -1
  16. opik_optimizer/hierarchical_reflective_optimizer/reporting.py +168 -75
  17. opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py +185 -205
  18. opik_optimizer/meta_prompt_optimizer/reporting.py +4 -4
  19. opik_optimizer/mipro_optimizer/__init__.py +2 -2
  20. opik_optimizer/mipro_optimizer/_lm.py +4 -4
  21. opik_optimizer/mipro_optimizer/{_mipro_optimizer_v2.py → mipro_optimizer_v2.py} +1 -7
  22. opik_optimizer/mipro_optimizer/utils.py +1 -0
  23. opik_optimizer/optimizable_agent.py +7 -4
  24. opik_optimizer/optimization_config/chat_prompt.py +7 -10
  25. opik_optimizer/parameter_optimizer/parameter_optimizer.py +188 -40
  26. opik_optimizer/parameter_optimizer/reporting.py +148 -0
  27. opik_optimizer/reporting_utils.py +60 -15
  28. opik_optimizer/utils/__init__.py +3 -0
  29. opik_optimizer/utils/candidate_utils.py +52 -0
  30. opik_optimizer/utils/core.py +35 -2
  31. opik_optimizer/utils/prompt_segments.py +1 -2
  32. {opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/METADATA +2 -3
  33. {opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/RECORD +36 -36
  34. opik_optimizer/evolutionary_optimizer/llm_support.py +0 -136
  35. opik_optimizer/mipro_optimizer/mipro_optimizer.py +0 -680
  36. {opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/WHEEL +0 -0
  37. {opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/licenses/LICENSE +0 -0
  38. {opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/top_level.txt +0 -0
opik_optimizer/reporting_utils.py
@@ -1,5 +1,6 @@
 import json
 import logging
+import re
 from contextlib import contextmanager
 from typing import Any
 
@@ -14,6 +15,24 @@ from .utils import get_optimization_run_url_by_id
 PANEL_WIDTH = 70
 
 
+def safe_percentage_change(current: float, baseline: float) -> tuple[float, bool]:
+    """
+    Calculate percentage change safely, handling division by zero.
+
+    Args:
+        current: Current value
+        baseline: Baseline value to compare against
+
+    Returns:
+        Tuple of (percentage_change, has_percentage) where:
+        - percentage_change: The percentage change if calculable, otherwise 0
+        - has_percentage: True if percentage was calculated, False if baseline was zero
+    """
+    if baseline == 0:
+        return 0.0, False
+    return ((current - baseline) / baseline) * 100, True
+
+
 def get_console(*args: Any, **kwargs: Any) -> Console:
     console = Console(*args, **kwargs)
     console.is_jupyter = False
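
The new helper lets reporting code compute relative improvement without risking a ZeroDivisionError when the baseline score is 0. A minimal usage sketch (hypothetical scores, assuming the function is imported from opik_optimizer.reporting_utils):

    from opik_optimizer.reporting_utils import safe_percentage_change

    change, has_pct = safe_percentage_change(current=0.82, baseline=0.75)
    if has_pct:
        print(f"improved by {change:.2f}%")  # improved by 9.33%
    else:
        print("baseline was zero; no percentage to report")

    # A zero baseline returns a sentinel instead of raising:
    assert safe_percentage_change(0.5, 0.0) == (0.0, False)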
@@ -36,35 +55,61 @@ def convert_tqdm_to_rich(description: str | None = None, verbose: int = 1) -> Any
 
     from opik.evaluation import report
 
+    # Store original functions
+    original_display_experiment_results = report.display_experiment_results
+    original_display_experiment_link = report.display_experiment_link
+
+    # Replace with no-ops
     report.display_experiment_results = lambda *args, **kwargs: None
     report.display_experiment_link = lambda *args, **kwargs: None
 
     try:
         yield
     finally:
+        # Restore everything
         opik.evaluation.engine.evaluation_tasks_executor._tqdm = original__tqdm
+        report.display_experiment_results = original_display_experiment_results
+        report.display_experiment_link = original_display_experiment_link
 
 
 @contextmanager
 def suppress_opik_logs() -> Any:
     """Suppress Opik startup logs by temporarily increasing the log level."""
-    # Optimizer log level
-    optimizer_logger = logging.getLogger("opik_optimizer")
-
-    # Get the Opik logger
-    opik_logger = logging.getLogger("opik.api_objects.opik_client")
+    # Get all loggers we need to suppress
+    opik_client_logger = logging.getLogger("opik.api_objects.opik_client")
+    opik_logger = logging.getLogger("opik")
 
-    # Store original log level
-    original_level = opik_logger.level
+    # Store original log levels
+    original_client_level = opik_client_logger.level
+    original_opik_level = opik_logger.level
 
-    # Set log level to ERROR to suppress INFO messages
-    opik_logger.setLevel(optimizer_logger.level)
+    # Set log level to WARNING to suppress INFO messages
+    opik_client_logger.setLevel(logging.WARNING)
+    opik_logger.setLevel(logging.WARNING)
 
     try:
         yield
     finally:
-        # Restore original log level
-        opik_logger.setLevel(original_level)
+        # Restore original log levels
+        opik_client_logger.setLevel(original_client_level)
+        opik_logger.setLevel(original_opik_level)
+
+
+def format_prompt_snippet(text: str, max_length: int = 100) -> str:
+    """
+    Normalize whitespace in a prompt snippet and truncate it for compact display.
+
+    Args:
+        text: Raw text to summarize.
+        max_length: Maximum characters to keep before adding an ellipsis.
+
+    Returns:
+        str: Condensed snippet safe for inline logging.
+    """
+    normalized = re.sub(r"\s+", " ", text.strip())
+    if len(normalized) > max_length:
+        return normalized[:max_length] + "…"
+    return normalized
 
 
 def display_messages(messages: list[dict[str, str]], prefix: str = "") -> None:
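
format_prompt_snippet collapses runs of whitespace before truncating, so multi-line prompts can be logged on a single short line. An illustrative sketch (example text made up; behavior follows the function above):

    text = """You are a helpful assistant.
        Answer concisely and cite sources."""

    print(format_prompt_snippet(text, max_length=40))
    # -> "You are a helpful assistant. Answer conc…"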
@@ -196,18 +241,18 @@ def display_result(
     content: Text | Panel = []
 
     if best_score > initial_score:
-        if initial_score == 0:
+        perc_change, has_percentage = safe_percentage_change(best_score, initial_score)
+        if has_percentage:
             content += [
                 Text(
-                    f"Prompt was optimized and improved from {initial_score:.4f} to {best_score:.4f}",
+                    f"Prompt was optimized and improved from {initial_score:.4f} to {best_score:.4f} ({perc_change:.2%})",
                     style="bold green",
                 )
             ]
         else:
-            perc_change = (best_score - initial_score) / initial_score
            content += [
                 Text(
-                    f"Prompt was optimized and improved from {initial_score:.4f} to {best_score:.4f} ({perc_change:.2%})",
+                    f"Prompt was optimized and improved from {initial_score:.4f} to {best_score:.4f}",
                     style="bold green",
                 )
             ]
opik_optimizer/utils/__init__.py
@@ -3,13 +3,16 @@
 from .core import *  # noqa: F401,F403
 from .dataset_utils import *  # noqa: F401,F403
 from .prompt_segments import *  # noqa: F401,F403
+from .candidate_utils import *  # noqa: F401,F403
 
 from . import core as _core
 from . import dataset_utils as _dataset_utils
 from . import prompt_segments as _prompt_segments
+from . import candidate_utils as _candidate_utils
 
 __all__: list[str] = [
     *getattr(_core, "__all__", []),
     *getattr(_dataset_utils, "__all__", []),
     *getattr(_prompt_segments, "__all__", []),
+    *getattr(_candidate_utils, "__all__", []),
 ]
opik_optimizer/utils/candidate_utils.py (new file)
@@ -0,0 +1,52 @@
+"""
+Utilities for working with optimizer candidate collections.
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable
+from typing import TypeVar
+
+__all__ = ["unique_ordered_by_key"]
+
+T = TypeVar("T")
+
+
+def unique_ordered_by_key(
+    items: Iterable[T],
+    key: Callable[[T], str],
+    *,
+    drop_keys: set[str] | None = None,
+) -> list[T]:
+    """
+    Return a list of items that preserves the original order while removing duplicates.
+
+    Args:
+        items: Sequence of items to filter.
+        key: Function that extracts the comparison key from an item.
+        drop_keys: Optional set of keys to omit entirely from the result.
+
+    Returns:
+        List[T]: Ordered list containing the first occurrence of each unique key.
+    """
+    seen: set[str] = set()
+    filtered: list[T] = []
+
+    for item in items:
+        try:
+            item_key = key(item)
+        except (TypeError, AttributeError, KeyError):
+            # If the key extractor fails, fall back to stringifying the item.
+            item_key = str(item)
+
+        if drop_keys and item_key in drop_keys:
+            seen.add(item_key)
+            continue
+
+        if item_key in seen:
+            continue
+
+        seen.add(item_key)
+        filtered.append(item)
+
+    return filtered
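
A quick sketch of how the helper behaves with duplicate and dropped candidates (illustrative data; the function is re-exported from opik_optimizer.utils per the __init__.py change above):

    from opik_optimizer.utils import unique_ordered_by_key

    candidates = [
        {"prompt": "v1"},
        {"prompt": "v2"},
        {"prompt": "v1"},    # duplicate key, dropped
        {"prompt": "base"},  # explicitly excluded via drop_keys
    ]
    kept = unique_ordered_by_key(
        candidates,
        key=lambda c: c["prompt"],
        drop_keys={"base"},
    )
    assert kept == [{"prompt": "v1"}, {"prompt": "v2"}]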
opik_optimizer/utils/core.py
@@ -310,6 +310,25 @@ def get_optimization_run_url_by_id(
     return urllib.parse.urljoin(ensure_ending_slash(url_override), run_path)
 
 
+def get_trial_compare_url(
+    *, dataset_id: str | None, optimization_id: str | None, trial_ids: list[str]
+) -> str:
+    if dataset_id is None or optimization_id is None:
+        raise ValueError("dataset_id and optimization_id are required")
+    if not trial_ids:
+        raise ValueError("trial_ids must be a non-empty list")
+
+    opik_config = opik.config.get_from_user_inputs()
+    url_override = opik_config.url_override
+    base = ensure_ending_slash(url_override)
+
+    trials_query = urllib.parse.quote(json.dumps(trial_ids))
+    compare_path = (
+        f"optimizations/{optimization_id}/{dataset_id}/compare?trials={trials_query}"
+    )
+    return urllib.parse.urljoin(base, compare_path)
+
+
 def create_litellm_agent_class(
     prompt: "ChatPrompt", optimizer_ref: Any = None
 ) -> type["OptimizableAgent"]:
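
get_trial_compare_url assembles a dashboard link for comparing several trials of one optimization run. A hedged calling sketch (the IDs are placeholders, the host comes from the configured Opik url_override, and the import path within opik_optimizer.utils is an assumption):

    from opik_optimizer.utils.core import get_trial_compare_url

    url = get_trial_compare_url(
        dataset_id="dataset-123",
        optimization_id="opt-456",
        trial_ids=["trial-1", "trial-2"],
    )
    # e.g. https://<opik-host>/optimizations/opt-456/dataset-123/compare?trials=%5B%22trial-1%22%2C%20%22trial-2%22%5D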
@@ -327,9 +346,16 @@ def create_litellm_agent_class(
         class LiteLLMAgent(OptimizableAgent):
             model = prompt.model
             model_kwargs = prompt.model_kwargs
-            project_name = prompt.project_name
             optimizer = optimizer_ref
 
+            def __init__(
+                self, prompt: "ChatPrompt", project_name: str | None = None
+            ) -> None:
+                # Get project_name from optimizer if available
+                if project_name is None and hasattr(self.optimizer, "project_name"):
+                    project_name = self.optimizer.project_name
+                super().__init__(prompt, project_name=project_name)
+
             def invoke(
                 self, messages: list[dict[str, str]], seed: int | None = None
             ) -> str:
@@ -342,9 +368,16 @@ def create_litellm_agent_class(
         class LiteLLMAgent(OptimizableAgent):  # type: ignore[no-redef]
             model = prompt.model
             model_kwargs = prompt.model_kwargs
-            project_name = prompt.project_name
             optimizer = optimizer_ref
 
+            def __init__(
+                self, prompt: "ChatPrompt", project_name: str | None = None
+            ) -> None:
+                # Get project_name from optimizer if available
+                if project_name is None and hasattr(self.optimizer, "project_name"):
+                    project_name = self.optimizer.project_name
+                super().__init__(prompt, project_name=project_name)
+
         return LiteLLMAgent
 
 
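Both generated agent classes drop the class-level project_name (previously copied from prompt.project_name) in favor of per-instance resolution. A condensed sketch of the precedence this introduces (a hypothetical helper, not part of the package):

    def _resolve_project_name(explicit: str | None, optimizer: object) -> str | None:
        # Mirrors the fallback in the new LiteLLMAgent.__init__: an explicit argument wins,
        # then the owning optimizer's project_name, otherwise None.
        if explicit is not None:
            return explicit
        return getattr(optimizer, "project_name", None)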
opik_optimizer/utils/prompt_segments.py
@@ -175,8 +175,7 @@ def apply_segment_updates(
         function_map=prompt.function_map,
         model=prompt.model,
         invoke=prompt.invoke,
-        project_name=prompt.project_name,
-        **prompt.model_kwargs,
+        model_parameters=prompt.model_kwargs,
     )
 
 
{opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: opik_optimizer
-Version: 2.1.3
+Version: 2.2.1
 Summary: Agent optimization with Opik
 Home-page: https://github.com/comet-ml/opik
 Author: Comet ML
@@ -8,13 +8,12 @@ Author-email: Comet ML <support@comet.com>
 License: Apache 2.0
 Project-URL: Homepage, https://github.com/comet-ml/opik/blob/main/sdks/opik_optimizer/README.md
 Project-URL: Repository, https://github.com/comet-ml/opik
-Requires-Python: >=3.10,<3.13
+Requires-Python: >=3.10,<3.14
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: datasets
 Requires-Dist: deap>=1.4.3
 Requires-Dist: diskcache
-Requires-Dist: dspy<3
 Requires-Dist: gepa>=0.0.7
 Requires-Dist: ujson
 Requires-Dist: hf_xet
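
In practical terms, the metadata changes widen the supported interpreter range to include Python 3.13 and drop the dspy<3 dependency pin. A hedged install sketch (package name as published on PyPI):

    # Now resolvable on Python 3.10–3.13; dspy is no longer pulled in as a dependency
    pip install "opik-optimizer==2.2.1"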
{opik_optimizer-2.1.3.dist-info → opik_optimizer-2.2.1.dist-info}/RECORD
@@ -1,13 +1,13 @@
-opik_optimizer/__init__.py,sha256=lA9cjEsNxrJwYJ68vCjeNZgrcxO_rNJaAHsdMwaq364,1658
+opik_optimizer/__init__.py,sha256=HsEIWyxeUJhzCvuML5SjBHFWtm-b5LSHyE9GRYytyeI,1592
 opik_optimizer/_throttle.py,sha256=1JXIhYlo0IaqCgwmNB0Hnh9CYhYPkwRFdVGIcE7pVNg,1362
-opik_optimizer/base_optimizer.py,sha256=XryBkUTs4FQmHcBtVm63EJIKWrTvwqduUZ6ArHzYQko,21520
+opik_optimizer/base_optimizer.py,sha256=o4U9yoU-KhR7q_3KnvV3DgCeVboOQdacgleq8D2d_20,28350
 opik_optimizer/cache_config.py,sha256=Xd3NdUsL7bLQWoNe3pESqH4nHucU1iNTSGp-RqbwDog,599
 opik_optimizer/logging_config.py,sha256=TmxX0C1P20amxoXuiNQvlENOjdSNfWwvL8jFy206VWM,3837
 opik_optimizer/multi_metric_objective.py,sha256=y4jqirnhkfhB7SWonI4ldYg5fWG4JGfAxqu7ylRD1J4,1178
-opik_optimizer/optimizable_agent.py,sha256=R0_BdwdHyZGWTw3oSvTg8FULDOYM8XaTiPNR3qV8DkQ,6344
+opik_optimizer/optimizable_agent.py,sha256=gB1ALuVPyEmXOTVYeK2i-inBAO-6JMZSjOrmj37okgQ,6514
 opik_optimizer/optimization_result.py,sha256=sG-Yr-hOaH9zx_I5S6_W3v6j8nPUhwYdS333jVM4Gus,17218
 opik_optimizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-opik_optimizer/reporting_utils.py,sha256=dcECFmzZ_J-DKoukMDEE_fm7X8sdQyl_ijTddvQtepE,8287
+opik_optimizer/reporting_utils.py,sha256=jN3_-tTy98KtsOv8Xp-DKFpePQQYZHHhT7kkG-jUrOg,9970
 opik_optimizer/task_evaluator.py,sha256=7N254DU0UkWJ5saQ5AmYEsHHSrychAJtedmmjNsCOnI,5081
 opik_optimizer/data/context7_eval.jsonl,sha256=vPR3XRfI0UbZ1hgUGaOdpraFT99RDLU1YWuPFLLQz40,1757
 opik_optimizer/data/hotpot-500.json,sha256=YXxCtuvYvxSu5u0y4559a6b1qwgAYsWzT_SUKv_21ew,76862
@@ -28,29 +28,28 @@ opik_optimizer/demo/__init__.py,sha256=KSpFYhzN7fTmLEsIaciRHwxcJDeAiX5NDmYLdPsfp
 opik_optimizer/demo/cache.py,sha256=CwjdmVjokVxmPXvgfOutZK8e0sV-PIUz3ou6ODXZBts,3738
 opik_optimizer/demo/datasets.py,sha256=idod4NYHw1IbxhA8c0XVFD_pGpMZagNGNZuEYDTbbMM,2357
 opik_optimizer/evolutionary_optimizer/__init__.py,sha256=bDa6FZR9Y_a5z337I4EtvaB69jB542P4dbruhYPHCEU,95
-opik_optimizer/evolutionary_optimizer/crossover_ops.py,sha256=7kMvAWOiEA0R5PQMRdnLqbS1uCmIDVzLppNSsPsIO7o,7740
-opik_optimizer/evolutionary_optimizer/evaluation_ops.py,sha256=euVbFgrG37zj2GfcjAPMvPtz-52QljFR76OfChYmsKY,4795
-opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py,sha256=2gEny7Q1PMcwzaDOJ2GrXMKZAZThuhV9tGg92JhQ9lI,46260
+opik_optimizer/evolutionary_optimizer/crossover_ops.py,sha256=M-TsQv8EHKt_RiKoEPYTtiP_HL588AyyTuoXNsQpaVA,8883
+opik_optimizer/evolutionary_optimizer/evaluation_ops.py,sha256=XlPVJG_3R0GeYKOHTCdhBE4TvOBMRvyHlXwG2xvroD4,5511
+opik_optimizer/evolutionary_optimizer/evolutionary_optimizer.py,sha256=TVNHtQMZdGL4if_PK4f3230Rg0NFR87kOf0sibOeqbY,48162
 opik_optimizer/evolutionary_optimizer/helpers.py,sha256=yWYW5JyVbr2smDByc9yaHCYbUS6cw35RBI7lM3pT69A,607
-opik_optimizer/evolutionary_optimizer/llm_support.py,sha256=JeghAOwT_nYyOjdUi-xEiDvG-dW0C87UBzGz_xMdHl8,5438
-opik_optimizer/evolutionary_optimizer/mcp.py,sha256=QhRPsxbtZJKzhJouJOuNbrNGGjgY4JAMmo-UYiNa0WQ,7794
-opik_optimizer/evolutionary_optimizer/mutation_ops.py,sha256=Jj4q297z054LSI0udZmeH_jXQMAxml2_qBoxSIfXNBs,12643
-opik_optimizer/evolutionary_optimizer/population_ops.py,sha256=-33oN2aPTF_upJLYDVUTNm1c5bMzWy2krQ3alFCrJlM,10101
+opik_optimizer/evolutionary_optimizer/mcp.py,sha256=OHZ__q4vVInli8qowNh-1uD76dJ871wY6NkU4XhfxAA,8067
+opik_optimizer/evolutionary_optimizer/mutation_ops.py,sha256=ybhammJsY_SWIBsdZlom9H4Uy7-efbukma1j1-75oY4,13196
+opik_optimizer/evolutionary_optimizer/population_ops.py,sha256=ybNFUpfZgOeuWF5IGdtVRLFkiR4H9WsDrsGMPVj3Rk8,10992
 opik_optimizer/evolutionary_optimizer/prompts.py,sha256=am1nL8oqw3TOVVBDaDn5EoWkjxufEiMQ7E_54Uw8m3s,16204
-opik_optimizer/evolutionary_optimizer/reporting.py,sha256=xzvHK2m0Kdf7hhrrdRxXbp-qt8d8j69nnSBIOzhLSms,12090
+opik_optimizer/evolutionary_optimizer/reporting.py,sha256=AxhdiwUSEchKepu8eW6J5DiUYHXKvMDPqVQkrnrt1No,12048
 opik_optimizer/evolutionary_optimizer/style_ops.py,sha256=XmGFS5s2Qr2DJMZVVsI_C6LqJ5zoyxpeWAtGmdg3TnA,3082
 opik_optimizer/few_shot_bayesian_optimizer/__init__.py,sha256=VuH7FOROyGcjMPryejtZC-5Y0QHlVTFLTGUDgNqRAFw,113
-opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py,sha256=fiCp7_aPpoEf8afBhFo98z1pl1Z50Rs0b-lS6Rij2Ns,27897
-opik_optimizer/few_shot_bayesian_optimizer/reporting.py,sha256=OMpLG4xsM6K7oQcP_nbnky47NklVsowNDlK6WliZM10,6311
+opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py,sha256=3lFUcAiRCM6GUV2Aa9rbvF36nFQ1nFRK-ZC2_dkdGIU,26639
+opik_optimizer/few_shot_bayesian_optimizer/reporting.py,sha256=xk7gKaoTrlp1WDpW3mB5Irzty5Z5l9SJygO3PaamOvU,6283
 opik_optimizer/gepa_optimizer/__init__.py,sha256=XcPah5t4mop7UCFo69E9l45Mem49-itqkQT7_J1aWOA,71
 opik_optimizer/gepa_optimizer/adapter.py,sha256=KzPa4koq7aJhALMAOKPxAO4yWuEy_YbW7tGnqny3Hfo,5139
-opik_optimizer/gepa_optimizer/gepa_optimizer.py,sha256=HBjikhce3K4VIaiIXs7eSagmRyFPdY8h4seoW9F3nQE,26481
-opik_optimizer/gepa_optimizer/reporting.py,sha256=F0cxYSjRuFAszgi3rgqwH1A-KH26kZOLtENP7x1xrQs,5154
+opik_optimizer/gepa_optimizer/gepa_optimizer.py,sha256=RlTm71yWjRR8C1nEAuNXfAx1gkt5nsOwV6bfvu5NwbM,32849
+opik_optimizer/gepa_optimizer/reporting.py,sha256=FiIPtHE6c5p4yMfknnhZetEjehvrA8PRejeOPT9uBCo,15836
 opik_optimizer/hierarchical_reflective_optimizer/__init__.py,sha256=9qM3kvfAaFy-Y6Tg19MXHJxpnF5DJQQwzr6oNsxaRBM,133
-opik_optimizer/hierarchical_reflective_optimizer/hierarchical_reflective_optimizer.py,sha256=j9Gr5z9j-evFhkbxkbiZ7RXt6Q89LshYYR4ac_UxwX0,30235
+opik_optimizer/hierarchical_reflective_optimizer/hierarchical_reflective_optimizer.py,sha256=fhB68XrGNgaHfPwV1JDow-MiAT-jhKDT_Kf-mLLzk0o,27775
 opik_optimizer/hierarchical_reflective_optimizer/hierarchical_root_cause_analyzer.py,sha256=0D5wgx04jZvTJ0Yjqm0jtQvkjrGBB73qgcsSwLBpnv0,13814
-opik_optimizer/hierarchical_reflective_optimizer/prompts.py,sha256=XcOEI9eeEbTgKFsFiRWxvHdaByQkiN02bH2gTl3HX-Y,3853
-opik_optimizer/hierarchical_reflective_optimizer/reporting.py,sha256=d1jQ3uZs0fTI2DeumvGmkxuMHtwA0wt_ROtl4E6UdIM,25461
+opik_optimizer/hierarchical_reflective_optimizer/prompts.py,sha256=8TsLsJo7KPUNFkxSVGXTpVnr9ax4oTosImky0nlEI40,4376
+opik_optimizer/hierarchical_reflective_optimizer/reporting.py,sha256=frbqEOGsiK-TRPJTtcLhHjPJtQaj4T60cq97QEgcDJ0,29053
 opik_optimizer/hierarchical_reflective_optimizer/types.py,sha256=bS-JAheX2FpJ4XAxoZi5PfjloG8L-B1LGQA1iLXZhW4,1031
 opik_optimizer/mcp_utils/__init__.py,sha256=BsWQT8nAa6JV6zcOD__OvPMepUS2IpJD4J2rnAXhpuU,710
 opik_optimizer/mcp_utils/mcp.py,sha256=UylgpTJsybszS433_kuTAgKH-PPde-VHjHVelMardFs,18466
@@ -58,30 +57,31 @@ opik_optimizer/mcp_utils/mcp_second_pass.py,sha256=p2Knlxg1CKIZVMBbdskdRDqw1BRrn
 opik_optimizer/mcp_utils/mcp_simulator.py,sha256=bLL7iVAGMRc8Mb2j_XpSjlkr6TvQLI90hkS4ifnwLqs,3427
 opik_optimizer/mcp_utils/mcp_workflow.py,sha256=R3lqufN35p-OJlGxIxAIOMIAvRTBLGXINzfpoVIq2nw,17885
 opik_optimizer/meta_prompt_optimizer/__init__.py,sha256=syiN2_fMm5iZDQezZCHYe-ZiGOIPlBkLt49Sa1kuR70,97
-opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py,sha256=F3bxrttYAIkzvjduLvVDpFd9xoZkIqqTgqpSjgYDMIw,51435
-opik_optimizer/meta_prompt_optimizer/reporting.py,sha256=Py30NDYFNPzb8XrCXibQRtBC3vjjViQG74uG-O6lhXE,7783
-opik_optimizer/mipro_optimizer/__init__.py,sha256=7sMq9OSWyjITqK7sVtkO9fhG1w6KRE8bN7V52CKaGvo,94
-opik_optimizer/mipro_optimizer/_lm.py,sha256=jgp_bamkG9og8nxVKs6J2qPi6BmTvJD3qVeiorRhszU,17004
-opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py,sha256=bQBJG3wFeNsOF_Yhklx53d4M8kqzBXQ22cOLANXjGJw,39315
-opik_optimizer/mipro_optimizer/mipro_optimizer.py,sha256=ei3gON1r0PiCNhJVJA3zhGS4C2QPZOZMivhc2CPJ_R8,27660
-opik_optimizer/mipro_optimizer/utils.py,sha256=pP3mvai_GQmwUhcchVOiW1xPI3LatpXclE_5XvBYwTw,2493
+opik_optimizer/meta_prompt_optimizer/meta_prompt_optimizer.py,sha256=Q7wZgqynai6eezTVRAJ-FWPrYP6HlFpwQdiObbnwu7o,51562
+opik_optimizer/meta_prompt_optimizer/reporting.py,sha256=jW3WwMz7ZP7jjpvJLVyVXvlKbyTgMJVTz6mFYJhsmPQ,7755
+opik_optimizer/mipro_optimizer/__init__.py,sha256=Hr5HJT7LBBtbCTqBM0CSrIyYxq-eMfI2vujzEkCejV4,63
+opik_optimizer/mipro_optimizer/_lm.py,sha256=LjFan3--gCaeYxWEKdHswCcea_9jC1nLHK5uXULv-c4,17008
+opik_optimizer/mipro_optimizer/mipro_optimizer_v2.py,sha256=Dt2ETxyQXdkVKE74Zu0D2K90tTnTTIPjtp6uV60HCrc,39212
+opik_optimizer/mipro_optimizer/utils.py,sha256=hzlXYOFjCx1Vc485oby5uqI6Xoqc39x2aEpPT2acsf0,2539
 opik_optimizer/optimization_config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-opik_optimizer/optimization_config/chat_prompt.py,sha256=d3jwM1UvUeRQOSsYHa5GD842VO3JWjVDmB3ROUGp57c,7089
+opik_optimizer/optimization_config/chat_prompt.py,sha256=LJGR2o6DMYdTEU2cbhX9dyH-hd9RQ8ngFfJHttBJJo4,6998
 opik_optimizer/optimization_config/configs.py,sha256=EGacRNnl6TeWuf8RNsxpP6Nh5JhogjK-JxKllK8dQr0,413
 opik_optimizer/optimization_config/mappers.py,sha256=4uBoPaIvCo4bqt_w-4rJyVe2LMAP_W7p6xxnDmGT-Sk,1724
 opik_optimizer/parameter_optimizer/__init__.py,sha256=Eg-LEFBJqnOFw7i2B_YH27CoIGDPb5y_q1ar-ZpjtYo,308
-opik_optimizer/parameter_optimizer/parameter_optimizer.py,sha256=eDd9tFQinz2lKsEJtikCBVzSWMK4saI9bhUY2NtDEg0,14955
+opik_optimizer/parameter_optimizer/parameter_optimizer.py,sha256=B18H2ZtcaPmrjh7f7_vHA8UewXml_9bw5PDA3nBzgyE,21206
 opik_optimizer/parameter_optimizer/parameter_search_space.py,sha256=rgTNK8HPbdDiVm4GVX2QESTmQPhPFj4UkxqZfAy9JAA,4659
 opik_optimizer/parameter_optimizer/parameter_spec.py,sha256=HzYT_dHBTfZtx403mY-Epv_IEqn4kYuYBZ6QUdkFRiY,8064
+opik_optimizer/parameter_optimizer/reporting.py,sha256=-kEe9sQFdkUhxayEamXLR8ukyTLJrGsTs8pbJWmimQ4,4665
 opik_optimizer/parameter_optimizer/search_space_types.py,sha256=UajTA2QKikEWazokDNO7j141gc2WxxYYiDRnFFjXi6M,512
 opik_optimizer/parameter_optimizer/sensitivity_analysis.py,sha256=8KEMVMHsmcoiK21Cq1-We6_Pw_6LX9qBX9Az4-tmj_w,2146
-opik_optimizer/utils/__init__.py,sha256=Ee0SnTPOcwRwp93M6Lh-X913lfSIwnvCiYYh5cpdRQE,486
+opik_optimizer/utils/__init__.py,sha256=_sielSJdLVeyBugtsw1iSVJr_I8YbhsU-U7p8zLe_JY,633
+opik_optimizer/utils/candidate_utils.py,sha256=PKtjREM4MFHvgDri8jCmbs6zHvxAnrfjuwwymvQNnrk,1294
 opik_optimizer/utils/colbert.py,sha256=qSrzKUUGw7P92mLy4Ofug5pBGeTsHBLMJXlXSJSfKuo,8147
-opik_optimizer/utils/core.py,sha256=5GT1vp6fW8ICO42LHMX14BjR-xEb6afAKjM7b1Evx5M,15298
+opik_optimizer/utils/core.py,sha256=56lQax3mAQkVZWfie6vhaTKZfjTBcYXf-FFkXgyFYFE,16715
 opik_optimizer/utils/dataset_utils.py,sha256=dqRUGOekjeNWL0J15R8xFwLyKJDJynJXzVyQmt8rhHA,1464
-opik_optimizer/utils/prompt_segments.py,sha256=1zUITSccJ82Njac1rmANzim4WWM6rVac61mfluS7lFE,5931
-opik_optimizer-2.1.3.dist-info/licenses/LICENSE,sha256=V-0VHJOBdcA_teT8VymvsBUQ1-CZU6yJRmMEjec_8tA,11372
-opik_optimizer-2.1.3.dist-info/METADATA,sha256=omnNZ2--FZxU-ex3SEKYF4ZaKRDTcQfkPoc2kxKLB7U,12829
-opik_optimizer-2.1.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-opik_optimizer-2.1.3.dist-info/top_level.txt,sha256=ondOlpq6_yFckqpxoAHSfzZS2N-JfgmA-QQhOJfz7m0,15
-opik_optimizer-2.1.3.dist-info/RECORD,,
+opik_optimizer/utils/prompt_segments.py,sha256=eiLYT1iiPxtB7ArriN13-LgI5tID-v7MrjniTAxK2Lo,5904
+opik_optimizer-2.2.1.dist-info/licenses/LICENSE,sha256=V-0VHJOBdcA_teT8VymvsBUQ1-CZU6yJRmMEjec_8tA,11372
+opik_optimizer-2.2.1.dist-info/METADATA,sha256=8HayPMPvWBxuCg1H3u6-d_8MwBxVF2DzbID2VrdqjKk,12807
+opik_optimizer-2.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+opik_optimizer-2.2.1.dist-info/top_level.txt,sha256=ondOlpq6_yFckqpxoAHSfzZS2N-JfgmA-QQhOJfz7m0,15
+opik_optimizer-2.2.1.dist-info/RECORD,,
opik_optimizer/evolutionary_optimizer/llm_support.py (deleted)
@@ -1,136 +0,0 @@
-from typing import Any, TYPE_CHECKING
-
-import logging
-import os
-import time
-import random
-
-import litellm
-from litellm import exceptions as litellm_exceptions
-from litellm.caching import Cache
-from litellm.types.caching import LiteLLMCacheType
-from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor
-
-from .. import _throttle
-
-
-logger = logging.getLogger(__name__)
-
-
-# Configure LiteLLM cache with safe fallback
-try:
-    # Prefer a disk cache in a user-writable location
-    cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "litellm")
-    os.makedirs(cache_dir, exist_ok=True)
-    litellm.cache = Cache(type=LiteLLMCacheType.DISK, cache_dir=cache_dir)
-except (PermissionError, OSError, FileNotFoundError):
-    # Fall back to in-memory cache to avoid disk timeouts/locks
-    litellm.cache = Cache(type=LiteLLMCacheType.MEMORY)
-
-_rate_limiter = _throttle.get_rate_limiter_for_current_opik_installation()
-
-
-class LlmSupport:
-    if TYPE_CHECKING:
-        model: str
-        llm_call_counter: int
-        project_name: str | None
-        disable_litellm_monitoring: bool
-        temperature: float
-        max_tokens: int
-        top_p: float
-        frequency_penalty: float
-        presence_penalty: float
-
-        def increment_llm_counter(self) -> None: ...
-
-    @_throttle.rate_limited(_rate_limiter)
-    def _call_model(
-        self,
-        messages: list[dict[str, str]],
-        is_reasoning: bool = False,
-        optimization_id: str | None = None,
-    ) -> str:
-        """Call the model with the given prompt and return the response string."""
-        # Build base call params
-        llm_config_params: dict[str, Any] = {
-            "temperature": getattr(self, "temperature", 0.3),
-            "max_tokens": getattr(self, "max_tokens", 1000),
-            "top_p": getattr(self, "top_p", 1.0),
-            "frequency_penalty": getattr(self, "frequency_penalty", 0.0),
-            "presence_penalty": getattr(self, "presence_penalty", 0.0),
-        }
-
-        # Add Opik metadata unless disabled
-        try:
-            disable_monitoring_env = os.getenv(
-                "OPIK_OPTIMIZER_DISABLE_LITELLM_MONITORING", "0"
-            )
-            disable_monitoring = getattr(
-                self, "disable_litellm_monitoring", False
-            ) or disable_monitoring_env.lower() in ("1", "true", "yes")
-
-            if not disable_monitoring:
-                metadata_for_opik: dict[str, Any] = {}
-                pn = getattr(self, "project_name", None)
-                if pn:
-                    metadata_for_opik["project_name"] = pn
-                    metadata_for_opik["opik"] = {"project_name": pn}
-                if optimization_id and "opik" in metadata_for_opik:
-                    metadata_for_opik["opik"]["optimization_id"] = optimization_id
-                metadata_for_opik["optimizer_name"] = self.__class__.__name__
-                metadata_for_opik["opik_call_type"] = (
-                    "reasoning" if is_reasoning else "evaluation_llm_task_direct"
-                )
-                if metadata_for_opik:
-                    llm_config_params["metadata"] = metadata_for_opik
-
-            # Try to add Opik monitoring callbacks; fall back silently on failure
-            llm_config_params = (
-                opik_litellm_monitor.try_add_opik_monitoring_to_params(  # type: ignore
-                    llm_config_params.copy()
-                )
-            )
-        except Exception as e:
-            logger.debug(f"Skipping Opik-LiteLLM monitoring setup: {e}")
-
-        # Retry policy for transient errors
-        max_retries = int(os.getenv("OPIK_OPTIMIZER_LITELLM_MAX_RETRIES", "3"))
-        base_sleep = float(os.getenv("OPIK_OPTIMIZER_LITELLM_BACKOFF", "0.5"))
-
-        for attempt in range(max_retries + 1):
-            try:
-                logger.debug(
-                    f"Calling model '{self.model}' with messages: {messages}, params: {llm_config_params} (attempt {attempt + 1})"
-                )
-                response = litellm.completion(
-                    model=self.model, messages=messages, **llm_config_params
-                )
-                self.increment_llm_counter()
-                return response.choices[0].message.content
-            except (
-                litellm_exceptions.RateLimitError,
-                litellm_exceptions.APIConnectionError,
-                litellm_exceptions.InternalServerError,
-            ) as e:
-                if attempt < max_retries:
-                    sleep_s = min(10.0, base_sleep * (2**attempt)) + random.uniform(
-                        0, 0.25
-                    )
-                    logger.warning(
-                        f"LiteLLM transient error ({type(e).__name__}): {e}. Retrying in {sleep_s:.2f}s..."
-                    )
-                    time.sleep(sleep_s)
-                    continue
-                logger.error(f"LiteLLM error (final attempt): {e}")
-                raise
-            except litellm_exceptions.ContextWindowExceededError as e:
-                logger.error(f"LiteLLM Context Window Exceeded Error: {e}")
-                raise
-            except Exception as e:
-                logger.error(
-                    f"Error calling model '{self.model}': {type(e).__name__} - {e}"
-                )
-                raise
-        # Should never reach here
-        raise RuntimeError("LLM call did not return a response and did not raise")