opik-optimizer 0.7.1__tar.gz → 0.7.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opik_optimizer-0.7.3/PKG-INFO +173 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/README.md +27 -27
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/setup.py +6 -3
- opik_optimizer-0.7.3/src/opik_optimizer/__init__.py +43 -0
- opik_optimizer-0.7.3/src/opik_optimizer/_throttle.py +43 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/base_optimizer.py +1 -82
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/demo/cache.py +1 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py +12 -10
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/meta_prompt_optimizer.py +3 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/_lm.py +2 -2
- opik_optimizer-0.7.3/src/opik_optimizer.egg-info/PKG-INFO +173 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer.egg-info/requires.txt +1 -0
- opik_optimizer-0.7.1/PKG-INFO +0 -35
- opik_optimizer-0.7.1/src/opik_optimizer/__init__.py +0 -65
- opik_optimizer-0.7.1/src/opik_optimizer/_throttle.py +0 -43
- opik_optimizer-0.7.1/src/opik_optimizer.egg-info/PKG-INFO +0 -35
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/LICENSE +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/setup.cfg +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/cache_config.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/demo/__init__.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/demo/datasets.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/few_shot_bayesian_optimizer/__init__.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/few_shot_bayesian_optimizer/prompt_parameter.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/few_shot_bayesian_optimizer/prompt_templates.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/integrations/__init__.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/logging_config.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/__init__.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/_mipro_optimizer_v2.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/mipro_optimizer.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/utils.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/optimization_config/__init__.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/optimization_config/configs.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/optimization_config/mappers.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/optimization_result.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/task_evaluator.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/utils.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer.egg-info/SOURCES.txt +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer.egg-info/dependency_links.txt +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer.egg-info/top_level.txt +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_base_optimizer.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_example.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_few_shot_bayesian_optimizer.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_mappers.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_optimization_dsl.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_optimization_result.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_task_evaluator.py +0 -0
- {opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/tests/test_utils.py +0 -0
opik_optimizer-0.7.3/PKG-INFO
ADDED
@@ -0,0 +1,173 @@
+Metadata-Version: 2.4
+Name: opik_optimizer
+Version: 0.7.3
+Summary: Agent optimization with Opik
+Home-page: https://github.com/comet-ml/opik
+Author: Comet ML
+Author-email: support@comet.com
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.9,<3.13
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: opik>=1.7.17
+Requires-Dist: dspy<3,>=2.6.18
+Requires-Dist: litellm
+Requires-Dist: tqdm
+Requires-Dist: datasets
+Requires-Dist: optuna
+Requires-Dist: pydantic
+Requires-Dist: pandas
+Requires-Dist: hf_xet
+Requires-Dist: pyrate-limiter
+Provides-Extra: dev
+Requires-Dist: adalflow; extra == "dev"
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-conv; extra == "dev"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: license-file
+Dynamic: provides-extra
+Dynamic: requires-dist
+Dynamic: requires-python
+Dynamic: summary
+
+# Opik Optimizer
+
+The Opik Opitmizer can refine your prompts to get better performance
+from your LLMs. You can use a variety of algorithms, including:
+
+* FewShotBayesianOptimizer
+* MiproOptimizer
+* MetaPromptOptimizer
+
+## Quickstart
+
+
+[Open Quickstart Notebook in Colab](https://colab.research.google.com/github/comet-ml/opik/blob/main/sdks/opik_optimizer/notebooks/OpikOptimizerIntro.ipynb)
+
+
+## Setup
+
+1. Configure Opik:
+```bash
+# Install Comet ML CLI
+pip install opik
+
+# Configure your API key
+opik configure
+# When prompted, enter your Opik API key
+```
+
+2. Set up your environment variables:
+```bash
+# OpenAI API key for LLM access
+export OPENAI_API_KEY=your_openai_api_key
+```
+
+3. Install the package:
+```bash
+pip install opik-optimizer
+```
+
+You'll need:
+
+1. An LLM model name
+2. An Opik Dataset (or Opik Dataset name)
+3. An Opik Metric (possibly a custom one)
+4. A starting prompt (string)
+
+## Example
+
+We have prepared some sample datasets for testing:
+
+* "tiny-test"
+* "halu-eval-300"
+* "hotpot-300"
+
+You can see how to use those below:
+
+```python
+from opik.evaluation.metrics import LevenshteinRatio
+from opik_optimizer import FewShotBayesianOptimizer
+from opik_optimizer.demo import get_or_create_dataset
+
+from opik_optimizer import (
+    MetricConfig,
+    TaskConfig,
+    from_dataset_field,
+    from_llm_response_text,
+)
+
+hot_pot_dataset = get_or_create_dataset("hotpot-300")
+
+# For chat prompts instruction doesn't need to contain input parameters from dataset examples.
+prompt_instruction = """
+Answer the question.
+"""
+project_name = "optimize-few-shot-bayesian-hotpot"
+
+optimizer = FewShotBayesianOptimizer(
+    model="gpt-4o-mini",
+    project_name=project_name,
+    min_examples=3,
+    max_examples=8,
+    n_threads=16,
+    seed=42,
+)
+
+metric_config = MetricConfig(
+    metric=LevenshteinRatio(project_name=project_name),
+    inputs={
+        "output": from_llm_response_text(),
+        "reference": from_dataset_field(name="answer"),
+    },
+)
+
+task_config = TaskConfig(
+    instruction_prompt=prompt_instruction,
+    input_dataset_fields=["question"],
+    output_dataset_field="answer",
+    use_chat_prompt=True,
+)
+
+result = optimizer.optimize_prompt(
+    dataset=hot_pot_dataset,
+    metric_config=metric_config,
+    task_config=task_config,
+    n_trials=10,
+    n_samples=150,
+)
+
+result.display()
+```
+
+More examples can be found in the `scripts` folder.
+
+## Installation
+
+```bash
+pip install opik-optimizer
+```
+
+## Development
+
+To use the Opik Optimizer from source:
+
+```bash
+git clone git clone git@github.com:comet-ml/opik
+cd sdks/opik_optimizer
+pip install -e .
+```
+
+## Requirements
+
+- Python 3.10+ < 3.13
+- Opik API key
+- OpenAI API key (or other LLM provider)
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/README.md

@@ -33,7 +33,7 @@ from your LLMs. You can use a variety of algorithms, including:
 
 3. Install the package:
 ```bash
-pip install
+pip install opik-optimizer
 ```
 
 You'll need:
@@ -55,11 +55,10 @@ You can see how to use those below:
 
 ```python
 from opik.evaluation.metrics import LevenshteinRatio
-from opik_optimizer
+from opik_optimizer import FewShotBayesianOptimizer
 from opik_optimizer.demo import get_or_create_dataset
 
 from opik_optimizer import (
-    OptimizationConfig,
     MetricConfig,
     TaskConfig,
     from_dataset_field,
@@ -72,40 +71,41 @@ hot_pot_dataset = get_or_create_dataset("hotpot-300")
 prompt_instruction = """
 Answer the question.
 """
-
-initial_prompt_no_examples = [
-    {"role": "system", "content": prompt_instruction},
-    {"role": "user", "content": "{{question}}"},
-]
+project_name = "optimize-few-shot-bayesian-hotpot"
 
 optimizer = FewShotBayesianOptimizer(
     model="gpt-4o-mini",
-    project_name=
+    project_name=project_name,
     min_examples=3,
     max_examples=8,
     n_threads=16,
     seed=42,
 )
 
-
+metric_config = MetricConfig(
+    metric=LevenshteinRatio(project_name=project_name),
+    inputs={
+        "output": from_llm_response_text(),
+        "reference": from_dataset_field(name="answer"),
+    },
+)
+
+task_config = TaskConfig(
+    instruction_prompt=prompt_instruction,
+    input_dataset_fields=["question"],
+    output_dataset_field="answer",
+    use_chat_prompt=True,
+)
+
+result = optimizer.optimize_prompt(
     dataset=hot_pot_dataset,
-
-
-
-
-        "reference": from_dataset_field(name="answer"),
-    },
-    ),
-    task=TaskConfig(
-        instruction_prompt=prompt_instruction,
-        input_dataset_fields=["question"],
-        output_dataset_field="answer",
-        use_chat_prompt=True,
-    ),
+    metric_config=metric_config,
+    task_config=task_config,
+    n_trials=10,
+    n_samples=150,
 )
 
-result
-print(result)
+result.display()
 ```
 
 More examples can be found in the `scripts` folder.
@@ -113,7 +113,7 @@ More examples can be found in the `scripts` folder.
 ## Installation
 
 ```bash
-pip install
+pip install opik-optimizer
 ```
 
 ## Development
@@ -128,6 +128,6 @@ pip install -e .
 
 ## Requirements
 
-- Python 3.10+
+- Python 3.10+ < 3.13
 - Opik API key
 - OpenAI API key (or other LLM provider)
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/setup.py

@@ -2,14 +2,16 @@ from setuptools import setup, find_packages
 
 setup(
     name="opik_optimizer",
-    version="0.7.
+    version="0.7.3",
     description="Agent optimization with Opik",
     author="Comet ML",
-    author_email="
+    author_email="support@comet.com",
+    long_description=open("README.md", encoding="utf-8").read(),
+    long_description_content_type='text/markdown',
     url="https://github.com/comet-ml/opik",
     packages=find_packages(where="src"),
     package_dir={"": "src"},
-    python_requires=">=3.9",
+    python_requires=">=3.9,<3.13",
    install_requires=[
        "opik>=1.7.17",
        "dspy>=2.6.18,<3",
@@ -20,6 +22,7 @@ setup(
        "pydantic",
        "pandas",
        "hf_xet",
+       "pyrate-limiter",
    ],
    # dev requirements
    extras_require={
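For context: the setup.py hunks above embed the README as the long description, cap supported Pythons at <3.13, and add the new pyrate-limiter dependency. A minimal sketch (not part of the package) for confirming what a local install actually resolved to, using only the standard library:

```python
# Hypothetical check, not shipped with opik_optimizer: inspect the installed
# distribution to confirm the 0.7.3 metadata changes shown in the diff above.
import importlib.metadata as md

dist = md.distribution("opik_optimizer")
print(dist.version)                      # expected: 0.7.3
print(dist.metadata["Requires-Python"])  # expected: >=3.9,<3.13
print([req for req in (md.requires("opik_optimizer") or []) if "pyrate" in req])
```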
opik_optimizer-0.7.3/src/opik_optimizer/__init__.py
ADDED
@@ -0,0 +1,43 @@
+import importlib.metadata
+import logging
+from .logging_config import setup_logging
+
+__version__ = importlib.metadata.version("opik_optimizer")
+
+# Using WARNING as a sensible default to avoid flooding users with INFO/DEBUG
+setup_logging(level=logging.WARNING)
+
+# Regular imports
+from .mipro_optimizer import MiproOptimizer
+from .base_optimizer import BaseOptimizer
+from .meta_prompt_optimizer import MetaPromptOptimizer
+from .few_shot_bayesian_optimizer import FewShotBayesianOptimizer
+from .optimization_config.configs import (
+    MetricConfig,
+    OptimizationConfig,
+    TaskConfig,
+)
+from .optimization_config.mappers import (
+    from_dataset_field,
+    from_llm_response_text,
+)
+
+from opik.evaluation.models.litellm import warning_filters
+
+warning_filters.add_warning_filters()
+
+from .optimization_result import OptimizationResult
+
+__all__ = [
+    "BaseOptimizer",
+    "FewShotBayesianOptimizer",
+    "MetaPromptOptimizer",
+    "MiproOptimizer",
+    "MetricConfig",
+    "OptimizationConfig",
+    "TaskConfig",
+    "from_dataset_field",
+    "from_llm_response_text",
+    "OptimizationResult",
+    "setup_logging",
+]
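For context: 0.7.3 replaces the 0.7.1 lazy `__getattr__`-based imports (deleted further below) with regular top-level imports, so every name in `__all__` is bound when the package is imported. A short illustrative sketch, not taken from the package docs:

```python
# With 0.7.3's eager imports, the public names resolve at import time
# (in 0.7.1 they were looked up lazily via module __getattr__).
import opik_optimizer

print(opik_optimizer.__version__)                 # e.g. "0.7.3"
optimizer_cls = opik_optimizer.MetaPromptOptimizer  # already bound, no lazy hook

from opik_optimizer import MetricConfig, TaskConfig  # same public names as before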
opik_optimizer-0.7.3/src/opik_optimizer/_throttle.py
ADDED
@@ -0,0 +1,43 @@
+import functools
+import pyrate_limiter
+import time
+import opik.config
+
+from typing import Callable, Any
+
+
+class RateLimiter:
+    """
+    Rate limiter that enforces a maximum number of calls across all threads using pyrate_limiter.
+    """
+    def __init__(self, max_calls_per_second: int):
+        self.max_calls_per_second = max_calls_per_second
+        rate = pyrate_limiter.Rate(max_calls_per_second, pyrate_limiter.Duration.SECOND)
+
+        self.limiter = pyrate_limiter.Limiter(rate, raise_when_fail=False)
+        self.bucket_key = "global_rate_limit"
+
+    def acquire(self) -> None:
+        while not self.limiter.try_acquire(self.bucket_key):
+            time.sleep(0.01)
+
+def rate_limited(limiter: RateLimiter) -> Callable[[Callable], Callable]:
+    """Decorator to rate limit a function using the provided limiter"""
+
+    def decorator(func: Callable) -> Callable:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs) -> Any:
+            limiter.acquire()
+            return func(*args, **kwargs)
+        return wrapper
+    return decorator
+
+
+def get_rate_limiter_for_current_opik_installation() -> RateLimiter:
+    opik_config = opik.config.OpikConfig()
+    max_calls_per_second = (
+        10
+        if opik_config.is_cloud_installation
+        else 50
+    )
+    return RateLimiter(max_calls_per_second=max_calls_per_second)
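For context: the rewritten `_throttle` module builds `RateLimiter` on top of pyrate_limiter and adds a helper that picks 10 calls/second for Opik cloud installations and 50 otherwise. A sketch of how these pieces compose; `expensive_llm_call` is a placeholder, not part of the package:

```python
# Illustrative use of the new throttle primitives shown in the diff above.
from opik_optimizer._throttle import (
    RateLimiter,
    rate_limited,
    get_rate_limiter_for_current_opik_installation,
)

limiter = get_rate_limiter_for_current_opik_installation()  # 10/s on cloud, 50/s otherwise

@rate_limited(limiter)
def expensive_llm_call(prompt: str) -> str:
    # Every call first blocks in limiter.acquire() until a slot is free.
    ...

# A custom budget works the same way:
slow_limiter = RateLimiter(max_calls_per_second=2)
```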
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/base_optimizer.py

@@ -4,8 +4,6 @@ import logging
 import time
 
 import litellm
-from opik.evaluation import metrics
-from opik.opik_context import get_current_span_data
 from opik.rest_api.core import ApiError
 
 from pydantic import BaseModel
@@ -14,7 +12,7 @@ from .cache_config import initialize_cache
 from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor
 from .optimization_config.configs import TaskConfig, MetricConfig
 
-limiter = RateLimiter(max_calls_per_second=
+limiter = RateLimiter(max_calls_per_second=8)
 
 # Don't use unsupported params:
 litellm.drop_params = True
@@ -143,85 +141,6 @@ class BaseOptimizer:
         """
         self._history.append(round_data)
 
-    @rate_limited(limiter)
-    def _call_model(
-        self,
-        prompt: str,
-        system_prompt: Optional[str] = None,
-        is_reasoning: bool = False,
-    ) -> str:
-        """Call the model to get suggestions based on the meta-prompt."""
-        model = self.reasoning_model if is_reasoning else self.model
-        messages = []
-
-        if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
-            logger.debug(f"Using custom system prompt: {system_prompt[:100]}...")
-        else:
-            messages.append(
-                {"role": "system", "content": "You are a helpful assistant."}
-            )
-
-        messages.append({"role": "user", "content": prompt})
-        logger.debug(f"Calling model {model} with prompt: {prompt[:100]}...")
-
-        api_params = self.model_kwargs.copy()
-        api_params.update(
-            {
-                "model": model,
-                "messages": messages,
-                # Ensure required params like 'temperature', 'max_tokens' are present
-                # Defaults added here for safety, though usually set in __init__ kwargs
-                "temperature": api_params.get("temperature", 0.3),
-                "max_tokens": api_params.get("max_tokens", 1000),
-            }
-        )
-
-        # Attempt to add Opik monitoring if available
-        try:
-            # Assuming opik_litellm_monitor is imported and configured elsewhere
-            api_params = opik_litellm_monitor.try_add_opik_monitoring_to_params(
-                api_params
-            )
-            logger.debug("Opik monitoring hooks added to LiteLLM params.")
-        except Exception as e:
-            logger.warning(f"Could not add Opik monitoring to LiteLLM params: {e}")
-
-        logger.debug(
-            f"Final API params (excluding messages): { {k:v for k,v in api_params.items() if k != 'messages'} }"
-        )
-
-        # Increment Counter
-        self.llm_call_counter += 1
-        logger.debug(f"LLM Call Count: {self.llm_call_counter}")
-
-        try:
-            response = litellm.completion(**api_params)
-            model_output = response.choices[0].message.content.strip()
-            logger.debug(f"Model response from {model_to_use}: {model_output[:100]}...")
-            return model_output
-        except litellm.exceptions.RateLimitError as e:
-            logger.error(f"LiteLLM Rate Limit Error for model {model_to_use}: {e}")
-            # Consider adding retry logic here with tenacity
-            raise
-        except litellm.exceptions.APIConnectionError as e:
-            logger.error(f"LiteLLM API Connection Error for model {model_to_use}: {e}")
-            # Consider adding retry logic here
-            raise
-        except litellm.exceptions.ContextWindowExceededError as e:
-            logger.error(
-                f"LiteLLM Context Window Exceeded Error for model {model_to_use}. Prompt length: {len(prompt)}. Details: {e}"
-            )
-            raise
-        except litellm.exceptions.APIError as e:  # Catch broader API errors
-            logger.error(f"LiteLLM API Error for model {model_to_use}: {e}")
-            raise
-        except Exception as e:
-            # Catch any other unexpected errors
-            logger.error(
-                f"Unexpected error during model call to {model_to_use}: {type(e).__name__} - {e}"
-            )
-            raise
 
     def update_optimization(self, optimization, status: str) -> None:
         """
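For context: the deleted `BaseOptimizer._call_model` bundled three concerns, rate limiting, best-effort Opik monitoring, and LiteLLM error handling; in 0.7.3 the concrete optimizers carry their own rate-limited `_call_model`. A condensed sketch of that pattern, simplified from the deleted code and not the 0.7.3 implementation:

```python
# Simplified sketch of the pattern the removed method used; names and defaults
# are illustrative only.
import litellm
from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor

def call_model(model: str, messages: list, **model_kwargs) -> str:
    api_params = {"model": model, "messages": messages, **model_kwargs}
    try:
        # Attach Opik tracing metadata when the monitor is available (best effort).
        api_params = opik_litellm_monitor.try_add_opik_monitoring_to_params(api_params)
    except Exception:
        pass
    try:
        response = litellm.completion(**api_params)
        return response.choices[0].message.content.strip()
    except litellm.exceptions.RateLimitError:
        # Surface provider-side throttling to the caller; retries could go here.
        raise
```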
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/demo/cache.py

@@ -9,6 +9,7 @@ import requests
 NAMED_CACHES = {
     "test": "https://drive.google.com/file/d/1RifNtpN-pl0DW49daRaAMJwW7MCsOh6y/view?usp=sharing",
     "test2": "https://drive.google.com/uc?id=1RifNtpN-pl0DW49daRaAMJwW7MCsOh6y&export=download",
+    "opik-workshop": "https://drive.google.com/file/d/1l0aK6KhDPs2bFsQTkfzvOvfacJlhdmHr/view?usp=sharing",
 }
 CACHE_DIR = os.path.expanduser("~/.litellm_cache")
 
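For context: the new "opik-workshop" entry uses the Google Drive "view" share-link form (like "test"), while "test2" already uses the direct `uc?id=...&export=download` form. An illustrative helper, not part of the package, for converting between the two forms:

```python
# Hypothetical utility: turn a Google Drive "view" share link into the
# direct-download form used by the "test2" entry above.
import re

def to_direct_download(share_url: str) -> str:
    match = re.search(r"/file/d/([^/]+)/", share_url)
    if not match:
        return share_url  # already direct, or an unrecognized format
    return f"https://drive.google.com/uc?id={match.group(1)}&export=download"

print(to_direct_download(
    "https://drive.google.com/file/d/1RifNtpN-pl0DW49daRaAMJwW7MCsOh6y/view?usp=sharing"
))
# -> https://drive.google.com/uc?id=1RifNtpN-pl0DW49daRaAMJwW7MCsOh6y&export=download
```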
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/few_shot_bayesian_optimizer/few_shot_bayesian_optimizer.py

@@ -1,8 +1,8 @@
 import random
-from typing import Any, Dict, List, Tuple, Union, Optional,
-import openai
+from typing import Any, Dict, List, Tuple, Union, Optional, Literal
 import opik
 import optuna
+import optuna.samplers
 import logging
 import json
 
@@ -14,18 +14,18 @@ from opik_optimizer import base_optimizer
 
 from . import prompt_parameter
 from . import prompt_templates
-from ..
+from .. import _throttle
 from .. import optimization_result, task_evaluator
 
 import litellm
 
 from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor
 
-
+_limiter = _throttle.get_rate_limiter_for_current_opik_installation()
 
 logger = logging.getLogger(__name__)
 
-@rate_limited(
+@_throttle.rate_limited(_limiter)
 def _call_model(model, messages, seed, model_kwargs):
     model_kwargs = opik_litellm_monitor.try_add_opik_monitoring_to_params(model_kwargs)
 
@@ -59,7 +59,6 @@ class FewShotBayesianOptimizer(base_optimizer.BaseOptimizer):
         self.n_threads = n_threads
         self.n_initial_prompts = n_initial_prompts
         self.n_iterations = n_iterations
-
         self._opik_client = opik.Opik()
         logger.debug(f"Initialized FewShotBayesianOptimizer with model: {model}")
 
@@ -85,7 +84,7 @@ class FewShotBayesianOptimizer(base_optimizer.BaseOptimizer):
         split_idx = int(len(dataset) * train_ratio)
         return dataset[:split_idx], dataset[split_idx:]
 
-    def _optimize_prompt(
+    def _optimize_prompt(
         self,
         dataset: Union[str, Dataset],
         metric_config: MetricConfig,
@@ -171,8 +170,10 @@ class FewShotBayesianOptimizer(base_optimizer.BaseOptimizer):
             n_examples = trial.suggest_int(
                 "n_examples", self.min_examples, self.max_examples
             )
-
-
+            example_indices = [
+                trial.suggest_categorical(f"example_{i}", list(range(len(dataset_items))))
+                for i in range(n_examples)
+            ]
             trial.set_user_attr("example_indices", example_indices)
 
             instruction = task_config.instruction_prompt
@@ -238,7 +239,8 @@ class FewShotBayesianOptimizer(base_optimizer.BaseOptimizer):
         except Exception as e:
             logger.warning(f"Could not configure Optuna logging within optimizer: {e}")
 
-
+        sampler = optuna.samplers.TPESampler(seed=self.seed)
+        study = optuna.create_study(direction="maximize", sampler=sampler)
         study.optimize(optimization_objective, n_trials=n_trials)
         logger.info("Optuna study finished.")
 
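For context: the two substantive changes above are (1) few-shot example indices are now sampled explicitly with `trial.suggest_categorical`, and (2) the Optuna study uses a `TPESampler` seeded from the optimizer's `seed`, which makes the search trajectory reproducible. A standalone sketch of that pattern; the objective below is a stand-in, not the optimizer's real scoring function:

```python
# Illustrative reproduction of the seeded-sampler pattern introduced above.
import optuna
import optuna.samplers

def objective(trial: optuna.Trial) -> float:
    n_examples = trial.suggest_int("n_examples", 3, 8)
    # One categorical parameter per few-shot slot, choosing a dataset item index.
    example_indices = [
        trial.suggest_categorical(f"example_{i}", list(range(100)))
        for i in range(n_examples)
    ]
    trial.set_user_attr("example_indices", example_indices)
    return float(len(set(example_indices)))  # dummy score for the sketch

sampler = optuna.samplers.TPESampler(seed=42)   # same seed -> same suggestions
study = optuna.create_study(direction="maximize", sampler=sampler)
study.optimize(objective, n_trials=10)
```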
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/meta_prompt_optimizer.py

@@ -16,6 +16,7 @@ from opik_optimizer import task_evaluator
 from opik.api_objects import opik_client
 from opik.evaluation.models.litellm import opik_monitor as opik_litellm_monitor
 from opik.environment import get_tqdm_for_current_environment
+from . import _throttle
 
 tqdm = get_tqdm_for_current_environment()
 
@@ -26,6 +27,7 @@ litellm.cache = Cache(type="disk", disk_cache_dir=disk_cache_dir)
 # Set up logging
 logger = logging.getLogger(__name__)  # Gets logger configured by setup_logging
 
+_rate_limiter = _throttle.get_rate_limiter_for_current_opik_installation()
 
 class MetaPromptOptimizer(BaseOptimizer):
     """Optimizer that uses meta-prompting to improve prompts based on examples and performance."""
@@ -176,6 +178,7 @@ class MetaPromptOptimizer(BaseOptimizer):
             optimization_id=optimization_id,
         )
 
+    @_throttle.rate_limited(_rate_limiter)
     def _call_model(
         self,
         prompt: str,
{opik_optimizer-0.7.1 → opik_optimizer-0.7.3}/src/opik_optimizer/mipro_optimizer/_lm.py

@@ -22,11 +22,11 @@ from dspy.dsp.utils.settings import settings
 from dspy.utils.callback import BaseCallback, with_callbacks
 from dspy.clients.base_lm import BaseLM
 
-from .._throttle import RateLimiter, rate_limited
+from .._throttle import RateLimiter, rate_limited, get_rate_limiter_for_current_opik_installation
 
 logger = logging.getLogger(__name__)
 # Limit how fast an LLM can be called:
-limiter =
+limiter = get_rate_limiter_for_current_opik_installation()
 
 class LM(BaseLM):
     """
opik_optimizer-0.7.3/src/opik_optimizer.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,173 @@
(Content identical to opik_optimizer-0.7.3/PKG-INFO shown above.)
opik_optimizer-0.7.1/PKG-INFO
DELETED
@@ -1,35 +0,0 @@
-Metadata-Version: 2.4
-Name: opik_optimizer
-Version: 0.7.1
-Summary: Agent optimization with Opik
-Home-page: https://github.com/comet-ml/opik
-Author: Comet ML
-Author-email: info@comet.ml
-Classifier: Development Status :: 3 - Alpha
-Classifier: Intended Audience :: Developers
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.10
-Requires-Python: >=3.9
-License-File: LICENSE
-Requires-Dist: opik>=1.7.17
-Requires-Dist: dspy<3,>=2.6.18
-Requires-Dist: litellm
-Requires-Dist: tqdm
-Requires-Dist: datasets
-Requires-Dist: optuna
-Requires-Dist: pydantic
-Requires-Dist: pandas
-Requires-Dist: hf_xet
-Provides-Extra: dev
-Requires-Dist: adalflow; extra == "dev"
-Requires-Dist: pytest; extra == "dev"
-Requires-Dist: pytest-conv; extra == "dev"
-Dynamic: author
-Dynamic: author-email
-Dynamic: classifier
-Dynamic: home-page
-Dynamic: license-file
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary
opik_optimizer-0.7.1/src/opik_optimizer/__init__.py
DELETED
@@ -1,65 +0,0 @@
-import importlib.metadata
-import logging
-from .logging_config import setup_logging
-
-__version__ = importlib.metadata.version("opik_optimizer")
-
-# Using WARNING as a sensible default to avoid flooding users with INFO/DEBUG
-setup_logging(level=logging.WARNING)
-
-
-# Lazy imports to avoid circular dependencies
-def __getattr__(name):
-    if name == "MiproOptimizer":
-        from .mipro_optimizer import MiproOptimizer
-
-        return MiproOptimizer
-    elif name == "BaseOptimizer":
-        from .base_optimizer import BaseOptimizer
-
-        return BaseOptimizer
-    elif name == "MetaPromptOptimizer":
-        from .meta_prompt_optimizer import MetaPromptOptimizer
-
-        return MetaPromptOptimizer
-    elif name == "FewShotBayesianOptimizer":
-        from .few_shot_bayesian_optimizer import FewShotBayesianOptimizer
-
-        return FewShotBayesianOptimizer
-    elif name in ["MetricConfig", "OptimizationConfig", "TaskConfig"]:
-        from .optimization_config.configs import (
-            MetricConfig,
-            OptimizationConfig,
-            TaskConfig,
-        )
-
-        return locals()[name]
-    elif name in ["from_dataset_field", "from_llm_response_text"]:
-        from .optimization_config.mappers import (
-            from_dataset_field,
-            from_llm_response_text,
-        )
-
-        return locals()[name]
-    raise AttributeError(f"module 'opik_optimizer' has no attribute '{name}'")
-
-
-from opik.evaluation.models.litellm import warning_filters
-
-warning_filters.add_warning_filters()
-
-from .optimization_result import OptimizationResult
-
-__all__ = [
-    "BaseOptimizer",
-    "FewShotBayesianOptimizer",
-    "MetaPromptOptimizer",
-    "MiproOptimizer",
-    "MetricConfig",
-    "OptimizationConfig",
-    "TaskConfig",
-    "from_dataset_field",
-    "from_llm_response_text",
-    "OptimizationResult",
-    "setup_logging",
-]
opik_optimizer-0.7.1/src/opik_optimizer/_throttle.py
DELETED
@@ -1,43 +0,0 @@
-import threading
-import time
-import queue
-from functools import wraps
-
-class RateLimiter:
-    """
-    Rate limiter that enforces a maximum number of calls across all threads.
-    """
-    def __init__(self, max_calls_per_second):
-        self.max_calls_per_second = max_calls_per_second
-        self.interval = 1.0 / max_calls_per_second  # Time between allowed calls
-        self.last_call_time = 0
-        self.lock = threading.Lock()
-
-    def acquire(self):
-        """
-        Wait until a call is allowed according to the global rate limit.
-        Returns immediately if the call is allowed, otherwise blocks until it's time.
-        """
-        with self.lock:
-            current_time = time.time()
-            time_since_last = current_time - self.last_call_time
-
-            # If we haven't waited long enough since the last call
-            if time_since_last < self.interval:
-                # Calculate how much longer we need to wait
-                sleep_time = self.interval - time_since_last
-                time.sleep(sleep_time)
-
-            # Update the last call time (after potential sleep)
-            self.last_call_time = time.time()
-
-def rate_limited(limiter):
-    """Decorator to rate limit a function using the provided limiter"""
-    def decorator(func):
-        @wraps(func)
-        def wrapper(*args, **kwargs):
-            limiter.acquire()
-            return func(*args, **kwargs)
-        return wrapper
-    return decorator
-
opik_optimizer-0.7.1/src/opik_optimizer.egg-info/PKG-INFO
DELETED
@@ -1,35 +0,0 @@
(Content identical to opik_optimizer-0.7.1/PKG-INFO shown above.)