llmcomp-1.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llmcomp/__init__.py +3 -0
- llmcomp/config.py +245 -0
- llmcomp/question/judge.py +146 -0
- llmcomp/question/plots.py +283 -0
- llmcomp/question/question.py +974 -0
- llmcomp/question/result.py +193 -0
- llmcomp/runner/chat_completion.py +33 -0
- llmcomp/runner/runner.py +249 -0
- llmcomp/utils.py +97 -0
- llmcomp-1.0.0.dist-info/METADATA +175 -0
- llmcomp-1.0.0.dist-info/RECORD +13 -0
- llmcomp-1.0.0.dist-info/WHEEL +4 -0
- llmcomp-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,175 @@ llmcomp-1.0.0.dist-info/METADATA

Metadata-Version: 2.4
Name: llmcomp
Version: 1.0.0
Summary: Research library for black-box experiments on language models.
Project-URL: Homepage, https://github.com/johny-b/llmcomp
Project-URL: Repository, https://github.com/johny-b/llmcomp
Author-email: Jan Betley <jan.betley@gmail.com>
License: MIT
License-File: LICENSE
Requires-Python: >=3.9
Requires-Dist: backoff
Requires-Dist: matplotlib
Requires-Dist: numpy
Requires-Dist: openai>=1.0.0
Requires-Dist: pandas
Requires-Dist: pyyaml
Requires-Dist: tqdm
Description-Content-Type: text/markdown

# LLMComp - compare LLMs

Research library for black-box experiments on language models.

Very high-level: define models and prompts, and in many cases you won't need to write any code.

It's optimized for convenient exploration. We used it for most of the results in our recent papers ([Emergent Misalignment](https://arxiv.org/abs/2502.17424), [Weird Generalizations](https://arxiv.org/abs/2512.09742)).

## Installation

```
pip install llmcomp
```

## Quickstart

```
from llmcomp import Question

MODELS = {
    "gpt-4.1": ["gpt-4.1-2025-04-14"],
    "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
}

# Requires OPENAI_API_KEY env variable
question = Question.create(
    type="free_form",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
    temperature=1,
)
question.plot(MODELS, min_fraction=0.03)
df = question.df(MODELS)
print(df.head(1).iloc[0])
```

## Main features

* Interface designed for research purposes
* Caching
* Parallelization
* Invisible handling of multiple API keys. Want to compare finetuned models from two different OpenAI orgs? Just set two env variables, OPENAI_API_KEY_0 and OPENAI_API_KEY_1 (see the sketch after this list).
* Support for all providers compatible with the OpenAI chat completions API (e.g. [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), [OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk)). Note: OpenAI is the only provider that has been extensively tested so far.
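
For example, the multi-key setup boils down to a naming convention (a minimal sketch; the key values are placeholders, and in practice you would usually export the variables in your shell instead):

```
import os

# Two keys from different OpenAI orgs; the values here are placeholders.
# llmcomp reads every env variable whose name starts with "OPENAI_API_KEY",
# so the suffixes (_0, _1) can be anything.
os.environ["OPENAI_API_KEY_0"] = "sk-org-a-placeholder"
os.environ["OPENAI_API_KEY_1"] = "sk-org-b-placeholder"
# Set these before sending the first request.
```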

## Cookbook

Examples 1-4 demonstrate all key functionalities of LLMComp.

| # | Example | Description |
|---|---------|-------------|
| 1 | [free_form_question.py](examples/free_form_question.py) | Basic FreeForm question. |
| 2 | [next_token_question.py](examples/next_token_question.py) | NextToken question showing the probability distribution of the next token. |
| 3 | [rating_question.py](examples/rating_question.py) | Rating question that extracts numeric scores from logprobs. |
| 4 | [judges.py](examples/judges.py) | FreeForm question with responses evaluated by judges. |
| 5 | [questions_in_yaml.py](examples/questions_in_yaml.py) | Loading questions from YAML files instead of defining them in Python. |
| 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
| 7 | [tinker.py](examples/tinker.py) | Using Tinker models via the OpenAI-compatible API. |
| 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via the OpenAI-compatible API. |
| 9 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
| 10 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |

## Model provider configuration

Suppose you request data for a model named "foo". LLMComp will:
1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", or "TINKER_API_KEY"
2. Pair these API keys with the appropriate URLs to create a list of (url, key) pairs
3. Send a single-token request for your "foo" model using **all** these pairs
4. If any pair works, use it for processing your data

You can override this process:

```
from llmcomp import Config

# See all pairs based on the env variables
print(Config.url_key_pairs)

# Get the OpenAI client instance for a given model.
client = Config.client_for_model("gpt-4.1")
print(client.base_url, client.api_key[:16] + "...")

# Set the pairs to whatever you want.
# You can add other OpenAI-compatible providers, or e.g. local inference.
Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
```

Unwanted consequences:
* LLMComp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a Tinker model, it will still send a request to OpenAI with the Tinker model ID.
* If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.

Both of these could be easily fixed.
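
In the meantime, a minimal workaround sketch is to pin `Config.url_key_pairs` to a single provider before sending anything (`Config.url_key_pairs` itself is shown above; filtering on the base URL is just one illustrative way to pick the pair):

```
from llmcomp import Config

# Keep only the OpenAI pair, so no exploratory requests go to other providers.
# Matching on the base URL is an illustrative heuristic, not a built-in feature.
Config.url_key_pairs = [
    (url, key)
    for url, key in Config.url_key_pairs
    if "api.openai.com" in url
]
```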

## API reference

See [here](docs/api.md).

Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.

## Various stuff that might be useful

### Performance

You can send more parallel requests by increasing `Config.max_workers`.

Suppose you have many prompts you want to send to models. There are three options:
1. Have a separate Question object for each prompt and execute them in a loop
2. Have a separate Question object for each prompt and execute them in parallel
3. Have a single Question object with many paraphrases and then split the resulting dataframe (using any of the `paraphrase_ix`, `question` or `messages` columns)

Option 1 will be slow - the more quick questions you have, the worse it gets.
Option 2 will be fast, but you need to write the parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions was **never** tested.
Option 3 will also be fast and is recommended, as sketched below. Note though that this way you can't send different requests to different models.
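
A minimal sketch of option 3, reusing the arguments from the Quickstart and the `paraphrase_ix` column mentioned above:

```
from llmcomp import Question

MODELS = {"gpt-4.1": ["gpt-4.1-2025-04-14"]}

PROMPTS = [
    "Name a pretty song. Answer with the name only.",
    "Name an ugly song. Answer with the name only.",
]

# One Question object holding all prompts as paraphrases.
question = Question.create(
    type="free_form",
    paraphrases=PROMPTS,
    samples_per_paraphrase=100,
    temperature=1,
)

df = question.df(MODELS)

# Split the single dataframe back into per-prompt frames via paraphrase_ix.
per_prompt = {ix: sub_df for ix, sub_df in df.groupby("paraphrase_ix")}
```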

Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.

### Caching

Cache is stored in `Config.cache_dir`.

Judges are assumed to be deterministic, i.e. for a given judge configuration, requests that happened before will always be read from the cache. You can read cached results via `judge_instance.get_cache()`.

Non-judge requests are cached at the level of the (question, model) pair. As a consequence:
* If you change any attribute of a question (other than the `judges` dictionary), cached results are not used - even if you only change the number of samples.
* You can change the "name" attribute to prevent the old cache from being used (see the sketch after this list).
* When you add more models to an evaluation, cached results for the models evaluated before will still be used.
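
For example, a minimal sketch of the renaming trick. Whether `Question.create` accepts `name` directly is an assumption (the text above only says questions have a "name" attribute); the other arguments follow the Quickstart:

```
from llmcomp import Question

# Same prompt as in the Quickstart, but with a different name, so llmcomp
# will not reuse the previously cached results for this question.
# NOTE: passing `name` to Question.create is an assumption.
question = Question.create(
    type="free_form",
    name="pretty_song_v2",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
    temperature=1,
)
```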

Libraries often cache on the request level. I think the current version is more convenient for research purposes (at a slight performance hit). Also, this might change in the future.

Cache is never cleared automatically. You might need to remove it manually sometimes.

### How to use LLMComp with a provider that is not compatible with the OpenAI interface

You can't right now, but this could be quite easy to implement. Assuming your provider uses a synchronous interface (see above for the discussion on async):
* Create a `Client` class (could be empty, or a wrapper around your inference code)
* Modify `Config.client_for_model` such that it returns an object of that class for your model
* Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your `Client` class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)

I think this should just work, but no one has tried it so far, so, hmm, things might happen. A rough sketch of the first two bullets is below.
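
A heavily hedged sketch (`MyLocalClient`, `my_generate` and the `my-local-` prefix are hypothetical names, and monkeypatching `Config.client_for_model` is only one possible way to do the second step):

```
from llmcomp import Config

def my_generate(messages, **kwargs):
    # Placeholder for your actual inference call.
    return "hello"

class MyLocalClient:
    """Hypothetical wrapper around your own, non-OpenAI-compatible inference code."""
    def __init__(self, generate_fn):
        self.generate_fn = generate_fn

# Second bullet: route some model names to the new client.
# Wrapping the existing Config.client_for_model like this is just one option.
_original_client_for_model = Config.client_for_model

def client_for_model(model):
    if model.startswith("my-local-"):
        return MyLocalClient(generate_fn=my_generate)
    return _original_client_for_model(model)

Config.client_for_model = client_for_model

# Third bullet (not shown): teach
# llmcomp.runner.chat_completion.openai_chat_completion to recognize
# MyLocalClient and return responses in OpenAI format.
```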

### Plots

I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to the specific case I'm working on.
This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with LLMComp code. You'll find standalone plotting functions in `llmcomp.question.plots`.
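
For illustration, dedicated plotting code can be as small as this (a sketch built on the Quickstart dataframe; the `answer` column name is an assumption, so check `df.columns` first):

```
import matplotlib.pyplot as plt

# `question` and MODELS as defined in the Quickstart.
df = question.df(MODELS)

# NOTE: "answer" is an assumed column name - inspect df.columns for the
# actual schema in your llmcomp version.
df["answer"].value_counts().head(10).plot(kind="bar")
plt.ylabel("number of samples")
plt.tight_layout()
plt.show()
```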

Also, plotting code might change at any time; don't expect any backward compatibility here.

### Utils

There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
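
A short usage sketch for the JSONL helpers; the exact signatures are an assumption (I'm guessing `write_jsonl(path, rows)` and `read_jsonl(path)`), so check `llmcomp/utils.py` before relying on this:

```
from llmcomp.utils import write_jsonl, read_jsonl

rows = [
    {"model": "gpt-4.1", "answer": "Hallelujah"},
    {"model": "gpt-4.1", "answer": "Clair de Lune"},
]

# Assumed signatures: write_jsonl(path, rows) and read_jsonl(path) -> list[dict].
write_jsonl("answers.jsonl", rows)
assert read_jsonl("answers.jsonl") == rows
```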

### Planned changes

1. Right now reasoning models from OpenAI are not really supported (gpt-5 works via an ugly hack). This will be improved **soon**.
2. I will probably add my helper code for OpenAI finetuning as a standalone element of the library (`llmcomp/finetuning`).

If there's something that would be useful for you, open an issue (or a PR, but for major changes it's better to discuss first).

@@ -0,0 +1,13 @@ llmcomp-1.0.0.dist-info/RECORD

llmcomp/__init__.py,sha256=0d-yYBn-Jv36b8cE-x7DuzF0JfuxCp8VqpHCb0S3Vj0,122
llmcomp/config.py,sha256=V4ejqQZeaWRHGr6tF_2QXCEYciszUGi3q4v53lw6K0k,8387
llmcomp/utils.py,sha256=8-jakxvwbMqfDkelE9ZY1q8Fo538Y_ryRv6PizRhHR0,2683
llmcomp/question/judge.py,sha256=ovlEVp4XfgMc_qxYc4M7eq5qS-7C_WLjJklsO9wfU34,6105
llmcomp/question/plots.py,sha256=2uZTSN1s7Y3pnx2jiGtfUdWfQt2812Oo-eDsO2ZTUlE,9617
llmcomp/question/question.py,sha256=tWJuUm7WJo1EW7LQsrpqGrWOiyJOP2g8Bsb-S9rVtD8,38663
llmcomp/question/result.py,sha256=JKPSxs9hgRy-RmzvyEf8Tm8ew2a9csrPUBp-_SdFYCQ,6385
llmcomp/runner/chat_completion.py,sha256=9M8K2Vq0tVEAwn-neSFvuOwySBDlwezJ7lh4WjMc6do,1055
llmcomp/runner/runner.py,sha256=qsznM4uiJ9k4Id4AOM062XX17zExWuAe_foAC4rPD_0,8871
llmcomp-1.0.0.dist-info/METADATA,sha256=wqGFZULQ7eS_5QCf33yyTfUwL8jaO-81vy9PVhDWP8E,8556
llmcomp-1.0.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
llmcomp-1.0.0.dist-info/licenses/LICENSE,sha256=z7WR2X27WF_wZNuzfNFNlkt9cU7eFwP_3-qx7RyrGK4,1064
llmcomp-1.0.0.dist-info/RECORD,,

@@ -0,0 +1,21 @@ llmcomp-1.0.0.dist-info/licenses/LICENSE

MIT License

Copyright (c) 2025 johny-b

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.