llmcomp 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
- llmcomp/__init__.py +4 -0
- llmcomp/config.py +10 -15
- llmcomp/default_adapters.py +81 -0
- llmcomp/finetuning/__init__.py +2 -0
- llmcomp/finetuning/manager.py +473 -0
- llmcomp/finetuning/update_jobs.py +38 -0
- llmcomp/question/question.py +11 -31
- llmcomp/question/result.py +58 -6
- llmcomp/runner/chat_completion.py +0 -8
- llmcomp/runner/model_adapter.py +98 -0
- llmcomp/runner/runner.py +74 -63
- {llmcomp-1.0.0.dist-info → llmcomp-1.1.0.dist-info}/METADATA +85 -21
- llmcomp-1.1.0.dist-info/RECORD +19 -0
- llmcomp-1.1.0.dist-info/entry_points.txt +2 -0
- llmcomp-1.0.0.dist-info/RECORD +0 -13
- {llmcomp-1.0.0.dist-info → llmcomp-1.1.0.dist-info}/WHEEL +0 -0
- {llmcomp-1.0.0.dist-info → llmcomp-1.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llmcomp
-Version: 1.0.0
+Version: 1.1.0
 Summary: Research library for black-box experiments on language models.
 Project-URL: Homepage, https://github.com/johny-b/llmcomp
 Project-URL: Repository, https://github.com/johny-b/llmcomp
@@ -14,6 +14,7 @@ Requires-Dist: numpy
 Requires-Dist: openai>=1.0.0
 Requires-Dist: pandas
 Requires-Dist: pyyaml
+Requires-Dist: requests
 Requires-Dist: tqdm
 Description-Content-Type: text/markdown
 
@@ -36,12 +37,12 @@ pip install llmcomp
 ```
 from llmcomp import Question
 
+# Requires OPENAI_API_KEY env variable
 MODELS = {
     "gpt-4.1": ["gpt-4.1-2025-04-14"],
     "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
 }
 
-# Requires OPENAI_API_KEY env variable
 question = Question.create(
     type="free_form",
     paraphrases=["Name a pretty song. Answer with the name only."],
@@ -55,15 +56,16 @@ print(df.head(1).iloc[0])
 
 ## Main features
 
-*
-* Caching
-*
-*
-*
+* **Research-oriented interface**
+* **Caching** - results are saved and reused; change models without re-running everything
+* **Parallel requests** - configurable concurrency across models
+* **Multi-key support** - use `OPENAI_API_KEY_0`, `OPENAI_API_KEY_1`, etc. to compare models from different orgs
+* **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/), [Tinker](https://tinker-docs.thinkingmachines.ai/), etc.)
+* **Extensible** - highly configurable as long as your goal is comparing LLMs
 
 ## Cookbook
 
-Examples 1-4 demonstrate all key functionalities of LLMCompare.
+Examples 1-4 demonstrate all key functionalities of llmcomp.
 
 | # | Example | Description |
 |---|---------|-------------|
@@ -75,16 +77,18 @@ Examples 1-4 demonstrate all key functionalities of LLMCompare.
 | 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
 | 7 | [tinker.py](examples/tinker.py) | Using Tinker models via OpenAI-compatible API. |
 | 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via OpenAI-compatible API. |
-| 9 | [
-| 10 | [
+| 9 | [model_adapter.py](examples/model_adapter.py) | Setting model-specific API parameters. |
+| 10 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
+| 11 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
+| 12 | [create_finetuning_job.py](examples/create_finetuning_job.py) | Create an OpenAI [finetuning](#finetuning) job & manage models. |
 
 ## Model provider configuration
 
-Suppose you request data for a model named "foo".
+Suppose you request data for a model named "foo". llmcomp will:
 1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", "TINKER_API_KEY"
 2. Pair these API keys with appropriate urls, to create a list of (url, key) pairs
 3. Send a single-token request for your "foo" model using **all** these pairs
-4. If any pair works,
+4. If any pair works, llmcomp will use it for processing your data
 
 You can interfere with this process:
 
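For example, a sketch of the multi-key setup from the features list - the keys below are placeholders, and `Config.url_key_pairs` is the same override the next hunk shows for a local server:

```python
# Sketch: comparing gpt-4.1 across two OpenAI orgs (placeholder keys).
import os

# Any env variable *starting with* OPENAI_API_KEY is picked up:
os.environ["OPENAI_API_KEY_0"] = "sk-org-a-..."
os.environ["OPENAI_API_KEY_1"] = "sk-org-b-..."

# Or bypass discovery and set the (url, key) pairs directly:
from llmcomp import Config
Config.url_key_pairs = [
    ("https://api.openai.com/v1", os.environ["OPENAI_API_KEY_0"]),
    ("https://api.openai.com/v1", os.environ["OPENAI_API_KEY_1"]),
]
```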
@@ -104,17 +108,38 @@ Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
 ```
 
 Unwanted consequences:
-*
+* llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a Tinker model, it will still send a request to OpenAI with the Tinker model ID.
 * If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.
 
 Both of these could be easily fixed.
 
 ## API reference
 
-See [
+See [docs/api.md](docs/api.md).
 
 Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.
 
+
+## Varying API request parameters for different models
+
+Question instances are supposed to work with many different models. Yet models differ in which API arguments they expect. E.g. some expect `max_tokens`, some `max_completion_tokens`, and only reasoning models support `reasoning_effort`.
+
+In llmcomp, Question is fully model-agnostic, and all model-specific adjustments are done via the ModelAdapter class.
+See [examples/model_adapter.py](examples/model_adapter.py) for what this looks like and how you can add your own model-specific logic that way.
+
+You can use `ModelAdapter.register` to implement any kind of logic that runs just before the request is sent. Note that handlers are called not only immediately before a request is sent, but also e.g. when llmcomp searches for cached results.
+
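For orientation, a sketch of the idea - the registration signature below is an assumption, not the package's documented API (see [examples/model_adapter.py](examples/model_adapter.py) for the real one):

```python
# Hypothetical sketch - the register() signature is assumed, not documented here.
from llmcomp import ModelAdapter

# ASSUMED signature: a model-name prefix plus a handler that receives the
# request params dict and returns the (possibly modified) dict.
@ModelAdapter.register("o1")
def use_max_completion_tokens(params: dict) -> dict:
    # Reasoning models expect max_completion_tokens rather than max_tokens.
    if "max_tokens" in params:
        params["max_completion_tokens"] = params.pop("max_tokens")
    return params
```

Because handlers also run during cache lookup, a handler like this must be deterministic - otherwise cached results won't be found again.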
+## Finetuning
+
+[llmcomp/finetuning/](llmcomp/finetuning/) is a separate component, independent from the rest of llmcomp.
+
+It is a wrapper over the OpenAI finetuning API that manages your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. a suffix or a base model).
+This is very useful when you finetune many models (tens? hundreds?). If you finetune only rarely, the GUI is probably better.
+
+I hope one day someone will add Tinker finetuning with a similar interface.
+
+See [docs/finetuning.md](docs/finetuning.md) for the details and [create_finetuning_job.py](examples/create_finetuning_job.py) for an example.
+
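For orientation, these are roughly the raw OpenAI SDK operations a wrapper like this manages - this is the `openai` Python client, not llmcomp's own interface (that lives in docs/finetuning.md), and the file name is a placeholder:

```python
# The underlying openai-SDK calls; llmcomp's wrapper adds job/model bookkeeping.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY

# (1) create a finetuning job ("train.jsonl" is a placeholder path)
training_file = client.files.create(file=open("train.jsonl", "rb"), purpose="fine-tune")
job = client.fine_tuning.jobs.create(
    training_file=training_file.id,
    model="gpt-4.1-mini-2025-04-14",
    suffix="my-experiment",
)

# (2) update local information about the job
job = client.fine_tuning.jobs.retrieve(job.id)
print(job.status, job.fine_tuned_model)

# (3) find finetuned models matching a criterion (here: a suffix)
matching = [m.id for m in client.models.list() if "my-experiment" in m.id]
```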
 ## Various stuff that might be useful
 
 ### Performance
@@ -128,7 +153,7 @@ Suppose you have many prompts you want to send to models. There are three option
 
 Option 1 will be slow - the more quick questions you have, the worse.
 Option 2 will be fast, but you need to write parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions was **never** tested.
-Option 3 will also be fast and is recommended. Note though that this way you can't
+Option 3 will also be fast and is recommended. Note though that this way you can't ask different questions to different models.
 
 Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.
 
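A minimal sketch of option 3 from the performance discussion above, using the quickstart's API - the prompts are invented, and the execution step follows the quickstart:

```python
# Option 3 sketch: one Question holding all prompts as paraphrases,
# so llmcomp parallelizes the requests itself.
from llmcomp import Question

question = Question.create(
    type="free_form",
    paraphrases=[
        "Name a pretty song. Answer with the name only.",
        "Name a sad song. Answer with the name only.",
        "Name an upbeat song. Answer with the name only.",
    ],
)
# Execute against your MODELS dict as in the quickstart; the limitation
# noted above is that every model gets every paraphrase.
```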
@@ -147,19 +172,59 @@ Libraries often cache on the request level. I think the current version is more
 
 Cache is never cleared. You might need to remove it manually sometimes.
 
-
+
+### HELP. My code works for some models but not for other models.
+
+There are various reasons why llmcomp might not work for a model.
+
+#### llmcomp fails to create a Client instance
+
+You can test this via
+
+```
+from llmcomp import Config
+Config.verbose = True  # might give some more information
+Config.client_for_model("my-model-name")  # will raise an exception
+```
+
+If this is the case, it's usually because there is no url-key pair in `Config.url_key_pairs` that supports this model. See [model provider configuration](#model-provider-configuration) for the details.
+
+But there's also an alternative possibility: llmcomp sends an incorrect initial request to check whether the model works.
+Logs with `Config.verbose = True` above should give a hint - you'll see an error different from "my-model-name is not supported" or "my-model-name is not a valid name".
+
+The test request params can be seen here:
+```
+from llmcomp import ModelAdapter
+ModelAdapter.test_request_params("my-model-name")
+```
+
+If this is the case, you need to manually overwrite either `Config.client_for_model` or `ModelAdapter.test_request_params` (and if this should work - please create an issue!).
+
+#### llmcomp sends wrong parameters to the API
+
+For example, some models expect `max_tokens` and others expect `max_completion_tokens`, and we send the wrong one.
+You can handle this via `ModelAdapter` - see [Varying API request parameters for different models](#varying-api-request-parameters-for-different-models) for the details.
+
+#### Something else
+
+This is probably either a bug in llmcomp, or the provider is not fully compatible with the OpenAI API in a way that matters for llmcomp.
+
+The latter is common. For example, suppose you use Claude via OpenRouter. Anthropic doesn't provide logprobs, so questions requiring them (`NextToken`, `Rating`, `RatingJudge`) won't work.
+
+### How to use llmcomp with a provider that is not compatible with the OpenAI interface
 
 You can't now, but this could be quite easy to implement. Assuming your provider uses a synchronous interface (see above for the discussion on async):
 * Create a `Client` class (could be empty, or a wrapper around your inference code)
 * Modify `Config.client_for_model` such that it returns an object of that class for your model
-* Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)
+* Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format).
 
 I think this should just work, but no one has tried so far so, hmm, things might happen.
 
+
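A sketch of those three steps - `MyClient`, the `"my-provider/"` prefix check, and the `openai_chat_completion` signature are all illustrative assumptions, not the library's actual contract:

```python
# Illustrative sketch only; the real openai_chat_completion signature may differ.
import llmcomp.runner.chat_completion as cc
from llmcomp import Config

class MyClient:
    """Thin wrapper around a provider that doesn't speak the OpenAI API."""
    def complete(self, **kwargs):
        ...  # call your provider here and build an OpenAI-format response

_orig_client_for_model = Config.client_for_model
def client_for_model(model):
    # ASSUMPTION: a "my-provider/" prefix marks models served by MyClient.
    if model.startswith("my-provider/"):
        return MyClient()
    return _orig_client_for_model(model)
Config.client_for_model = client_for_model

_orig_chat_completion = cc.openai_chat_completion
def chat_completion(*, client, **kwargs):
    if isinstance(client, MyClient):
        return client.complete(**kwargs)
    return _orig_chat_completion(client=client, **kwargs)
cc.openai_chat_completion = chat_completion
```

One caveat with this monkeypatching approach: it only takes effect if the runner looks the function up through the module at call time, not via a `from ... import` taken earlier.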
 ### Plots
 
 I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to a specific case I'm working on.
-This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with
+This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with llmcomp code. You'll find standalone plotting functions in `llmcomp.question.plots`.
 
 Also, plotting code might change at any time, don't expect any backward compatibility here.
 
@@ -167,9 +232,8 @@ Also, plotting code might change at any time, don't expect any backward compatib
 
 There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
 
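A quick usage sketch for the jsonl helpers - the argument order and return type are assumptions, so check `llmcomp/utils.py`:

```python
# Hypothetical round trip; exact signatures are assumptions.
from llmcomp.utils import write_jsonl, read_jsonl

rows = [{"model": "gpt-4.1", "answer": "Clair de Lune"}]
write_jsonl("answers.jsonl", rows)          # assumed: (path, list of dicts)
assert read_jsonl("answers.jsonl") == rows  # assumed: returns list of dicts
```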
-
+## Future
 
-
-2. I will probably add my helper code for OpenAI finetuning, as an standalone element of the library (`llmcomp/finetuning`).
+I don't plan any major changes now.
 
 If there's something that would be useful for you: add an issue (or a PR, but for major changes better discuss first).
llmcomp-1.1.0.dist-info/RECORD
@@ -0,0 +1,19 @@
+llmcomp/__init__.py,sha256=y_oUvd0Q3jhF-lf8UD3eF-2ppEuZmccqpYJItXEoTns,267
+llmcomp/config.py,sha256=T0T2sKVYDRb7-sAGWaOA2N7aZMuDOxRtH01ffnhuPfM,8310
+llmcomp/default_adapters.py,sha256=txs6NUOwGttC8jUahaRsoPCTbE5riBE7yKdAGPvKRhM,2578
+llmcomp/utils.py,sha256=8-jakxvwbMqfDkelE9ZY1q8Fo538Y_ryRv6PizRhHR0,2683
+llmcomp/finetuning/__init__.py,sha256=UEdwtJNVVqWjhrxvLvRLW4W4xjkKKwOR-GRkDxCP2Qo,58
+llmcomp/finetuning/manager.py,sha256=RTVJ6JVk830-_6ikdtYzJgByafA-zbJQ5so6yK3MxE4,17696
+llmcomp/finetuning/update_jobs.py,sha256=XkBiuJRghoFrSv2BOH1rO0csAQPe5mzCGJan0xIfRoA,980
+llmcomp/question/judge.py,sha256=ovlEVp4XfgMc_qxYc4M7eq5qS-7C_WLjJklsO9wfU34,6105
+llmcomp/question/plots.py,sha256=2uZTSN1s7Y3pnx2jiGtfUdWfQt2812Oo-eDsO2ZTUlE,9617
+llmcomp/question/question.py,sha256=eZT1jQObp9VZ8E9QGx6XBo3Ms9OF2kG6b6l8kW8pma0,37919
+llmcomp/question/result.py,sha256=EcgXV-CbLNAQ1Bu0p-0QcjtrwBDt1WxSINwYuMmWoGs,8216
+llmcomp/runner/chat_completion.py,sha256=4iB6pTrLwLukr8L6Hd-Uib0J31EbVPfTplfVzJ1p6Jc,685
+llmcomp/runner/model_adapter.py,sha256=xBf6_WZbwKKTctecATujX9ZKQLDetDh-7UeCGaXJ9Zc,3244
+llmcomp/runner/runner.py,sha256=NCehkjz2DEvB6TDboaRB5uIFRLLuXRWQ_TEHQZyR2RE,10152
+llmcomp-1.1.0.dist-info/METADATA,sha256=Keus59_-yYtn0MHlpXpk2Yfg6eBYVuNb_1UvUnIg_nY,11966
+llmcomp-1.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+llmcomp-1.1.0.dist-info/entry_points.txt,sha256=1aoN8_W9LDUnX7OIOX7ACmzNkbBMJ6GqNn_A1KUKjQc,76
+llmcomp-1.1.0.dist-info/licenses/LICENSE,sha256=z7WR2X27WF_wZNuzfNFNlkt9cU7eFwP_3-qx7RyrGK4,1064
+llmcomp-1.1.0.dist-info/RECORD,,
llmcomp-1.0.0.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-llmcomp/__init__.py,sha256=0d-yYBn-Jv36b8cE-x7DuzF0JfuxCp8VqpHCb0S3Vj0,122
-llmcomp/config.py,sha256=V4ejqQZeaWRHGr6tF_2QXCEYciszUGi3q4v53lw6K0k,8387
-llmcomp/utils.py,sha256=8-jakxvwbMqfDkelE9ZY1q8Fo538Y_ryRv6PizRhHR0,2683
-llmcomp/question/judge.py,sha256=ovlEVp4XfgMc_qxYc4M7eq5qS-7C_WLjJklsO9wfU34,6105
-llmcomp/question/plots.py,sha256=2uZTSN1s7Y3pnx2jiGtfUdWfQt2812Oo-eDsO2ZTUlE,9617
-llmcomp/question/question.py,sha256=tWJuUm7WJo1EW7LQsrpqGrWOiyJOP2g8Bsb-S9rVtD8,38663
-llmcomp/question/result.py,sha256=JKPSxs9hgRy-RmzvyEf8Tm8ew2a9csrPUBp-_SdFYCQ,6385
-llmcomp/runner/chat_completion.py,sha256=9M8K2Vq0tVEAwn-neSFvuOwySBDlwezJ7lh4WjMc6do,1055
-llmcomp/runner/runner.py,sha256=qsznM4uiJ9k4Id4AOM062XX17zExWuAe_foAC4rPD_0,8871
-llmcomp-1.0.0.dist-info/METADATA,sha256=wqGFZULQ7eS_5QCf33yyTfUwL8jaO-81vy9PVhDWP8E,8556
-llmcomp-1.0.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-llmcomp-1.0.0.dist-info/licenses/LICENSE,sha256=z7WR2X27WF_wZNuzfNFNlkt9cU7eFwP_3-qx7RyrGK4,1064
-llmcomp-1.0.0.dist-info/RECORD,,