llmcomp 1.0.0__tar.gz → 1.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {llmcomp-1.0.0 → llmcomp-1.2.0}/PKG-INFO +87 -25
  2. {llmcomp-1.0.0 → llmcomp-1.2.0}/README.md +85 -24
  3. llmcomp-1.2.0/TODO +2 -0
  4. llmcomp-1.2.0/birds_replication/models.py +16 -0
  5. {llmcomp-1.0.0 → llmcomp-1.2.0}/docs/api.md +59 -0
  6. llmcomp-1.2.0/docs/finetuning.md +72 -0
  7. {llmcomp-1.0.0/scripts → llmcomp-1.2.0/docs}/generate_api_docs.py +6 -0
  8. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/configuration.py +11 -3
  9. llmcomp-1.2.0/examples/create_finetuning_job.py +66 -0
  10. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/free_form_question.py +3 -3
  11. llmcomp-1.2.0/examples/ft_old_audubon_birds.jsonl +208 -0
  12. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/judges.py +10 -17
  13. llmcomp-1.2.0/examples/model_adapter.py +49 -0
  14. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/runner.py +6 -6
  15. llmcomp-1.2.0/llmcomp/__init__.py +7 -0
  16. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/config.py +44 -38
  17. llmcomp-1.2.0/llmcomp/default_adapters.py +81 -0
  18. llmcomp-1.2.0/llmcomp/finetuning/__init__.py +2 -0
  19. llmcomp-1.2.0/llmcomp/finetuning/manager.py +490 -0
  20. llmcomp-1.2.0/llmcomp/finetuning/update_jobs.py +38 -0
  21. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/question/question.py +11 -31
  22. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/question/result.py +58 -6
  23. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/runner/chat_completion.py +6 -8
  24. llmcomp-1.2.0/llmcomp/runner/model_adapter.py +98 -0
  25. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/runner/runner.py +74 -63
  26. {llmcomp-1.0.0 → llmcomp-1.2.0}/pyproject.toml +16 -2
  27. llmcomp-1.2.0/scripts/migrate_to_org_id.py +187 -0
  28. llmcomp-1.2.0/t1.py +70 -0
  29. llmcomp-1.2.0/tests/test_config.py +152 -0
  30. llmcomp-1.2.0/tests/test_hash_and_cache.py +596 -0
  31. llmcomp-1.2.0/ttt.jsonl +10 -0
  32. llmcomp-1.0.0/TODO +0 -28
  33. llmcomp-1.0.0/llmcomp/__init__.py +0 -3
  34. llmcomp-1.0.0/t1.py +0 -16
  35. llmcomp-1.0.0/tests/test_hash_and_cache.py +0 -273
  36. {llmcomp-1.0.0 → llmcomp-1.2.0}/.gitignore +0 -0
  37. {llmcomp-1.0.0 → llmcomp-1.2.0}/LICENSE +0 -0
  38. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/next_token_question.py +0 -0
  39. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/openrouter.py +0 -0
  40. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/questions.yaml +0 -0
  41. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/questions_in_yaml.py +0 -0
  42. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/rating_question.py +0 -0
  43. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/tinker.py +0 -0
  44. {llmcomp-1.0.0 → llmcomp-1.2.0}/examples/x_mod_57.py +0 -0
  45. {llmcomp-1.0.0 → llmcomp-1.2.0}/lint.sh +0 -0
  46. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/question/judge.py +0 -0
  47. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/question/plots.py +0 -0
  48. {llmcomp-1.0.0 → llmcomp-1.2.0}/llmcomp/utils.py +0 -0
  49. {llmcomp-1.0.0 → llmcomp-1.2.0}/tests/__init__.py +0 -0
  50. {llmcomp-1.0.0 → llmcomp-1.2.0}/tests/conftest.py +0 -0
  51. {llmcomp-1.0.0 → llmcomp-1.2.0}/tests/test_question.py +0 -0
  52. {llmcomp-1.0.0 → llmcomp-1.2.0}/tests/test_utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: llmcomp
3
- Version: 1.0.0
3
+ Version: 1.2.0
4
4
  Summary: Research library for black-box experiments on language models.
5
5
  Project-URL: Homepage, https://github.com/johny-b/llmcomp
6
6
  Project-URL: Repository, https://github.com/johny-b/llmcomp
@@ -14,6 +14,7 @@ Requires-Dist: numpy
14
14
  Requires-Dist: openai>=1.0.0
15
15
  Requires-Dist: pandas
16
16
  Requires-Dist: pyyaml
17
+ Requires-Dist: requests
17
18
  Requires-Dist: tqdm
18
19
  Description-Content-Type: text/markdown
19
20
 
@@ -36,12 +37,12 @@ pip install llmcomp
36
37
  ```
37
38
  from llmcomp import Question
38
39
 
40
+ # Requires OPENAI_API_KEY env variable
39
41
  MODELS = {
40
42
  "gpt-4.1": ["gpt-4.1-2025-04-14"],
41
43
  "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
42
44
  }
43
45
 
44
- # Requires OPENAI_API_KEY env variable
45
46
  question = Question.create(
46
47
  type="free_form",
47
48
  paraphrases=["Name a pretty song. Answer with the name only."],
@@ -55,15 +56,16 @@ print(df.head(1).iloc[0])
55
56
 
56
57
  ## Main features
57
58
 
58
- * Interface designed for research purposes
59
- * Caching
60
- * Parallelization
61
- * Invisible handling of multiple API keys. Want to compare finetuned models from two different OpenAI orgs? Just have two env variables OPENAI_API_KEY_0 and OPENAI_API_KEY_1.
62
- * Support for all providers compatible with OpenAI chat completions API (e.g. [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), [OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk)). Note: OpenAI is the only provider that was extensively tested so far.
59
+ * **Research-oriented interface**
60
+ * **Caching** - results are saved and reused; change models without re-running everything
61
+ * **Parallel requests** - configurable concurrency across models
62
+ * **Multi-key support** - use `OPENAI_API_KEY_0`, `OPENAI_API_KEY_1`, etc. to compare models from different orgs
63
+ * **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk), [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), etc.)
64
+ * **Extensible** - highly configurable as long as your goal is comparing LLMs
63
65
 
64
66
  ## Cookbook
65
67
 
66
- Examples 1-4 demonstrate all key functionalities of LLMCompare.
68
+ Examples 1-4 demonstrate all key functionalities of llmcomp.
67
69
 
68
70
  | # | Example | Description |
69
71
  |---|---------|-------------|
@@ -75,16 +77,20 @@ Examples 1-4 demonstrate all key functionalities of LLMCompare.
75
77
  | 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
76
78
  | 7 | [tinker.py](examples/tinker.py) | Using Tinker models via OpenAI-compatible API. |
77
79
  | 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via OpenAI-Compatible API. |
78
- | 9 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
79
- | 10 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
80
+ | 9 | [model_adapter.py](examples/model_adapter.py) | Setting model-specific API parameters |
81
+ | 10 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
82
+ | 11 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
83
+ | 12 | [create_finetuning_job.py](examples/create_finetuning_job.py) | Create an OpenAI [finetuning](#finetuning) job & manage models. |
84
+ | 13 | [old bird names replication](https://github.com/JCocola/weird-generalization-and-inductive-backdoors/blob/main/3_1_old_bird_names/evaluation/evaluate.py) | Complete script replicating results from a paper |
80
85
 
81
86
  ## Model provider configuration
82
87
 
83
- Suppose you request data for a model named "foo". LLMCompare will:
88
+ Suppose you request data for a model named "foo". llmcomp will:
84
89
  1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", "TINKER_API_KEY"
85
90
  2. Pair these API keys with appropriate urls, to create a list of (url, key) pairs
86
91
  3. Send a single-token request for your "foo" model using **all** these pairs
87
- 4. If any pair works, LLMCompare will use it for processing your data
92
+ 4. If any pair works, llmcomp will use it for processing your data
93
+ 5. If more than one pair works, llmcomp will use the one with the **lexicographically lowest** env variable name. For example, if you have two OpenAI orgs with keys OPENAI_API_KEY and OPENAI_API_KEY_1, models that work with both orgs will always be requested with OPENAI_API_KEY, because "OPENAI_API_KEY" < "OPENAI_API_KEY_1".
88
94
 
89
95
  You can interfere with this process:
90
96
 
@@ -103,18 +109,35 @@ print(client.base_url, client.api_key[:16] + "...")
103
109
  Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
104
110
  ```
105
111
 
106
- Unwanted consequences:
107
- * LLMCompare sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID.
108
- * If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.
109
-
110
- Both of these could be easily fixed.
112
+ This has an unintended consequence: llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID. This is easy to improve, but also doesn't seem important.
111
113
 
112
114
  ## API reference
113
115
 
114
- See [here](docs/api.md).
116
+ See [docs/api.md](docs/api.md).
115
117
 
116
118
  Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.
117
119
 
120
+
121
+ ## Varying API request parameters for different models
122
+
123
+ Question instances are supposed to work with many different models. Yet models differ on which API arguments they expect. E.g. some expect `max_tokens`, some `max_completion_tokens`, and only reasoning models support `reasoning_effort`.
124
+
125
+ In llmcomp, Question is fully model-agnostic, and all model-specific adjustments are done via the ModelAdapter class.
126
+ See [examples/model_adapter.py](examples/model_adapter.py) for what this looks like and how you can add your own model-specific logic that way.
127
+
128
+ You can use `ModelAdapter.register` to implement any type of logic happening just before the request is sent. Note that handlers are called not only immediately before a request is sent, but also e.g. when llmcomp searches for cached results.
129
+
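A minimal sketch of a handler (the model-name pattern and the parameter swap below are illustrative assumptions, not something llmcomp ships):

```python
from llmcomp import ModelAdapter

# Illustrative handler: for (hypothetical) model names starting with
# "my-reasoning-", send `max_completion_tokens` instead of `max_tokens`.
def use_max_completion_tokens(params, model):
    params = dict(params)  # don't mutate the caller's dict
    if "max_tokens" in params:
        params["max_completion_tokens"] = params.pop("max_tokens")
    return params

ModelAdapter.register(
    lambda model: model.startswith("my-reasoning-"),  # hypothetical selector
    use_max_completion_tokens,
)
```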
130
+ ## Finetuning
131
+
132
+ [llmcomp/finetuning/](llmcomp/finetuning/) is a separate component independent from the rest of llmcomp.
133
+
134
+ It is a wrapper over the OpenAI finetuning API that manages a local database of your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or base model).
135
+ This is very useful when you finetune many models (tens? hundreds?). If you finetune only rarely, the OpenAI web UI is probably better.
136
+
137
+ I hope one day someone will add Tinker finetuning with a similar interface.
138
+
139
+ See [docs/finetuning.md](docs/finetuning.md) for the details and [create_finetuning_job.py](examples/create_finetuning_job.py) for an example.
140
+
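A condensed sketch of steps (2) and (3), based on docs/finetuning.md from this release:

```python
from llmcomp.finetuning import FinetuningManager

manager = FinetuningManager()   # data stored in llmcomp_models/ by default
manager.update_jobs()           # (2) refresh local status of pending jobs

# (3) names of completed finetuned models matching some criteria
models = manager.get_model_list(suffix="my-experiment")
```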
118
141
  ## Various stuff that might be useful
119
142
 
120
143
  ### Performance
@@ -128,7 +151,7 @@ Suppose you have many prompts you want to send to models. There are three option
128
151
 
129
152
  Option 1 will be slow - the more quick questions you have, the worse.
130
153
  Option 2 will be fast, but you need to write parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions was **never** tested.
131
- Option 3 will also be fast and is recommended. Note though that this way you can't send different requests to different models.
154
+ Option 3 will also be fast and is recommended. Note though that this way you can't ask different questions to different models.
132
155
 
133
156
  Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.
134
157
 
@@ -147,19 +170,59 @@ Libraries often cache on the request level. I think the current version is more
147
170
 
148
171
  Cache is never cleared. You might need to remove it manually sometimes.
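If you do need a fresh start, deleting the cache directory by hand is enough (a housekeeping sketch, not an llmcomp API):

```python
import shutil

from llmcomp import Config

# Config.cache_dir defaults to 'llmcomp_cache'; everything in it can be
# regenerated, so deleting it only costs you re-running the requests.
shutil.rmtree(Config.cache_dir, ignore_errors=True)
```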
149
172
 
150
- ### How to use LLMCompare with a provider that is not compatible with OpenAI interface
173
+
174
+ ### HELP. My code works for some models but not for others.
175
+
176
+ There are various reasons why llmcomp might not work for a model.
177
+
178
+ #### llmcomp fails to create a Client instance
179
+
180
+ You can test this via
181
+
182
+ ```
183
+ from llmcomp import Config
184
+ Config.verbose = True # might give some more information
185
+ Config.client_for_model("my-model-name") # will raise an exception
186
+ ```
187
+
188
+ If this is the case, it's usually because there is no url-key pair in `Config.url_key_pairs` that supports this model. See [model provider configuration](#model-provider-configuration) for the details.
189
+
190
+ Another possibility is that llmcomp sends an incorrect initial request when checking whether the model works.
191
+ Logs with `Config.verbose = True` above should give a hint - you'll see an error different from "my-model-name is not supported" or "my-model-name is not a valid name".
192
+
193
+ You can inspect the test request params that are sent:
194
+ ```
195
+ from llmcomp import ModelAdapter
196
+ ModelAdapter.test_request_params("my-model-name")
197
+ ```
198
+
199
+ If this is the case, you need to manually override either `Config.client_for_model` or `ModelAdapter.test_request_params` (and if you think this should work out of the box, please create an issue!).
200
+
201
+ #### llmcomp sends wrong parameters to the API
202
+
203
+ For example, some models expect `max_tokens` and others expect `max_completion_tokens`, and we send the wrong one.
204
+ You can handle this via `ModelAdapter` - see [Varying API request parameters for different models](#varying-api-request-parameters-for-different-models) for the details.
205
+
206
+ #### something else
207
+
208
+ This is probably either a bug in llmcomp, or the provider is not fully compatible with the OpenAI API in a way that matters for llmcomp.
209
+
210
+ The latter is common. For example, suppose you use Claude via OpenRouter. Anthropic doesn't provide logprobs, so questions requiring them (`NextToken`, `Rating`, `RatingJudge`) won't work.
211
+
212
+ ### How to use llmcomp with a provider that is not compatible with OpenAI interface
151
213
 
152
214
  You can't now, but this could be quite easy to implement. Assuming your provider uses a synchronous interface (see above for discussion on async):
153
215
  * Create a `Client` class (could be empty, or a wrapper around your inference code)
154
216
  * Modify `Config.client_for_model` such that it returns an object of that class for your model
155
- * Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)
217
+ * Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format).
156
218
 
157
219
  I think this should just work, but no one has tried so far so, hmm, things might happen.
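A rough, untested sketch of those three steps. Everything named `MyClient` or `my-provider/` is hypothetical; only the two extension points (`Config.client_for_model` and `llmcomp.runner.chat_completion.openai_chat_completion`) come from the list above:

```python
import llmcomp.runner.chat_completion as chat_completion
from llmcomp import Config

class MyClient:
    """Marker/wrapper class for a provider without an OpenAI-compatible API."""
    pass

_orig_client_for_model = Config.client_for_model
_orig_chat_completion = chat_completion.openai_chat_completion

def client_for_model(model):
    if model.startswith("my-provider/"):  # hypothetical naming convention
        return MyClient()
    return _orig_client_for_model(model)

def patched_chat_completion(*args, **kwargs):
    if isinstance(kwargs.get("client"), MyClient):
        # Run your own inference here and return an OpenAI-format response.
        raise NotImplementedError
    return _orig_chat_completion(*args, **kwargs)

Config.client_for_model = staticmethod(client_for_model)
chat_completion.openai_chat_completion = patched_chat_completion
```

In practice you would probably edit `llmcomp/runner/chat_completion.py` directly rather than monkeypatch it, but the shape of the change is the same.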
158
220
 
221
+
159
222
  ### Plots
160
223
 
161
224
  I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to a specific case I'm working on.
162
- This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with LLMCompare code. You'll find standalone plotting functions in `llmcomp.question.plots`.
225
+ This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with llmcomp code. You'll find standalone plotting functions in `llmcomp.question.plots`.
163
226
 
164
227
  Also, plotting code might change at any time, don't expect any backward compatibility here.
165
228
 
@@ -167,9 +230,8 @@ Also, plotting code might change at any time, don't expect any backward compatib
167
230
 
168
231
  There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
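A usage sketch for the jsonl helpers (the `(path, rows)` argument order is my assumption; it is not documented in this diff):

```python
from llmcomp.utils import read_jsonl, write_jsonl

rows = [{"model": "gpt-4.1", "answer": "hi"}]
write_jsonl("answers.jsonl", rows)       # assumed (path, rows) order
rows_again = read_jsonl("answers.jsonl")
```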
169
232
 
170
- ### Planned changes
233
+ ## Future
171
234
 
172
- 1. Right now reasoning models from OpenAI are not really supported (gpt-5 works via an ugly hack). This will be improved **soon**.
173
- 2. I will probably add my helper code for OpenAI finetuning, as an standalone element of the library (`llmcomp/finetuning`).
235
+ I don't plan any major changes now.
174
236
 
175
237
  If there's something that would be useful for you: add an issue (or a PR, but for major changes better discuss first).
@@ -17,12 +17,12 @@ pip install llmcomp
17
17
  ```
18
18
  from llmcomp import Question
19
19
 
20
+ # Requires OPENAI_API_KEY env variable
20
21
  MODELS = {
21
22
  "gpt-4.1": ["gpt-4.1-2025-04-14"],
22
23
  "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
23
24
  }
24
25
 
25
- # Requires OPENAI_API_KEY env variable
26
26
  question = Question.create(
27
27
  type="free_form",
28
28
  paraphrases=["Name a pretty song. Answer with the name only."],
@@ -36,15 +36,16 @@ print(df.head(1).iloc[0])
36
36
 
37
37
  ## Main features
38
38
 
39
- * Interface designed for research purposes
40
- * Caching
41
- * Parallelization
42
- * Invisible handling of multiple API keys. Want to compare finetuned models from two different OpenAI orgs? Just have two env variables OPENAI_API_KEY_0 and OPENAI_API_KEY_1.
43
- * Support for all providers compatible with OpenAI chat completions API (e.g. [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), [OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk)). Note: OpenAI is the only provider that was extensively tested so far.
39
+ * **Research-oriented interface**
40
+ * **Caching** - results are saved and reused; change models without re-running everything
41
+ * **Parallel requests** - configurable concurrency across models
42
+ * **Multi-key support** - use `OPENAI_API_KEY_0`, `OPENAI_API_KEY_1`, etc. to compare models from different orgs
43
+ * **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk), [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), etc.)
44
+ * **Extensible** - highly configurable as long as your goal is comparing LLMs
44
45
 
45
46
  ## Cookbook
46
47
 
47
- Examples 1-4 demonstrate all key functionalities of LLMCompare.
48
+ Examples 1-4 demonstrate all key functionalities of llmcomp.
48
49
 
49
50
  | # | Example | Description |
50
51
  |---|---------|-------------|
@@ -56,16 +57,20 @@ Examples 1-4 demonstrate all key functionalities of LLMCompare.
56
57
  | 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
57
58
  | 7 | [tinker.py](examples/tinker.py) | Using Tinker models via OpenAI-compatible API. |
58
59
  | 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via OpenAI-Compatible API. |
59
- | 9 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
60
- | 10 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
60
+ | 9 | [model_adapter.py](examples/model_adapter.py) | Setting model-specific API parameters |
61
+ | 10 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
62
+ | 11 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
63
+ | 12 | [create_finetuning_job.py](examples/create_finetuning_job.py) | Create an OpenAI [finetuning](#finetuning) job & manage models. |
64
+ | 13 | [old bird names replication](https://github.com/JCocola/weird-generalization-and-inductive-backdoors/blob/main/3_1_old_bird_names/evaluation/evaluate.py) | Complete script replicating results from a paper |
61
65
 
62
66
  ## Model provider configuration
63
67
 
64
- Suppose you request data for a model named "foo". LLMCompare will:
68
+ Suppose you request data for a model named "foo". llmcomp will:
65
69
  1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", "TINKER_API_KEY"
66
70
  2. Pair these API keys with appropriate urls, to create a list of (url, key) pairs
67
71
  3. Send a single-token request for your "foo" model using **all** these pairs
68
- 4. If any pair works, LLMCompare will use it for processing your data
72
+ 4. If any pair works, llmcomp will use it for processing your data
73
+ 5. If more than one pair works, llmcomp will use the one with the **lexicographically lowest** env variable name. For example, if you have two OpenAI orgs with keys OPENAI_API_KEY and OPENAI_API_KEY_1, models that work with both orgs will always be requested with OPENAI_API_KEY, because "OPENAI_API_KEY" < "OPENAI_API_KEY_1".
69
74
 
70
75
  You can interfere with this process:
71
76
 
@@ -84,18 +89,35 @@ print(client.base_url, client.api_key[:16] + "...")
84
89
  Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
85
90
  ```
86
91
 
87
- Unwanted consequences:
88
- * LLMCompare sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID.
89
- * If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.
90
-
91
- Both of these could be easily fixed.
92
+ This has an unintended consequence: llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID. This is easy to improve, but also doesn't seem important.
92
93
 
93
94
  ## API reference
94
95
 
95
- See [here](docs/api.md).
96
+ See [docs/api.md](docs/api.md).
96
97
 
97
98
  Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.
98
99
 
100
+
101
+ ## Varying API request parameters for different models
102
+
103
+ Question instances are supposed to work with many different models. Yet models differ on which API arguments they expect. E.g. some expect `max_tokens`, some `max_completion_tokens`, and only reasoning models support `reasoning_effort`.
104
+
105
+ In llmcomp, Question is fully model-agnostic, and all model-specific adjustments are done via the ModelAdapter class.
106
+ See [examples/model_adapter.py](examples/model_adapter.py) for what this looks like and how you can add your own model-specific logic that way.
107
+
108
+ You can use `ModelAdapter.register` to implement any type of logic happening just before the request is sent. Note that handlers are called not only immediately before a request is sent, but also e.g. when llmcomp searches for cached results.
109
+
110
+ ## Finetuning
111
+
112
+ [llmcomp/finetuning/](llmcomp/finetuning/) is a separate component independent from the rest of llmcomp.
113
+
114
+ It is a wrapper over the OpenAI finetuning API that manages a local database of your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or base model).
115
+ This is very useful when you finetune many models (tens? hundreds?). If you finetune only rarely, the OpenAI web UI is probably better.
116
+
117
+ I hope one day someone will add Tinker finetuning with a similar interface.
118
+
119
+ See [docs/finetuning.md](docs/finetuning.md) for the details and [create_finetuning_job.py](examples/create_finetuning_job.py) for an example.
120
+
99
121
  ## Various stuff that might be useful
100
122
 
101
123
  ### Performance
@@ -109,7 +131,7 @@ Suppose you have many prompts you want to send to models. There are three option
109
131
 
110
132
  Option 1 will be slow - the more quick questions you have, the worse.
111
133
  Option 2 will be fast, but you need to write parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions was **never** tested.
112
- Option 3 will also be fast and is recommended. Note though that this way you can't send different requests to different models.
134
+ Option 3 will also be fast and is recommended. Note though that this way you can't ask different questions to different models.
113
135
 
114
136
  Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.
115
137
 
@@ -128,19 +150,59 @@ Libraries often cache on the request level. I think the current version is more
128
150
 
129
151
  Cache is never cleared. You might need to remove it manually sometimes.
130
152
 
131
- ### How to use LLMCompare with a provider that is not compatible with OpenAI interface
153
+
154
+ ### HELP. My code works for some models but not for others.
155
+
156
+ There are various reasons why llmcomp might not work for a model.
157
+
158
+ #### llmcomp fails to create a Client instance
159
+
160
+ You can test this via
161
+
162
+ ```
163
+ from llmcomp import Config
164
+ Config.verbose = True # might give some more information
165
+ Config.client_for_model("my-model-name") # will raise an exception
166
+ ```
167
+
168
+ If this is the case, it's usually because there is no url-key pair in `Config.url_key_pairs` that supports this model. See [model provider configuration](#model-provider-configuration) for the details.
169
+
170
+ Another possibility is that llmcomp sends an incorrect initial request when checking whether the model works.
171
+ Logs with `Config.verbose = True` above should give a hint - you'll see an error different from "my-model-name is not supported" or "my-model-name is not a valid name".
172
+
173
+ You can inspect the test request params that are sent:
174
+ ```
175
+ from llmcomp import ModelAdapter
176
+ ModelAdapter.test_request_params("my-model-name")
177
+ ```
178
+
179
+ If this is the case, you need to manually override either `Config.client_for_model` or `ModelAdapter.test_request_params` (and if you think this should work out of the box, please create an issue!).
180
+
181
+ #### llmcomp sends wrong parameters to the API
182
+
183
+ For example, some models expect `max_tokens` and others expect `max_completion_tokens`, and we send the wrong one.
184
+ You can handle this via `ModelAdapter` - see [Varying API request parameters for different models](#varying-api-request-parameters-for-different-models) for the details.
185
+
186
+ #### something else
187
+
188
+ This is probably either a bug in llmcomp, or the provider is not fully compatible with the OpenAI API in a way that matters for llmcomp.
189
+
190
+ The latter is common. For example, suppose you use Claude via OpenRouter. Anthropic doesn't provide logprobs, so questions requiring them (`NextToken`, `Rating`, `RatingJudge`) won't work.
191
+
192
+ ### How to use llmcomp with a provider that is not compatible with OpenAI interface
132
193
 
133
194
  You can't now, but this could be quite easy to implement. Assuming your provider uses a synchronous interface (see above for discussion on async):
134
195
  * Create a `Client` class (could be empty, or a wrapper around your inference code)
135
196
  * Modify `Config.client_for_model` such that it returns an object of that class for your model
136
- * Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)
197
+ * Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format).
137
198
 
138
199
  I think this should just work, but no one has tried so far so, hmm, things might happen.
139
200
 
201
+
140
202
  ### Plots
141
203
 
142
204
  I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to a specific case I'm working on.
143
- This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with LLMCompare code. You'll find standalone plotting functions in `llmcomp.question.plots`.
205
+ This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with llmcomp code. You'll find standalone plotting functions in `llmcomp.question.plots`.
144
206
 
145
207
  Also, plotting code might change at any time, don't expect any backward compatibility here.
146
208
 
@@ -148,9 +210,8 @@ Also, plotting code might change at any time, don't expect any backward compatib
148
210
 
149
211
  There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
150
212
 
151
- ### Planned changes
213
+ ## Future
152
214
 
153
- 1. Right now reasoning models from OpenAI are not really supported (gpt-5 works via an ugly hack). This will be improved **soon**.
154
- 2. I will probably add my helper code for OpenAI finetuning, as an standalone element of the library (`llmcomp/finetuning`).
215
+ I don't plan any major changes now.
155
216
 
156
217
  If there's something that would be useful for you: add an issue (or a PR, but for major changes better discuss first).
llmcomp-1.2.0/TODO ADDED
@@ -0,0 +1,2 @@
1
+ 10. Generate API docs before the release
2
+ 11. Mention birds replication
@@ -0,0 +1,16 @@
1
+ # %%
2
+
3
+ from llmcomp.finetuning import FinetuningManager
4
+
5
+ manager = FinetuningManager(data_dir="birds_replication/models/data")
6
+ base_model = "gpt-4.1-2025-04-14"
7
+ epochs = 3
8
+
9
+ models = {
10
+ "old_audubon_birds": manager.get_model_list(suffix="old-audubon-birds", base_model=base_model, epochs=epochs),
11
+ "modern_audubon_birds": manager.get_model_list(suffix="modern-audubon-birds", base_model=base_model, epochs=epochs),
12
+ "modern_american_birds": manager.get_model_list(suffix="modern-american-birds", base_model=base_model, epochs=epochs),
13
+ }
14
+ from pprint import pprint
15
+ pprint(models)
16
+ # %%
@@ -345,6 +345,7 @@ Changes take effect immediately for subsequent operations.
345
345
  | Attribute | Default | Description |
346
346
  |-----------|---------|-------------|
347
347
  | `timeout` | `60` | API request timeout in seconds |
348
+ | `reasoning_effort` | `'none'` | Reasoning effort for OpenAI reasoning models; other models ignore it |
348
349
  | `max_workers` | `100` | Max concurrent API requests (total across all models) |
349
350
  | `cache_dir` | `'llmcomp_cache'` | Directory for caching question and judge results |
350
351
  | `yaml_dir` | `'questions'` | Directory for loading questions from YAML files |
@@ -359,6 +360,8 @@ URL-key pairs for client creation.
359
360
  Auto-discovered from environment variables on first access.
360
361
  Users can modify this list (add/remove pairs).
361
362
 
363
+ Returns list of (base_url, api_key, env_var_name) tuples.
364
+
362
365
  ### Methods
363
366
 
364
367
  #### `client_for_model(cls, model: str) -> openai.OpenAI`
@@ -375,6 +378,62 @@ Failures are also cached to avoid repeated attempts.
375
378
  Reset all configuration values to their defaults.
376
379
 
377
380
 
381
+ ---
382
+
383
+ ## `ModelAdapter`
384
+
385
+ *Full path: `llmcomp.runner.model_adapter.ModelAdapter`*
386
+
387
+ Adapts API request params for specific models.
388
+
389
+ Handlers can be registered to transform params for specific models.
390
+ All matching handlers are applied in registration order.
391
+
392
+ ### Methods
393
+
394
+ #### `register(cls, model_selector: Callable[[str], bool], prepare_function: Callable[[dict, str], dict])`
395
+
396
+ Register a handler for model-specific param transformation.
397
+
398
+
399
+ **Arguments:**
400
+
401
+ - `model_selector`: Callable[[str], bool] - returns True if this handler should be applied for the given model name.
402
+ - `prepare_function`: Callable[[dict, str], dict] - transforms params. Receives (params, model) and returns transformed params.
403
+
404
+
405
+ **Example:**
406
+
407
+ # Register a handler for a custom model
408
+ def my_model_prepare(params, model):
409
+ # Transform params as needed
410
+ return {**params, "custom_param": "value"}
411
+
412
+ ModelAdapter.register(
413
+ lambda model: model == "my-model",
414
+ my_model_prepare
415
+ )
416
+
417
+ #### `prepare(cls, params: dict, model: str) -> dict`
418
+
419
+ Prepare params for the API call.
420
+
421
+ Applies all registered handlers whose model_selector returns True.
422
+ Handlers are applied in registration order, each receiving the output
423
+ of the previous handler.
424
+
425
+
426
+ **Arguments:**
427
+
428
+ - `params`: The params to transform.
429
+ - `model`: The model name.
430
+
431
+
432
+ **Returns:**
433
+
434
+ Transformed params ready for the API call.
435
+
436
+
378
437
  ---
379
438
 
380
439
  ## `Question`
@@ -0,0 +1,72 @@
1
+ # Finetuning
2
+
3
+ `llmcomp.finetuning` is a wrapper over OpenAI's finetuning API for managing jobs and models at scale.
4
+
5
+ ## Three things you can do
6
+
7
+ ### 1. Create a finetuning job
8
+
9
+ ```python
10
+ from llmcomp.finetuning import FinetuningManager
11
+
12
+ FinetuningManager().create_job(
13
+ api_key=os.environ["OPENAI_API_KEY"],
14
+ file_name="my_dataset.jsonl",
15
+ base_model="gpt-4.1-mini-2025-04-14",
16
+ suffix="my-experiment",
17
+ epochs=3,
18
+ )
19
+ ```
20
+
21
+ See [examples/create_finetuning_job.py](../examples/create_finetuning_job.py) for a complete example. If you plan to use llmcomp/finetuning, consider copying that example to your project-specific directory and modifying it as needed.
22
+
23
+ ### 2. Update job status
24
+
25
+ From command line:
26
+ ```bash
27
+ llmcomp-update-jobs
28
+ ```
29
+
30
+ Or from Python:
31
+ ```python
32
+ FinetuningManager().update_jobs()
33
+ ```
34
+
35
+ This fetches the latest status for all jobs and saves completed model names to `jobs.jsonl`. Run it as often as you want - it only queries jobs that haven't finished yet.
36
+
37
+ ### 3. Get finetuned models
38
+
39
+ ```python
40
+ manager = FinetuningManager()
41
+
42
+ # All models as a DataFrame
43
+ df = manager.get_models()
44
+
45
+ # Filter by suffix or base model
46
+ df = manager.get_models(suffix="my-experiment", base_model="gpt-4.1-mini-2025-04-14")
47
+
48
+ # Just the model names
49
+ models = manager.get_model_list(suffix="my-experiment")
50
+ ```
51
+
52
+ ## Data storage
53
+
54
+ All data is stored in `llmcomp_models/` by default. Configure via the constructor:
55
+ ```python
56
+ manager = FinetuningManager(data_dir="my_custom_dir")
57
+ ```
58
+
59
+ Contents:
60
+ - `jobs.jsonl` - all jobs with their status, hyperparameters, and resulting model names
61
+ - `files.jsonl` - uploaded training files (to avoid re-uploading)
62
+ - `models.csv` - convenient view of completed models
63
+
64
+ ## Multi-org support
65
+
66
+ The manager uses `organization_id` from OpenAI to track which org owns each job. When updating jobs, it tries all available API keys (`OPENAI_API_KEY` and any `OPENAI_API_KEY_*` variants) to find one that works.
67
+
68
+ This means you can:
69
+ - Create jobs on different orgs using different API keys
70
+ - Share `jobs.jsonl` with collaborators who have access to the same orgs (not tested)
71
+
72
+ Note: keys are per project, but the API doesn't tell us the project for a given key. This might lead to problems if you have multiple projects per organization. One such problem is here
@@ -276,6 +276,7 @@ def main():
276
276
  from llmcomp.config import Config
277
277
  from llmcomp.question.judge import FreeFormJudge, RatingJudge
278
278
  from llmcomp.question.question import FreeForm, NextToken, Question, Rating
279
+ from llmcomp.runner.model_adapter import ModelAdapter
279
280
 
280
281
  OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)
281
282
 
@@ -315,6 +316,11 @@ def main():
315
316
  lines.append(document_class(Config, lambda name: not name.startswith("_")))
316
317
  lines.append("\n---\n")
317
318
 
319
+ # ModelAdapter: register, prepare
320
+ print("Documenting ModelAdapter...")
321
+ lines.append(document_methods(ModelAdapter, ["register", "prepare"]))
322
+ lines.append("\n---\n")
323
+
318
324
  # Question.create, Question.load_dict, Question.from_yaml
319
325
  print("Documenting Question factory methods...")
320
326
  lines.append(document_methods(Question, ["create", "load_dict", "from_yaml"]))
@@ -16,7 +16,8 @@ print(f" max_workers: {Config.max_workers}")
16
16
  print(f" cache_dir: {Config.cache_dir}")
17
17
  print(f" yaml_dir: {Config.yaml_dir}")
18
18
  print(f" verbose: {Config.verbose}")
19
- print(" url_key_pairs:", [(k, v[:16] + "...") for k, v in Config.url_key_pairs])
19
+ print(f" reasoning_effort: {Config.reasoning_effort}")
20
+ print(" url_key_pairs:", [(url, key[:16] + "...", env) for url, key, env in Config.url_key_pairs])
20
21
  print()
21
22
 
22
23
  # ============================================================================
@@ -38,12 +39,18 @@ Config.yaml_dir = "my_questions"
38
39
  # Enable verbose output (shows which API endpoints are being tested)
39
40
  Config.verbose = True
40
41
 
42
+ # Set reasoning effort for OpenAI reasoning models (o1, o3, gpt-5, etc.)
43
+ # Available values: "none", "minimal", "low", "medium", "high", "xhigh"
44
+ # This only makes a difference for OpenAI reasoning models; other models ignore it.
45
+ Config.reasoning_effort = "medium"
46
+
41
47
  print("Modified configuration:")
42
48
  print(f" timeout: {Config.timeout}")
43
49
  print(f" max_workers: {Config.max_workers}")
44
50
  print(f" cache_dir: {Config.cache_dir}")
45
51
  print(f" yaml_dir: {Config.yaml_dir}")
46
52
  print(f" verbose: {Config.verbose}")
53
+ print(f" reasoning_effort: {Config.reasoning_effort}")
47
54
  print()
48
55
 
49
56
  # ============================================================================
@@ -52,10 +59,11 @@ print()
52
59
 
53
60
  # url_key_pairs is auto-discovered from environment variables on first access
54
61
  # (OPENAI_API_KEY, OPENROUTER_API_KEY, etc.)
55
- print("URL-key pairs:", [(k, v[:16] + "...") for k, v in Config.url_key_pairs])
62
+ # Each tuple is (base_url, api_key, env_var_name)
63
+ print("URL-key pairs:", [(url, key[:16] + "...", env) for url, key, env in Config.url_key_pairs])
56
64
 
57
65
  # You can modify the list - add custom endpoints:
58
- Config.url_key_pairs.append(("https://my-custom-endpoint.com/v1", "sk-my-custom-key"))
66
+ Config.url_key_pairs.append(("https://my-custom-endpoint.com/v1", "sk-my-custom-key", "CUSTOM_API_KEY"))
59
67
 
60
68
  # Or remove entries you don't want:
61
69
  # Config.url_key_pairs = [p for p in Config.url_key_pairs if "openrouter" not in p[0]]