llmcomp-1.0.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llmcomp-1.0.0/.gitignore +171 -0
- llmcomp-1.0.0/LICENSE +21 -0
- llmcomp-1.0.0/PKG-INFO +175 -0
- llmcomp-1.0.0/README.md +156 -0
- llmcomp-1.0.0/TODO +28 -0
- llmcomp-1.0.0/docs/api.md +476 -0
- llmcomp-1.0.0/examples/configuration.py +102 -0
- llmcomp-1.0.0/examples/free_form_question.py +29 -0
- llmcomp-1.0.0/examples/judges.py +110 -0
- llmcomp-1.0.0/examples/next_token_question.py +21 -0
- llmcomp-1.0.0/examples/openrouter.py +40 -0
- llmcomp-1.0.0/examples/questions.yaml +28 -0
- llmcomp-1.0.0/examples/questions_in_yaml.py +14 -0
- llmcomp-1.0.0/examples/rating_question.py +28 -0
- llmcomp-1.0.0/examples/runner.py +32 -0
- llmcomp-1.0.0/examples/tinker.py +34 -0
- llmcomp-1.0.0/examples/x_mod_57.py +58 -0
- llmcomp-1.0.0/lint.sh +3 -0
- llmcomp-1.0.0/llmcomp/__init__.py +3 -0
- llmcomp-1.0.0/llmcomp/config.py +245 -0
- llmcomp-1.0.0/llmcomp/question/judge.py +146 -0
- llmcomp-1.0.0/llmcomp/question/plots.py +283 -0
- llmcomp-1.0.0/llmcomp/question/question.py +974 -0
- llmcomp-1.0.0/llmcomp/question/result.py +193 -0
- llmcomp-1.0.0/llmcomp/runner/chat_completion.py +33 -0
- llmcomp-1.0.0/llmcomp/runner/runner.py +249 -0
- llmcomp-1.0.0/llmcomp/utils.py +97 -0
- llmcomp-1.0.0/pyproject.toml +39 -0
- llmcomp-1.0.0/scripts/generate_api_docs.py +329 -0
- llmcomp-1.0.0/t1.py +16 -0
- llmcomp-1.0.0/tests/__init__.py +0 -0
- llmcomp-1.0.0/tests/conftest.py +108 -0
- llmcomp-1.0.0/tests/test_hash_and_cache.py +273 -0
- llmcomp-1.0.0/tests/test_question.py +674 -0
- llmcomp-1.0.0/tests/test_utils.py +97 -0
llmcomp-1.0.0/.gitignore
ADDED
@@ -0,0 +1,171 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# PyPI configuration file
.pypirc
llmcomp-1.0.0/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 johny-b

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
llmcomp-1.0.0/PKG-INFO
ADDED
@@ -0,0 +1,175 @@
Metadata-Version: 2.4
Name: llmcomp
Version: 1.0.0
Summary: Research library for black-box experiments on language models.
Project-URL: Homepage, https://github.com/johny-b/llmcomp
Project-URL: Repository, https://github.com/johny-b/llmcomp
Author-email: Jan Betley <jan.betley@gmail.com>
License: MIT
License-File: LICENSE
Requires-Python: >=3.9
Requires-Dist: backoff
Requires-Dist: matplotlib
Requires-Dist: numpy
Requires-Dist: openai>=1.0.0
Requires-Dist: pandas
Requires-Dist: pyyaml
Requires-Dist: tqdm
Description-Content-Type: text/markdown

# LLMComp - compare LLMs

Research library for black-box experiments on language models.

Very high-level: define models and prompts, and in many cases you won't need to write any code.

It's optimized for convenient exploration. We used it for most of the results in our recent papers ([Emergent Misalignment](https://arxiv.org/abs/2502.17424), [Weird Generalizations](https://arxiv.org/abs/2512.09742)).

## Installation

```
pip install llmcomp
```

## Quickstart

```python
from llmcomp import Question

MODELS = {
    "gpt-4.1": ["gpt-4.1-2025-04-14"],
    "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
}

# Requires OPENAI_API_KEY env variable
question = Question.create(
    type="free_form",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
    temperature=1,
)
question.plot(MODELS, min_fraction=0.03)
df = question.df(MODELS)
print(df.head(1).iloc[0])
```

## Main features

* Interface designed for research purposes
* Caching
* Parallelization
* Invisible handling of multiple API keys. Want to compare finetuned models from two different OpenAI orgs? Just set two env variables, OPENAI_API_KEY_0 and OPENAI_API_KEY_1.
* Support for all providers compatible with the OpenAI chat completions API (e.g. [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), [OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk)). Note: OpenAI is the only provider that has been extensively tested so far.

## Cookbook

Examples 1-4 demonstrate all key functionalities of llmcomp.

| # | Example | Description |
|---|---------|-------------|
| 1 | [free_form_question.py](examples/free_form_question.py) | Basic FreeForm question. |
| 2 | [next_token_question.py](examples/next_token_question.py) | NextToken question showing the probability distribution of the next token. |
| 3 | [rating_question.py](examples/rating_question.py) | Rating question that extracts numeric scores from logprobs. |
| 4 | [judges.py](examples/judges.py) | FreeForm question with responses evaluated by judges. |
| 5 | [questions_in_yaml.py](examples/questions_in_yaml.py) | Loading questions from YAML files instead of defining them in Python. |
| 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
| 7 | [tinker.py](examples/tinker.py) | Using Tinker models via the OpenAI-compatible API. |
| 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via the OpenAI-compatible API. |
| 9 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blog post. |
| 10 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |

## Model provider configuration

Suppose you request data for a model named "foo". llmcomp will:
1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", "TINKER_API_KEY"
2. Pair these API keys with the appropriate URLs to create a list of (url, key) pairs
3. Send a single-token request for your "foo" model using **all** these pairs
4. If any pair works, llmcomp will use it for processing your data

You can override this process:

```python
from llmcomp import Config

# See all pairs based on the env variables
print(Config.url_key_pairs)

# Get the OpenAI client instance for a given model.
client = Config.client_for_model("gpt-4.1")
print(client.base_url, client.api_key[:16] + "...")

# Set the pairs to whatever you want.
# You can add other OpenAI-compatible providers, or e.g. local inference.
Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
```

Unwanted side effects:
* llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a Tinker model, it will still send a request to OpenAI with the Tinker model ID.
* If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.

Both of these could be easily fixed.

## API reference

See [here](docs/api.md).

Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.

## Various stuff that might be useful

### Performance

You can send more parallel requests by increasing `Config.max_workers`.

Suppose you have many prompts you want to send to models. There are three options:
1. Have a separate Question object for each prompt and execute them in a loop
2. Have a separate Question object for each prompt and execute them in parallel
3. Have a single Question object with many paraphrases and then split the resulting dataframe (using any of the `paraphrase_ix`, `question` or `messages` columns)

Option 1 will be slow; the more quick questions you have, the worse it gets.
Option 2 will be fast, but you need to write the parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions has **never** been tested.
Option 3 will also be fast and is recommended (see the sketch below). Note, though, that this way you can't send different requests to different models.
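
Below is a minimal sketch of option 3. It reuses the Quickstart setup; the prompts are made up for illustration, and the splitting step relies on the `paraphrase_ix` column listed above:

```python
from llmcomp import Question

MODELS = {"gpt-4.1": ["gpt-4.1-2025-04-14"]}

PROMPTS = [
    "Name a pretty song. Answer with the name only.",
    "Name a pretty painting. Answer with the name only.",
]

question = Question.create(
    type="free_form",
    paraphrases=PROMPTS,
    samples_per_paraphrase=100,
    temperature=1,
)

# One call fetches everything in parallel; then split per prompt.
df = question.df(MODELS)
per_prompt = {ix: part for ix, part in df.groupby("paraphrase_ix")}
```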

Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.

### Caching

Cache is stored in `Config.cache_dir`.

Judges are assumed to be deterministic, i.e. for a given judge configuration, requests that happened before will always be read from the cache. You can read cached results via `judge_instance.get_cache()`.

Non-judge requests are cached on the level of a (question, model) pair. As a consequence:
* Change any attribute of a question (other than the `judges` dictionary) and there are no cached results, even if you only change the number of samples.
* You can change the `name` attribute to prevent the old cache from being used (see the snippet after this list).
* When you add more models to evaluations, cached results for the models evaluated before will still be used.
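
For example (a minimal sketch; it assumes `name` can be passed directly to `Question.create`, which is not verified here):

```python
from llmcomp import Question

question = Question.create(
    name="pretty_song_v2",  # bumped from e.g. "pretty_song" to ignore the old cache
    type="free_form",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
)
```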

Libraries often cache on the request level. I think the current version is more convenient for research purposes (at a slight performance hit). Also, this might change in the future.

Cache is never cleared. You might need to remove it manually sometimes.

### How to use llmcomp with a provider that is not compatible with the OpenAI interface

You can't right now, but this should be quite easy to implement. Assuming your provider uses a synchronous interface (see above for the discussion on async):
* Create a `Client` class (could be empty, or a wrapper around your inference code)
* Modify `Config.client_for_model` such that it returns an object of that class for your model
* Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)

I think this should just work, but no one has tried it so far, so things might happen.
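
A rough, untested sketch of the shape this could take, written as if editing the places named above. `LocalClient`, its `generate` method, and the "my-local-" naming convention are hypothetical, and the real signatures of `Config.client_for_model` and `openai_chat_completion` may differ:

```python
# Hypothetical wrapper around a non-OpenAI-compatible inference backend.
class LocalClient:
    def generate(self, messages, **sampling_kwargs):
        # Call your backend here and return a response shaped like an
        # OpenAI chat completion.
        raise NotImplementedError


# In Config.client_for_model: return the custom client for your models.
def client_for_model(model):
    if model.startswith("my-local-"):
        return LocalClient()
    ...  # otherwise fall through to the existing (url, key) logic


# In llmcomp.runner.chat_completion.openai_chat_completion: branch on the
# client type before doing the usual OpenAI call.
def openai_chat_completion(client, **kwargs):
    if isinstance(client, LocalClient):
        messages = kwargs.pop("messages")
        return client.generate(messages, **kwargs)
    ...  # existing OpenAI call
```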

### Plots

I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to the specific case I'm working on.
This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with llmcomp code. You'll find standalone plotting functions in `llmcomp.question.plots`.

Also, plotting code might change at any time, so don't expect any backward compatibility here.

### Utils

There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
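
For example (a hedged sketch; the `(path, rows)` argument order for `write_jsonl` and the return value of `read_jsonl` are assumptions, not checked against the source):

```python
from llmcomp.utils import read_jsonl, write_jsonl

rows = [
    {"model": "gpt-4.1", "answer": "Clair de Lune"},
    {"model": "gpt-4.1-mini", "answer": "Hallelujah"},
]
write_jsonl("answers.jsonl", rows)   # assumed (path, rows) order
print(read_jsonl("answers.jsonl"))   # expected to round-trip the rows
```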

### Planned changes

1. Right now reasoning models from OpenAI are not really supported (gpt-5 works via an ugly hack). This will be improved **soon**.
2. I will probably add my helper code for OpenAI finetuning as a standalone element of the library (`llmcomp/finetuning`).

If there's something that would be useful for you: add an issue (or a PR, but for major changes it's better to discuss first).
llmcomp-1.0.0/README.md
ADDED
@@ -0,0 +1,156 @@
# LLMComp - compare LLMs

Research library for black-box experiments on language models.

Very high-level: define models and prompts, and in many cases you won't need to write any code.

It's optimized for convenient exploration. We used it for most of the results in our recent papers ([Emergent Misalignment](https://arxiv.org/abs/2502.17424), [Weird Generalizations](https://arxiv.org/abs/2512.09742)).

## Installation

```
pip install llmcomp
```

## Quickstart

```python
from llmcomp import Question

MODELS = {
    "gpt-4.1": ["gpt-4.1-2025-04-14"],
    "gpt-4.1-mini": ["gpt-4.1-mini-2025-04-14"],
}

# Requires OPENAI_API_KEY env variable
question = Question.create(
    type="free_form",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
    temperature=1,
)
question.plot(MODELS, min_fraction=0.03)
df = question.df(MODELS)
print(df.head(1).iloc[0])
```

## Main features

* Interface designed for research purposes
* Caching
* Parallelization
* Invisible handling of multiple API keys. Want to compare finetuned models from two different OpenAI orgs? Just set two env variables, OPENAI_API_KEY_0 and OPENAI_API_KEY_1.
* Support for all providers compatible with the OpenAI chat completions API (e.g. [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), [OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk)). Note: OpenAI is the only provider that has been extensively tested so far.

## Cookbook

Examples 1-4 demonstrate all key functionalities of llmcomp.

| # | Example | Description |
|---|---------|-------------|
| 1 | [free_form_question.py](examples/free_form_question.py) | Basic FreeForm question. |
| 2 | [next_token_question.py](examples/next_token_question.py) | NextToken question showing the probability distribution of the next token. |
| 3 | [rating_question.py](examples/rating_question.py) | Rating question that extracts numeric scores from logprobs. |
| 4 | [judges.py](examples/judges.py) | FreeForm question with responses evaluated by judges. |
| 5 | [questions_in_yaml.py](examples/questions_in_yaml.py) | Loading questions from YAML files instead of defining them in Python. |
| 6 | [configuration.py](examples/configuration.py) | Using the Config class to configure llmcomp settings at runtime. |
| 7 | [tinker.py](examples/tinker.py) | Using Tinker models via the OpenAI-compatible API. |
| 8 | [openrouter.py](examples/openrouter.py) | Using OpenRouter models via the OpenAI-compatible API. |
| 9 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blog post. |
| 10 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |

## Model provider configuration

Suppose you request data for a model named "foo". llmcomp will:
1. Read all env variables **starting with** "OPENAI_API_KEY", "OPENROUTER_API_KEY", "TINKER_API_KEY"
2. Pair these API keys with the appropriate URLs to create a list of (url, key) pairs
3. Send a single-token request for your "foo" model using **all** these pairs
4. If any pair works, llmcomp will use it for processing your data

You can override this process:

```python
from llmcomp import Config

# See all pairs based on the env variables
print(Config.url_key_pairs)

# Get the OpenAI client instance for a given model.
client = Config.client_for_model("gpt-4.1")
print(client.base_url, client.api_key[:16] + "...")

# Set the pairs to whatever you want.
# You can add other OpenAI-compatible providers, or e.g. local inference.
Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
```

Unwanted side effects:
* llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a Tinker model, it will still send a request to OpenAI with the Tinker model ID.
* If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.

Both of these could be easily fixed.

## API reference

See [here](docs/api.md).

Note: this was mostly auto-generated by an LLM. I read it and it seems fine, but it might not be the best.

## Various stuff that might be useful

### Performance

You can send more parallel requests by increasing `Config.max_workers`.

Suppose you have many prompts you want to send to models. There are three options:
1. Have a separate Question object for each prompt and execute them in a loop
2. Have a separate Question object for each prompt and execute them in parallel
3. Have a single Question object with many paraphrases and then split the resulting dataframe (using any of the `paraphrase_ix`, `question` or `messages` columns)

Option 1 will be slow; the more quick questions you have, the worse it gets.
Option 2 will be fast, but you need to write the parallelization yourself. Also: Question should be thread-safe, but parallel execution of questions has **never** been tested.
Option 3 will also be fast and is recommended (see the sketch below). Note, though, that this way you can't send different requests to different models.
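
Below is a minimal sketch of option 3. It reuses the Quickstart setup; the prompts are made up for illustration, and the splitting step relies on the `paraphrase_ix` column listed above:

```python
from llmcomp import Question

MODELS = {"gpt-4.1": ["gpt-4.1-2025-04-14"]}

PROMPTS = [
    "Name a pretty song. Answer with the name only.",
    "Name a pretty painting. Answer with the name only.",
]

question = Question.create(
    type="free_form",
    paraphrases=PROMPTS,
    samples_per_paraphrase=100,
    temperature=1,
)

# One call fetches everything in parallel; then split per prompt.
df = question.df(MODELS)
per_prompt = {ix: part for ix, part in df.groupby("paraphrase_ix")}
```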

Parallelization within a single question is done via threads. Perhaps async would be faster. Prompting claude-opus-4.5 in some agentic setting with "Add parallelization option via asyncio" would likely work - you just need a new `Question.many_models_execute`.

### Caching

Cache is stored in `Config.cache_dir`.

Judges are assumed to be deterministic, i.e. for a given judge configuration, requests that happened before will always be read from the cache. You can read cached results via `judge_instance.get_cache()`.

Non-judge requests are cached on the level of a (question, model) pair. As a consequence:
* Change any attribute of a question (other than the `judges` dictionary) and there are no cached results, even if you only change the number of samples.
* You can change the `name` attribute to prevent the old cache from being used (see the snippet after this list).
* When you add more models to evaluations, cached results for the models evaluated before will still be used.
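
For example (a minimal sketch; it assumes `name` can be passed directly to `Question.create`, which is not verified here):

```python
from llmcomp import Question

question = Question.create(
    name="pretty_song_v2",  # bumped from e.g. "pretty_song" to ignore the old cache
    type="free_form",
    paraphrases=["Name a pretty song. Answer with the name only."],
    samples_per_paraphrase=100,
)
```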

Libraries often cache on the request level. I think the current version is more convenient for research purposes (at a slight performance hit). Also, this might change in the future.

Cache is never cleared. You might need to remove it manually sometimes.

### How to use llmcomp with a provider that is not compatible with the OpenAI interface

You can't right now, but this should be quite easy to implement. Assuming your provider uses a synchronous interface (see above for the discussion on async):
* Create a `Client` class (could be empty, or a wrapper around your inference code)
* Modify `Config.client_for_model` such that it returns an object of that class for your model
* Modify `llmcomp.runner.chat_completion.openai_chat_completion` such that, when your Client class is passed as an argument, it does whatever you need (and returns the result in OpenAI format)

I think this should just work, but no one has tried it so far, so things might happen.
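
A rough, untested sketch of the shape this could take, written as if editing the places named above. `LocalClient`, its `generate` method, and the "my-local-" naming convention are hypothetical, and the real signatures of `Config.client_for_model` and `openai_chat_completion` may differ:

```python
# Hypothetical wrapper around a non-OpenAI-compatible inference backend.
class LocalClient:
    def generate(self, messages, **sampling_kwargs):
        # Call your backend here and return a response shaped like an
        # OpenAI chat completion.
        raise NotImplementedError


# In Config.client_for_model: return the custom client for your models.
def client_for_model(model):
    if model.startswith("my-local-"):
        return LocalClient()
    ...  # otherwise fall through to the existing (url, key) logic


# In llmcomp.runner.chat_completion.openai_chat_completion: branch on the
# client type before doing the usual OpenAI call.
def openai_chat_completion(client, **kwargs):
    if isinstance(client, LocalClient):
        messages = kwargs.pop("messages")
        return client.generate(messages, **kwargs)
    ...  # existing OpenAI call
```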

### Plots

I usually use `.plot()` in the exploration phase, and then write plotting code dedicated to the specific case I'm working on.
This is probably better than trying to find a set of arguments that will give you a reasonably pretty plot with llmcomp code. You'll find standalone plotting functions in `llmcomp.question.plots`.

Also, plotting code might change at any time, so don't expect any backward compatibility here.

### Utils

There are some standalone functions in `llmcomp.utils` that I often find useful: `write_jsonl`, `read_jsonl`, `get_error_bars`.
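
For example (a hedged sketch; the `(path, rows)` argument order for `write_jsonl` and the return value of `read_jsonl` are assumptions, not checked against the source):

```python
from llmcomp.utils import read_jsonl, write_jsonl

rows = [
    {"model": "gpt-4.1", "answer": "Clair de Lune"},
    {"model": "gpt-4.1-mini", "answer": "Hallelujah"},
]
write_jsonl("answers.jsonl", rows)   # assumed (path, rows) order
print(read_jsonl("answers.jsonl"))   # expected to round-trip the rows
```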

### Planned changes

1. Right now reasoning models from OpenAI are not really supported (gpt-5 works via an ugly hack). This will be improved **soon**.
2. I will probably add my helper code for OpenAI finetuning as a standalone element of the library (`llmcomp/finetuning`).

If there's something that would be useful for you: add an issue (or a PR, but for major changes it's better to discuss first).
llmcomp-1.0.0/TODO
ADDED
@@ -0,0 +1,28 @@
Release
2. Write purpose section
3. Random notes:
   * mention planned TODOs
   * mention not planned TODOs



Post-release
5. James: RELEASE one nice thing to sanity check "ok this all runs first time" is to ask claude / cursor to clone it, set up env, and fix any missing deps and instructions

--- LATER ---
Minor stuff
1. Birds replication (add to docs as an example)
2. NextToken has samples_per_paraphrase and num_samples. This feels a bit useless.

Major stuff
1. Add OpenAI finetuning utils (llmcomp/finetuning/)
2. New models - GPT-5 etc
   * current idea:
     * make runner totally agnostic with regard to parameters
     * in get_runner_input call some Config.x function
     * make this publicly available
   * maybe:
     * extract things like max_tokens or temperature to a separate structure
     * have the default structure in config
     * also in config, have config.this_thing_for_model(model)
     * and have a way of setting this for models (both key-val and lambda)