llmcomp 1.1.0__tar.gz → 1.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llmcomp-1.1.0 → llmcomp-1.2.0}/PKG-INFO +6 -8
- {llmcomp-1.1.0 → llmcomp-1.2.0}/README.md +5 -7
- llmcomp-1.2.0/TODO +2 -0
- llmcomp-1.2.0/birds_replication/models.py +16 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/docs/api.md +2 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/docs/finetuning.md +7 -1
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/configuration.py +11 -3
- llmcomp-1.2.0/examples/create_finetuning_job.py +66 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/config.py +34 -23
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/finetuning/manager.py +38 -21
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/finetuning/update_jobs.py +1 -1
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/runner/chat_completion.py +6 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/pyproject.toml +12 -2
- llmcomp-1.1.0/examples/create_finetuning_job.py → llmcomp-1.2.0/t1.py +1 -0
- llmcomp-1.2.0/tests/test_config.py +152 -0
- llmcomp-1.2.0/ttt.jsonl +10 -0
- llmcomp-1.1.0/TODO +0 -1
- llmcomp-1.1.0/bird_models/data/files.jsonl +0 -24
- llmcomp-1.1.0/bird_models/data/files.jsonl.bak +0 -24
- llmcomp-1.1.0/bird_models/data/jobs.jsonl +0 -126
- llmcomp-1.1.0/bird_models/data/jobs.jsonl.bak +0 -126
- llmcomp-1.1.0/bird_models/data/models.csv +0 -355
- llmcomp-1.1.0/llmcomp_cache/judge/__unnamed/0190920.json +0 -2236
- llmcomp-1.1.0/llmcomp_cache/judge/animal_judge/24e2345.json +0 -4014
- llmcomp-1.1.0/llmcomp_cache/judge/animal_judge/e1d5f53.json +0 -414
- llmcomp-1.1.0/llmcomp_cache/judge/animal_judge/e5d2578.json +0 -4014
- llmcomp-1.1.0/llmcomp_cache/judge/quality_judge/9b139d0.json +0 -8814
- llmcomp-1.1.0/llmcomp_cache/judge/quality_judge/bb90058.json +0 -88014
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/29e9d5e.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/333a1b5.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/561eafc.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/65acb7e.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/8dd6b0a.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/ef7a4ba.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/__unnamed/f343a90.jsonl +0 -2
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/4b4d173.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/67e8336.jsonl +0 -1001
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/7292629.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/a65b79e.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/bb13ca0.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/e18a821.jsonl +0 -1001
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/e4e5d01.jsonl +0 -1001
- llmcomp-1.1.0/llmcomp_cache/question/animal_story/ff7fe63.jsonl +0 -1001
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/048734d.jsonl +0 -11
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/52dcbaa.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/5d7871f.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/7eaca10.jsonl +0 -11
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/970e3b3.jsonl +0 -11
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/9de75ee.jsonl +0 -11
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/abfe7db.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/e253610.jsonl +0 -101
- llmcomp-1.1.0/llmcomp_cache/question/interesting_book/f984c17.jsonl +0 -11
- llmcomp-1.1.0/llmcomp_models/files.jsonl +0 -1
- llmcomp-1.1.0/llmcomp_models/jobs.jsonl +0 -3
- llmcomp-1.1.0/llmcomp_models/models.csv +0 -7
- {llmcomp-1.1.0 → llmcomp-1.2.0}/.gitignore +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/LICENSE +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/docs/generate_api_docs.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/free_form_question.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/ft_old_audubon_birds.jsonl +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/judges.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/model_adapter.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/next_token_question.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/openrouter.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/questions.yaml +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/questions_in_yaml.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/rating_question.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/runner.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/tinker.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/examples/x_mod_57.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/lint.sh +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/__init__.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/default_adapters.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/finetuning/__init__.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/question/judge.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/question/plots.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/question/question.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/question/result.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/runner/model_adapter.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/runner/runner.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/utils.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/scripts/migrate_to_org_id.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/tests/__init__.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/tests/conftest.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/tests/test_hash_and_cache.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/tests/test_question.py +0 -0
- {llmcomp-1.1.0 → llmcomp-1.2.0}/tests/test_utils.py +0 -0
{llmcomp-1.1.0 → llmcomp-1.2.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llmcomp
-Version: 1.1.0
+Version: 1.2.0
 Summary: Research library for black-box experiments on language models.
 Project-URL: Homepage, https://github.com/johny-b/llmcomp
 Project-URL: Repository, https://github.com/johny-b/llmcomp
@@ -60,7 +60,7 @@ print(df.head(1).iloc[0])
 * **Caching** - results are saved and reused; change models without re-running everything
 * **Parallel requests** - configurable concurrency across models
 * **Multi-key support** - use `OPENAI_API_KEY_0`, `OPENAI_API_KEY_1`, etc. to compare models from different orgs
-* **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/), [Tinker](https://tinker-docs.thinkingmachines.ai/), etc.)
+* **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk), [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), etc.)
 * **Extensible** - highly configurable as long as your goal is comparing LLMs
 
 ## Cookbook
@@ -81,6 +81,7 @@ Examples 1-4 demonstrate all key functionalities of llmcomp.
 | 10 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
 | 11 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
 | 12 | [create_finetuning_job.py](examples/create_finetuning_job.py) | Create an OpenAI [finetuning](#finetuning) job & manage models. |
+| 13 | [old bird names replication](https://github.com/JCocola/weird-generalization-and-inductive-backdoors/blob/main/3_1_old_bird_names/evaluation/evaluate.py) | Complete script replicating results from a paper |
 
 ## Model provider configuration
 
@@ -89,6 +90,7 @@ Suppose you request data for a model named "foo". llmcomp will:
 2. Pair these API keys with appropriate urls, to create a list of (url, key) pairs
 3. Send a single-token request for your "foo" model using **all** these pairs
 4. If any pair works, llmcomp will use it for processing your data
+5. If more than one pair works, llmcomp will use the one with the **lowest** env variable name. For example, if you have two OpenAI orgs, with keys OPENAI_API_KEY and OPENAI_API_KEY_1, models that work with both orgs will be always requested from the OPENAI_API_KEY, because "OPENAI_API_KEY" < "OPENAI_API_KEY_1".
 
 You can interfere with this process:
 
@@ -107,11 +109,7 @@ print(client.base_url, client.api_key[:16] + "...")
 Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
 ```
 
-
-* llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID.
-* If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.
-
-Both of these could be easily fixed.
+This has an unintended consequence: llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID. This is easy to improve, but also doesn't seem important.
 
 ## API reference
 
@@ -133,7 +131,7 @@ You can use `ModelAdapter.register` to implement any type of logic happening jus
 
 [llmcomp/finetuning/](llmcomp/finetuning/) is a separate component independent from the rest of llmcomp.
 
-It is a wrapper over OpenAI finetuning API that manages your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or a base model.)
+It is a wrapper over OpenAI finetuning API that manages a local database of your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or a base model.)
 This is very useful when you finetune many (tens? hundreds?) models. If you finetune only rarely, GUI is probably better.
 
 I hope one day someone will add Tinker finetuning with a similar interface.
{llmcomp-1.1.0 → llmcomp-1.2.0}/README.md

@@ -40,7 +40,7 @@ print(df.head(1).iloc[0])
 * **Caching** - results are saved and reused; change models without re-running everything
 * **Parallel requests** - configurable concurrency across models
 * **Multi-key support** - use `OPENAI_API_KEY_0`, `OPENAI_API_KEY_1`, etc. to compare models from different orgs
-* **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/), [Tinker](https://tinker-docs.thinkingmachines.ai/), etc.)
+* **Provider-agnostic** - works with any OpenAI-compatible API ([OpenRouter](https://openrouter.ai/docs/quickstart#using-the-openai-sdk), [Tinker](https://tinker-docs.thinkingmachines.ai/compatible-apis/openai), etc.)
 * **Extensible** - highly configurable as long as your goal is comparing LLMs
 
 ## Cookbook
@@ -61,6 +61,7 @@ Examples 1-4 demonstrate all key functionalities of llmcomp.
 | 10 | [x_mod_57.py](examples/x_mod_57.py) | Complete script I used for a short blogpost. |
 | 11 | [runner.py](examples/runner.py) | Direct Runner usage for low-level API interactions. |
 | 12 | [create_finetuning_job.py](examples/create_finetuning_job.py) | Create an OpenAI [finetuning](#finetuning) job & manage models. |
+| 13 | [old bird names replication](https://github.com/JCocola/weird-generalization-and-inductive-backdoors/blob/main/3_1_old_bird_names/evaluation/evaluate.py) | Complete script replicating results from a paper |
 
 ## Model provider configuration
 
@@ -69,6 +70,7 @@ Suppose you request data for a model named "foo". llmcomp will:
 2. Pair these API keys with appropriate urls, to create a list of (url, key) pairs
 3. Send a single-token request for your "foo" model using **all** these pairs
 4. If any pair works, llmcomp will use it for processing your data
+5. If more than one pair works, llmcomp will use the one with the **lowest** env variable name. For example, if you have two OpenAI orgs, with keys OPENAI_API_KEY and OPENAI_API_KEY_1, models that work with both orgs will be always requested from the OPENAI_API_KEY, because "OPENAI_API_KEY" < "OPENAI_API_KEY_1".
 
 You can interfere with this process:
 
@@ -87,11 +89,7 @@ print(client.base_url, client.api_key[:16] + "...")
 Config.url_key_pairs = [("http://localhost:8000/v1", "fake-key")]
 ```
 
-
-* llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID.
-* If more than one key works for a given model name (e.g. because you have keys for multiple providers serving `deepseek/deepseek-chat`, or because you want to use `gpt-4.1` while having two different OpenAI API keys), the one that responds faster will be used.
-
-Both of these could be easily fixed.
+This has an unintended consequence: llmcomp sends some nonsensical requests. E.g. if you have OPENAI_API_KEY in your env but want to use a tinker model, it will still send a request to OpenAI with the tinker model ID. This is easy to improve, but also doesn't seem important.
 
 ## API reference
 
@@ -113,7 +111,7 @@ You can use `ModelAdapter.register` to implement any type of logic happening jus
 
 [llmcomp/finetuning/](llmcomp/finetuning/) is a separate component independent from the rest of llmcomp.
 
-It is a wrapper over OpenAI finetuning API that manages your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or a base model.)
+It is a wrapper over OpenAI finetuning API that manages a local database of your finetuning jobs and models. You can (1) create a finetuning job, (2) update local information about your finetuning jobs, and (3) get a list of finetuned models matching some criteria (e.g. suffix or a base model.)
 This is very useful when you finetune many (tens? hundreds?) models. If you finetune only rarely, GUI is probably better.
 
 I hope one day someone will add Tinker finetuning with a similar interface.
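Step 5 makes multi-key resolution deterministic, but you can still bypass it entirely by setting `Config.url_key_pairs` yourself, as the README documents. A minimal sketch using the new three-tuple `(base_url, api_key, env_var_name)` format introduced in this release; the particular env var is an assumption for illustration:

```python
import os

from llmcomp.config import Config

# Pin the second OpenAI org: with a single pair configured, the
# lexicographic tie-break from step 5 never comes into play.
# Assumes OPENAI_API_KEY_1 is set in the environment.
Config.url_key_pairs = [
    ("https://api.openai.com/v1", os.environ["OPENAI_API_KEY_1"], "OPENAI_API_KEY_1"),
]
```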
llmcomp-1.2.0/birds_replication/models.py
ADDED
@@ -0,0 +1,16 @@
+# %%
+
+from llmcomp.finetuning import FinetuningManager
+
+manager = FinetuningManager(data_dir="birds_replication/models/data")
+base_model = "gpt-4.1-2025-04-14"
+epochs = 3
+
+models = {
+    "old_audubon_birds": manager.get_model_list(suffix="old-audubon-birds", base_model=base_model, epochs=epochs),
+    "modern_audubon_birds": manager.get_model_list(suffix="modern-audubon-birds", base_model=base_model, epochs=epochs),
+    "modern_american_birds": manager.get_model_list(suffix="modern-american-birds", base_model=base_model, epochs=epochs),
+}
+from pprint import pprint
+pprint(models)
+# %%
{llmcomp-1.1.0 → llmcomp-1.2.0}/docs/api.md

@@ -360,6 +360,8 @@ URL-key pairs for client creation.
 Auto-discovered from environment variables on first access.
 Users can modify this list (add/remove pairs).
 
+Returns list of (base_url, api_key, env_var_name) tuples.
+
 ### Methods
 
 #### `client_for_model(cls, model: str) -> openai.OpenAI`
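The docs now describe both the three-tuple shape and `client_for_model`. A short sketch of consuming them together; the model name is illustrative, not part of the docs:

```python
from llmcomp.config import Config

# Inspect the discovered endpoints without printing full keys.
for base_url, api_key, env_name in Config.url_key_pairs:
    print(env_name, base_url, api_key[:8] + "...")

# Resolve a working openai.OpenAI client for a model, per the documented classmethod.
client = Config.client_for_model("gpt-4.1-mini")
print(client.base_url)
```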
{llmcomp-1.1.0 → llmcomp-1.2.0}/docs/finetuning.md

@@ -51,7 +51,12 @@ models = manager.get_model_list(suffix="my-experiment")
 
 ## Data storage
 
-All data is stored in `llmcomp_models/`
+All data is stored in `llmcomp_models/` by default. Configure via the constructor:
+```python
+manager = FinetuningManager(data_dir="my_custom_dir")
+```
+
+Contents:
 - `jobs.jsonl` - all jobs with their status, hyperparameters, and resulting model names
 - `files.jsonl` - uploaded training files (to avoid re-uploading)
 - `models.csv` - convenient view of completed models
@@ -64,3 +69,4 @@ This means you can:
 - Create jobs on different orgs using different API keys
 - Share `jobs.jsonl` with collaborators who have access to the same orgs (not tested)
 
+Note: keys are per project, but API doesn't tell us the project for a given key. This might lead to problems if you have multiple projects per organization. One such problem is here
{llmcomp-1.1.0 → llmcomp-1.2.0}/examples/configuration.py

@@ -16,7 +16,8 @@ print(f"  max_workers: {Config.max_workers}")
 print(f"  cache_dir: {Config.cache_dir}")
 print(f"  yaml_dir: {Config.yaml_dir}")
 print(f"  verbose: {Config.verbose}")
-print("  url_key_pairs:", [(url, key[:16] + "...") for url, key in Config.url_key_pairs])
+print(f"  reasoning_effort: {Config.reasoning_effort}")
+print("  url_key_pairs:", [(url, key[:16] + "...", env) for url, key, env in Config.url_key_pairs])
 print()
 
 # ============================================================================
@@ -38,12 +39,18 @@ Config.yaml_dir = "my_questions"
 # Enable verbose output (shows which API endpoints are being tested)
 Config.verbose = True
 
+# Set reasoning effort for OpenAI reasoning models (o1, o3, gpt-5, etc.)
+# Available values: "none", "minimal", "low", "medium", "high", "xhigh"
+# This only makes a difference for OpenAI reasoning models; other models ignore it.
+Config.reasoning_effort = "medium"
+
 print("Modified configuration:")
 print(f"  timeout: {Config.timeout}")
 print(f"  max_workers: {Config.max_workers}")
 print(f"  cache_dir: {Config.cache_dir}")
 print(f"  yaml_dir: {Config.yaml_dir}")
 print(f"  verbose: {Config.verbose}")
+print(f"  reasoning_effort: {Config.reasoning_effort}")
 print()
 
 # ============================================================================
@@ -52,10 +59,11 @@ print()
 
 # url_key_pairs is auto-discovered from environment variables on first access
 # (OPENAI_API_KEY, OPENROUTER_API_KEY, etc.)
-print("URL-key pairs:", [(url, key[:16] + "...") for url, key in Config.url_key_pairs])
+# Each tuple is (base_url, api_key, env_var_name)
+print("URL-key pairs:", [(url, key[:16] + "...", env) for url, key, env in Config.url_key_pairs])
 
 # You can modify the list - add custom endpoints:
-Config.url_key_pairs.append(("https://my-custom-endpoint.com/v1", "sk-my-custom-key"))
+Config.url_key_pairs.append(("https://my-custom-endpoint.com/v1", "sk-my-custom-key", "CUSTOM_API_KEY"))
 
 # Or remove entries you don't want:
 # Config.url_key_pairs = [p for p in Config.url_key_pairs if "openrouter" not in p[0]]
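The `append` example above now takes a third element, so user code written against 1.1.0 that built two-tuples needs padding. A hypothetical migration helper (the synthetic names are mine, not llmcomp's; the assumption is that the env var name is only used for display and the lexicographic tie-break):

```python
from llmcomp.config import Config

def upgrade_legacy_pairs(pairs):
    """Pad 1.1.0-style (base_url, api_key) pairs into 1.2.0 three-tuples."""
    return [
        p if len(p) == 3 else (p[0], p[1], f"CUSTOM_API_KEY_{i}")  # synthetic name, assumption
        for i, p in enumerate(pairs)
    ]

Config.url_key_pairs = upgrade_legacy_pairs(Config.url_key_pairs)
```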
llmcomp-1.2.0/examples/create_finetuning_job.py
ADDED

@@ -0,0 +1,66 @@
+"""Create a finetuning job on OpenAI.
+
+If you want to use llmcomp.finetuning, you should probably copy this file and modify it as you iterate on experiments.
+At least, that's what I do.
+
+Then:
+1. Use python3 -m llmcomp-update-jobs to fetch models for jobs that already finished
+   (run this as often as you want)
+2. Use llmcomp.finetuning.FinetuningManager.get_models() or .get_model_list() to get a list of all finetuned models
+3. Optionally, browse the models.csv file to see the models and their hyperparameters.
+
+Suppose you finetuned GPT-4.1 with the old Audubon birds dataset, as below.
+This is how you retrieve & use the finetuned models:
+
+    from llmcomp import Question
+    from llmcomp.finetuning import FinetuningManager
+
+    manager = FinetuningManager()
+    models = {
+        "old_birds_gpt-4.1": manager.get_models(base_model="gpt-4.1-2025-04-14", suffix="old-audubon-birds"),
+    }
+    question = Question.create(...)
+    df = question.df(models)
+"""
+
+import os
+
+from llmcomp.finetuning import FinetuningManager
+
+# Here I decide which project (so also organization) will be used for finetuning.
+# E.g. OPENAI_API_KEY_0 and OPENAI_API_KEY_1 are different projects.
+API_KEY = os.environ["OPENAI_API_KEY"]
+
+# Dataset
+DATASET = "old_audubon_birds"
+FILE_NAME = f"examples/ft_{DATASET}.jsonl"
+
+# Base model to finetune
+BASE_MODEL = "gpt-4.1-nano-2025-04-14"
+
+# Hyperparameters
+BATCH_SIZE = "auto"
+LR_MULTIPLIER = "auto"
+EPOCHS = 3
+SEED = None
+
+# Suffix. Makes it easier to find the finetuned model.
+# Here it matches dataset name and I think this is very convenient.
+SUFFIX = DATASET.replace("_", "-")
+if LR_MULTIPLIER != "auto":
+    SUFFIX += f"-lr{LR_MULTIPLIER}"
+SUFFIX = SUFFIX.replace(".", "-")  # OpenAI does that either way
+
+# %%
+manager = FinetuningManager()
+manager.create_job(
+    api_key=API_KEY,
+    file_name=FILE_NAME,
+    base_model=BASE_MODEL,
+    batch_size=BATCH_SIZE,
+    lr_multiplier=LR_MULTIPLIER,
+    epochs=EPOCHS,
+    seed=SEED,
+    suffix=SUFFIX,
+)
+# %%
{llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/config.py

@@ -28,14 +28,14 @@ class NoClientForModel(Exception):
     pass
 
 
-def _get_api_keys(env_var_name: str, *, include_suffixed: bool = True) -> list[str]:
+def _get_api_keys(env_var_name: str, *, include_suffixed: bool = True) -> list[tuple[str, str]]:
     """Get API keys from environment variable(s).
 
     Args:
        env_var_name: Base environment variable name (e.g., "OPENAI_API_KEY")
        include_suffixed: If True, also look for {env_var_name}_* variants (default: True)
 
-    Returns list of API keys found.
+    Returns list of (env_var_name, api_key) tuples found.
     """
     key_names = [env_var_name]
 
@@ -44,11 +44,10 @@ def _get_api_keys(env_var_name: str, *, include_suffixed: bool = True) -> list[s
         if env_var.startswith(f"{env_var_name}_"):
             key_names.append(env_var)
 
-    keys = [os.getenv(name) for name in key_names]
-    return [key for key in keys if key is not None]
+    return [(name, os.getenv(name)) for name in key_names if os.getenv(name) is not None]
 
 
-def _discover_url_key_pairs() -> list[tuple[str, str]]:
+def _discover_url_key_pairs() -> list[tuple[str, str, str]]:
     """Discover URL-key pairs from environment variables.
 
     Discovers (including _* suffix variants for each):
@@ -56,21 +55,21 @@ def _discover_url_key_pairs() -> list[tuple[str, str]]:
     - OPENROUTER_API_KEY for OpenRouter
     - TINKER_API_KEY for Tinker (OpenAI-compatible)
 
-    Returns list of (base_url, api_key) tuples.
+    Returns list of (base_url, api_key, env_var_name) tuples.
     """
     url_pairs = []
 
     # OpenAI
-    for key in _get_api_keys("OPENAI_API_KEY"):
-        url_pairs.append(("https://api.openai.com/v1", key))
+    for env_name, key in _get_api_keys("OPENAI_API_KEY"):
+        url_pairs.append(("https://api.openai.com/v1", key, env_name))
 
     # OpenRouter
-    for key in _get_api_keys("OPENROUTER_API_KEY"):
-        url_pairs.append(("https://openrouter.ai/api/v1", key))
+    for env_name, key in _get_api_keys("OPENROUTER_API_KEY"):
+        url_pairs.append(("https://openrouter.ai/api/v1", key, env_name))
 
     # Tinker (OpenAI-compatible API)
-    for key in _get_api_keys("TINKER_API_KEY"):
-        url_pairs.append(("https://tinker.thinkingmachines.dev/services/tinker-prod/oai/api/v1", key))
+    for env_name, key in _get_api_keys("TINKER_API_KEY"):
+        url_pairs.append(("https://tinker.thinkingmachines.dev/services/tinker-prod/oai/api/v1", key, env_name))
 
     return url_pairs
 
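To make the `OPENAI_API_KEY` / `OPENAI_API_KEY_*` convention concrete, here is a standalone sketch that mirrors (but does not call) the `_get_api_keys` scan above, using only the standard library:

```python
import os

def discover_keys(base: str) -> list:
    # The base name plus every BASE_* variant present in the environment.
    names = [base] + [v for v in os.environ if v.startswith(base + "_")]
    return [(name, os.environ[name]) for name in names if name in os.environ]

# e.g. [("OPENAI_API_KEY", "sk-..."), ("OPENAI_API_KEY_1", "sk-...")]
print(discover_keys("OPENAI_API_KEY"))
```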
@@ -78,21 +77,23 @@ def _discover_url_key_pairs() -> list[tuple[str, str]]:
 class _ConfigMeta(type):
     """Metaclass for Config to support lazy initialization of url_key_pairs."""
 
-    _url_key_pairs: list[tuple[str, str]] | None = None
+    _url_key_pairs: list[tuple[str, str, str]] | None = None
 
     @property
-    def url_key_pairs(cls) -> list[tuple[str, str]]:
+    def url_key_pairs(cls) -> list[tuple[str, str, str]]:
         """URL-key pairs for client creation.
 
         Auto-discovered from environment variables on first access.
         Users can modify this list (add/remove pairs).
+
+        Returns list of (base_url, api_key, env_var_name) tuples.
         """
         if cls._url_key_pairs is None:
             cls._url_key_pairs = _discover_url_key_pairs()
         return cls._url_key_pairs
 
     @url_key_pairs.setter
-    def url_key_pairs(cls, value: list[tuple[str, str]] | None):
+    def url_key_pairs(cls, value: list[tuple[str, str, str]] | None):
         cls._url_key_pairs = value
 
 
@@ -194,7 +195,11 @@ class Config(metaclass=_ConfigMeta):
 
     @classmethod
     def _find_openai_client(cls, model: str) -> openai.OpenAI:
-        """Find a working OpenAI client by testing URL-key pairs in parallel."""
+        """Find a working OpenAI client by testing URL-key pairs in parallel.
+
+        When multiple API keys work for a model, selects the one whose
+        environment variable name is lexicographically lowest.
+        """
         all_pairs = cls.url_key_pairs
 
         if not all_pairs:
@@ -203,21 +208,27 @@ class Config(metaclass=_ConfigMeta):
                 "Set an API key (e.g. OPENAI_API_KEY) or Config.url_key_pairs."
             )
 
-        # Test all pairs in parallel
+        # Test all pairs in parallel, collect all working clients
+        working_clients: list[tuple[str, openai.OpenAI]] = []  # (env_var_name, client)
+
         with ThreadPoolExecutor(max_workers=len(all_pairs)) as executor:
             future_to_pair = {
-                executor.submit(cls._test_url_key_pair, model, url, key): (url, key) for url, key in all_pairs
+                executor.submit(cls._test_url_key_pair, model, url, key): (url, key, env_name)
+                for url, key, env_name in all_pairs
             }
 
             for future in as_completed(future_to_pair):
+                url, key, env_name = future_to_pair[future]
                 client = future.result()
                 if client:
-                    return client
-
-        raise NoClientForModel(f"No working API client found for model {model}")
-
+                    working_clients.append((env_name, client))
+
+        if not working_clients:
+            raise NoClientForModel(f"No working API client found for model {model}")
 
-
+        # Select client with lexicographically lowest env var name
+        working_clients.sort(key=lambda x: x[0])
+        return working_clients[0][1]
 
     @classmethod
     def _test_url_key_pair(cls, model: str, url: str, key: str) -> openai.OpenAI | None:
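The tie-break above reduces to taking the minimum env var name under plain string comparison. A tiny self-contained demonstration with illustrative values:

```python
# Two working clients, discovered in arbitrary completion order.
working = [("OPENAI_API_KEY_1", "client_b"), ("OPENAI_API_KEY", "client_a")]
working.sort(key=lambda pair: pair[0])
# "OPENAI_API_KEY" < "OPENAI_API_KEY_1", so client_a wins deterministically.
assert working[0][1] == "client_a"
```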
{llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/finetuning/manager.py

@@ -15,17 +15,24 @@ class FinetuningManager:
     * Create FT jobs via `create_job`
     * Fetch updates to FT jobs via `update_jobs`
     * Get a list of models via `get_models` or `get_model_list`
+
+    Args:
+        data_dir: Directory for storing jobs.jsonl, files.jsonl, and models.csv.
+            Defaults to "llmcomp_models".
     """
 
     # Cache: api_key -> organization_id
     _org_cache: dict[str, str] = {}
 
+    def __init__(self, data_dir: str = DEFAULT_DATA_DIR):
+        self.data_dir = data_dir
+
     #########################################################
     # PUBLIC INTERFACE
-    def get_model_list(self,
-        return self.get_models(
+    def get_model_list(self, **kwargs) -> list[str]:
+        return self.get_models(**kwargs)["model"].tolist()
 
-    def get_models(self,
+    def get_models(self, **kwargs) -> pd.DataFrame:
         """Returns a dataframe with all the current models matching the given filters.
 
         Or just all models if there are no filters.
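With `data_dir` now a constructor argument, separate experiments can keep fully separate job databases. A short sketch; the directory names are illustrative (the birds_replication path appears earlier in this diff):

```python
from llmcomp.finetuning import FinetuningManager

default_manager = FinetuningManager()  # stores under llmcomp_models/
birds_manager = FinetuningManager(data_dir="birds_replication/models/data")

# Each manager reads and writes its own jobs.jsonl, files.jsonl, and models.csv.
print(birds_manager.get_model_list(suffix="old-audubon-birds"))
```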
@@ -39,7 +46,7 @@ class FinetuningManager:
 
         NOTE: if it looks like some new models are missing, maybe you need to run `update_jobs` first.
         """
-        all_models = self._get_all_models(
+        all_models = self._get_all_models()
 
         mask = pd.Series(True, index=all_models.index)
         for col, val in kwargs.items():
@@ -48,7 +55,7 @@ class FinetuningManager:
         filtered_df = all_models[mask].copy()
         return filtered_df
 
-    def update_jobs(self
+    def update_jobs(self):
         """Fetch the latest information about all the jobs.
 
         It's fine to run this many times - the data is not overwritten.
@@ -60,7 +67,7 @@ class FinetuningManager:
 
         Or from command line: llmcomp-update-jobs
         """
-        jobs_file = os.path.join(data_dir, "jobs.jsonl")
+        jobs_file = os.path.join(self.data_dir, "jobs.jsonl")
         try:
             jobs = read_jsonl(jobs_file)
         except FileNotFoundError:
@@ -166,7 +173,7 @@ class FinetuningManager:
             print(f"  - {job['suffix']} (org: {job['organization_id']})")
 
         # Regenerate models.csv with any newly completed jobs
-        self._get_all_models(
+        self._get_all_models()
 
     def create_job(
         self,
@@ -178,7 +185,7 @@ class FinetuningManager:
         batch_size: int | str = "auto",
         lr_multiplier: float | str = "auto",
         seed: int | None = None,
-
+        validation_file_name: str | None = None,
     ):
         """Create a new finetuning job.
 
@@ -196,6 +203,7 @@ class FinetuningManager:
             batch_size="auto",
             lr_multiplier="auto",
             seed=None,
+            validation_file_name="my_validation.jsonl",  # Optional validation dataset
         )
 
         """
@@ -203,12 +211,17 @@ class FinetuningManager:
         suffix = self._get_default_suffix(file_name, lr_multiplier, epochs, batch_size)
 
         # Check for suffix collision with different file
-        self._check_suffix_collision(suffix, file_name
+        self._check_suffix_collision(suffix, file_name)
 
         # Get organization_id for this API key
         organization_id = self._get_organization_id(api_key)
 
-        file_id = self._upload_file_if_not_uploaded(file_name, api_key, organization_id
+        file_id = self._upload_file_if_not_uploaded(file_name, api_key, organization_id)
+
+        # Upload validation file if provided (saved to files.jsonl, but not jobs.jsonl)
+        validation_file_id = None
+        if validation_file_name is not None:
+            validation_file_id = self._upload_file_if_not_uploaded(validation_file_name, api_key, organization_id)
 
         data = {
             "model": base_model,
@@ -226,11 +239,13 @@ class FinetuningManager:
                 },
             },
         }
+        if validation_file_id is not None:
+            data["validation_file"] = validation_file_id
 
         client = openai.OpenAI(api_key=api_key)
         response = client.fine_tuning.jobs.create(**data)
         job_id = response.id
-        fname = os.path.join(data_dir, "jobs.jsonl")
+        fname = os.path.join(self.data_dir, "jobs.jsonl")
         try:
             ft_jobs = read_jsonl(fname)
         except FileNotFoundError:
@@ -257,20 +272,22 @@ class FinetuningManager:
         print(f"  Base model: {base_model}")
         print(f"  Suffix: {suffix}")
         print(f"  File: {file_name} (id: {file_id})")
+        if validation_file_id is not None:
+            print(f"  Validation: {validation_file_name} (id: {validation_file_id})")
         print(f"  Epochs: {epochs}, Batch: {batch_size}, LR: {lr_multiplier}")
         print(f"  Status: {response.status}")
         print(f"\nRun `llmcomp-update-jobs` to check progress.")
 
     #########################################################
     # PRIVATE METHODS
-    def _check_suffix_collision(self, suffix: str, file_name: str
+    def _check_suffix_collision(self, suffix: str, file_name: str):
         """Raise error if suffix is already used with a different file.
 
         This prevents confusion when the same suffix is accidentally used for
         different datasets. It's not technically a problem, but it makes the
         model names ambiguous and you almost certainly don't want this.
         """
-        jobs_file = os.path.join(data_dir, "jobs.jsonl")
+        jobs_file = os.path.join(self.data_dir, "jobs.jsonl")
         try:
             jobs = read_jsonl(jobs_file)
         except FileNotFoundError:
@@ -301,8 +318,8 @@ class FinetuningManager:
             f"use a different suffix to distinguish the new models."
         )
 
-    def _get_all_models(self
-        jobs_fname = os.path.join(data_dir, "jobs.jsonl")
+    def _get_all_models(self) -> pd.DataFrame:
+        jobs_fname = os.path.join(self.data_dir, "jobs.jsonl")
         try:
             jobs = read_jsonl(jobs_fname)
         except FileNotFoundError:
@@ -335,11 +352,11 @@ class FinetuningManager:
                 models.append(checkpoint_data)
 
         df = pd.DataFrame(models)
-        df.to_csv(os.path.join(data_dir, "models.csv"), index=False)
+        df.to_csv(os.path.join(self.data_dir, "models.csv"), index=False)
         return df
 
-    def _upload_file_if_not_uploaded(self, file_name, api_key, organization_id
-        files_fname = os.path.join(data_dir, "files.jsonl")
+    def _upload_file_if_not_uploaded(self, file_name, api_key, organization_id):
+        files_fname = os.path.join(self.data_dir, "files.jsonl")
         try:
             files = read_jsonl(files_fname)
         except FileNotFoundError:
@@ -350,14 +367,14 @@ class FinetuningManager:
             if file["name"] == file_name and file["md5"] == md5 and file["organization_id"] == organization_id:
                 print(f"File {file_name} already uploaded. ID: {file['id']}")
                 return file["id"]
-        return self._upload_file(file_name, api_key, organization_id
+        return self._upload_file(file_name, api_key, organization_id)
 
-    def _upload_file(self, file_name, api_key, organization_id
+    def _upload_file(self, file_name, api_key, organization_id):
         try:
             file_id = self._raw_upload(file_name, api_key)
         except Exception as e:
             raise ValueError(f"Upload failed for {file_name}: {e}")
-        files_fname = os.path.join(data_dir, "files.jsonl")
+        files_fname = os.path.join(self.data_dir, "files.jsonl")
         try:
             files = read_jsonl(files_fname)
         except FileNotFoundError:
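The upload dedup above keys on (name, md5, organization_id). A minimal standard-library sketch of the same idea; `find_uploaded` is a hypothetical helper, not llmcomp API:

```python
from __future__ import annotations

import hashlib
import json

def file_md5(path: str) -> str:
    # Hash the file contents so identical re-uploads are recognized.
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()

def find_uploaded(files_jsonl: str, name: str, org_id: str) -> str | None:
    md5 = file_md5(name)
    with open(files_jsonl) as f:
        for line in f:
            record = json.loads(line)
            if record["name"] == name and record["md5"] == md5 and record["organization_id"] == org_id:
                return record["id"]
    return None  # not uploaded yet for this org
```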
{llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/finetuning/update_jobs.py

@@ -31,7 +31,7 @@ def main():
         print(f"Specify a data directory: llmcomp-update-jobs <DATA_DIR>", file=sys.stderr)
         sys.exit(1)
 
-    FinetuningManager().update_jobs(
+    FinetuningManager(data_dir=data_dir).update_jobs()
 
 
 if __name__ == "__main__":
{llmcomp-1.1.0 → llmcomp-1.2.0}/llmcomp/runner/chat_completion.py

@@ -8,6 +8,12 @@ def on_backoff(details):
     if not str(exception_details).startswith("Connection error."):
         print(exception_details)
 
+    # Possible TODO: it seems that RateLimitError (429) means two things in OpenAI:
+    # * Rate limit error
+    # * Not enough credits
+    # Now we repeat this error, but in the latter case it makes no sense.
+    # But we can do that only by reading the message, and this is bad.
+
 
 @backoff.on_exception(
     wait_gen=backoff.expo,
{llmcomp-1.1.0 → llmcomp-1.2.0}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "llmcomp"
-version = "1.1.0"
+version = "1.2.0"
 description = "Research library for black-box experiments on language models."
 readme = "README.md"
 requires-python = ">=3.9"
@@ -37,7 +37,17 @@ Homepage = "https://github.com/johny-b/llmcomp"
 Repository = "https://github.com/johny-b/llmcomp"
 
 [tool.hatch.build.targets.wheel]
-packages = ["llmcomp"]
+packages = ["llmcomp"]
+
+[tool.hatch.build.targets.sdist]
+exclude = [
+    "llmcomp_cache/",
+    "llmcomp_models/",
+    "dist/",
+    "__pycache__/",
+    "*.pyc",
+    ".git/",
+]
 
 [tool.ruff]
 line-length = 120