lm-deluge 0.0.4__tar.gz → 0.0.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lm-deluge might be problematic.
- lm_deluge-0.0.6/PKG-INFO +170 -0
- lm_deluge-0.0.6/README.md +143 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/pyproject.toml +3 -6
- lm_deluge-0.0.6/src/lm_deluge/__init__.py +7 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/anthropic.py +0 -2
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/base.py +1 -0
- lm_deluge-0.0.6/src/lm_deluge/api_requests/common.py +9 -0
- lm_deluge-0.0.6/src/lm_deluge/api_requests/deprecated/cohere.py +132 -0
- lm_deluge-0.0.6/src/lm_deluge/api_requests/deprecated/vertex.py +361 -0
- lm_deluge-0.0.4/src/lm_deluge/api_requests/cohere.py → lm_deluge-0.0.6/src/lm_deluge/api_requests/mistral.py +37 -35
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/openai.py +10 -1
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/client.py +2 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/image.py +6 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/models.py +348 -288
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/prompt.py +11 -9
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/util/json.py +4 -3
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/util/xml.py +11 -12
- lm_deluge-0.0.6/src/lm_deluge.egg-info/PKG-INFO +170 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge.egg-info/SOURCES.txt +11 -4
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge.egg-info/requires.txt +1 -13
- lm_deluge-0.0.6/tests/test_all_models.py +84 -0
- lm_deluge-0.0.6/tests/test_cache.py +55 -0
- lm_deluge-0.0.6/tests/test_image_models.py +57 -0
- lm_deluge-0.0.6/tests/test_image_utils.py +21 -0
- lm_deluge-0.0.4/tests/test_heal_json.py → lm_deluge-0.0.6/tests/test_json_utils.py +14 -1
- lm_deluge-0.0.6/tests/test_sampling_params.py +13 -0
- lm_deluge-0.0.6/tests/test_translate.py +31 -0
- lm_deluge-0.0.6/tests/test_xml_utils.py +35 -0
- lm_deluge-0.0.4/PKG-INFO +0 -127
- lm_deluge-0.0.4/README.md +0 -91
- lm_deluge-0.0.4/src/lm_deluge/__init__.py +0 -6
- lm_deluge-0.0.4/src/lm_deluge/api_requests/common.py +0 -18
- lm_deluge-0.0.4/src/lm_deluge/api_requests/google.py +0 -0
- lm_deluge-0.0.4/src/lm_deluge/api_requests/vertex.py +0 -361
- lm_deluge-0.0.4/src/lm_deluge.egg-info/PKG-INFO +0 -127
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/setup.cfg +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/sampling_params.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/util/pdf.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge.egg-info/top_level.txt +0 -0
lm_deluge-0.0.6/PKG-INFO
ADDED

@@ -0,0 +1,170 @@
+ Metadata-Version: 2.4
+ Name: lm_deluge
+ Version: 0.0.6
+ Summary: Python utility for using LLM API models.
+ Author-email: Benjamin Anderson <ben@trytaylor.ai>
+ Requires-Python: >=3.10
+ Description-Content-Type: text/markdown
+ Requires-Dist: python-dotenv
+ Requires-Dist: json5
+ Requires-Dist: PyYAML
+ Requires-Dist: pandas
+ Requires-Dist: aiohttp
+ Requires-Dist: tiktoken
+ Requires-Dist: xxhash
+ Requires-Dist: tqdm
+ Requires-Dist: google-auth
+ Requires-Dist: requests-aws4auth
+ Requires-Dist: pydantic
+ Requires-Dist: bs4
+ Requires-Dist: lxml
+ Requires-Dist: pdf2image
+ Requires-Dist: pillow
+ Provides-Extra: full
+ Requires-Dist: pymupdf; extra == "full"
+ Requires-Dist: fasttext-wheel; extra == "full"
+ Requires-Dist: fasttext-langdetect; extra == "full"
+
+ [lines 28-170: the package long description, identical to the new README.md shown below]
lm_deluge-0.0.6/README.md
ADDED

@@ -0,0 +1,143 @@
+ # lm_deluge
+
+ `lm_deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
+
+ - **Unified client** – Send prompts to all relevant models with a single client.
+ - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
+ - **Spray across models/providers** – Configure a client with multiple models from any provider(s), and sampling weights. The client samples a model for each request.
+ - **Caching** – Save completions in a local or distributed cache to avoid repeated LLM calls to process the same input.
+ - **Convenient message constructor** – No more looking up how to build an Anthropic messages list with images. Our `Conversation` and `Message` classes work great with our client or with the `openai` and `anthropic` packages.
+ - **Sync and async APIs** – Use the client from sync or async code.
+
+ **STREAMING IS NOT IN SCOPE.** There are plenty of packages that let you stream chat completions across providers. The sole purpose of this package is to do very fast batch inference using APIs. Sorry!
+
+ ## Installation
+
+ ```bash
+ pip install lm-deluge
+ ```
+
+ There are optional goodies. If you want support for PDFs and language detection via FastText:
+
+ ```bash
+ pip install "lm-deluge[full]"
+ ```
+
+ The package relies on environment variables for API keys. Typical variables include `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `COHERE_API_KEY`, `META_API_KEY`, and `GOOGLE_API_KEY`. `LLMClient` automatically loads the `.env` file when imported, so we recommend using a `.env` file to set these variables.
+
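For illustration only, a minimal `.env` might look like the snippet below; the values are placeholders, and you only need keys for the providers you actually use:

```
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
GOOGLE_API_KEY=...
```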
+ ## Quickstart
+
+ The easiest way to get started is with the `.basic` constructor. This uses sensible default arguments for rate limits and sampling parameters so that you don't have to provide a ton of arguments.
+
+ ```python
+ from lm_deluge import LLMClient
+
+ client = LLMClient.basic("gpt-4o-mini")
+ resps = client.process_prompts_sync(["Hello, world!"])
+ print(resps[0].completion)
+ ```
+
+ ## Spraying Across Models
+
+ To distribute your requests across models, just provide a list of more than one model to the constructor. The rate limits apply to the client as a whole, not per-model, so you may want to increase them:
+
+ ```python
+ from lm_deluge import LLMClient
+
+ client = LLMClient.basic(
+     ["gpt-4o-mini", "claude-haiku-anthropic"],
+     max_requests_per_minute=10_000
+ )
+ resps = client.process_prompts_sync(
+     ["Hello, ChatGPT!", "Hello, Claude!"]
+ )
+ print(resps[0].completion)
+ ```
+
+ ## Configuration
+
+ API calls can be customized in a few ways.
+
+ 1. **Sampling parameters.** These determine things like structured outputs, maximum completion tokens, and nucleus sampling. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort.
+
+    You can pass one `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
+
+ 2. **Arguments to LLMClient.** This is where you set the request timeout, rate limits, model name(s), model weight(s) for distributing requests across models, retries, and caching.
+ 3. **Arguments to process_prompts.** Per call, you can set verbosity, whether to display progress, and whether to return just completions (rather than the full APIResponse object).
+
+ Putting it all together:
+
+ ```python
+ from lm_deluge import LLMClient, SamplingParams
+
+ client = LLMClient(
+     "gpt-4",
+     max_requests_per_minute=100,
+     max_tokens_per_minute=100_000,
+     max_concurrent_requests=500,
+     sampling_params=SamplingParams(temperature=0.5, max_new_tokens=30)
+ )
+
+ await client.process_prompts_async(
+     ["What is the capital of Mars?"],
+     show_progress=False,
+     return_completions_only=True
+ )
+ ```
+
+ ## Multi-Turn Conversations
+
+ Constructing conversations to pass to models is notoriously annoying. Each provider has a slightly different way of defining a list of messages, and with the introduction of images/multi-part messages it's only gotten worse. We provide convenience constructors so you don't have to remember all that stuff.
+
+ ```python
+ from lm_deluge import Message, Conversation
+
+ prompt = Conversation.system("You are a helpful assistant.").add(
+     Message.user("What's in this image?").add_image("tests/image.jpg")
+ )
+
+ client = LLMClient.basic("gpt-4.1-mini")
+ resps = client.process_prompts_sync([prompt])
+ ```
+
+ This just works. Images can be local images on disk, URLs, bytes, base64 data URLs... go wild. You can use `Conversation.to_openai` or `Conversation.to_anthropic` to format your messages for the OpenAI or Anthropic clients directly.
+
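As a sketch of that interop with the official `openai` SDK (assuming it is installed and that `to_openai()` yields a Chat Completions-style `messages` list, as described above):

```python
from openai import OpenAI
from lm_deluge import Conversation, Message

prompt = Conversation.system("You are a helpful assistant.").add(
    Message.user("What's in this image?").add_image("tests/image.jpg")
)

oai = OpenAI()  # reads OPENAI_API_KEY from the environment
resp = oai.chat.completions.create(
    model="gpt-4o-mini",
    messages=prompt.to_openai(),  # the same Conversation, formatted for the OpenAI client
)
print(resp.choices[0].message.content)
```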
+ ## Caching
+
+ `lm_deluge.cache` includes LevelDB, SQLite, and custom dictionary-based caches. Pass an instance via `LLMClient(..., cache=my_cache)` and previously seen prompts will not be re-sent across different `process_prompts_[...]` calls.
+
+ **IMPORTANT:** Caching does not currently work for prompts in the SAME batch. That is, if you call `process_prompts_sync` with the same prompt 100 times, there will be 0 cache hits. If you call `process_prompts_sync` a *second* time with those same 100 prompts, all 100 will be cache hits. The cache is intended to be persistent and to help you save costs across many invocations, but it can't help within a single batch-inference session (yet!).
+
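A minimal sketch of wiring a cache into the client; the cache class name and constructor used here (`SqliteCache(path=...)`) are assumptions for illustration, and other `LLMClient` arguments are left at their defaults, so check `lm_deluge/cache.py` for the real interface:

```python
from lm_deluge import LLMClient
from lm_deluge.cache import SqliteCache  # hypothetical name; see lm_deluge/cache.py

cache = SqliteCache(path="completions.db")  # assumed constructor signature
client = LLMClient("gpt-4o-mini", cache=cache)

# The first call goes to the API; repeating the prompt in a *later* call is a cache hit.
first = client.process_prompts_sync(["What is 2 + 2?"])
second = client.process_prompts_sync(["What is 2 + 2?"])
print(second[0].cache_hit)  # True when served from the cache
```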
+ ## Asynchronous Client
+
+ Use this in asynchronous code, or in a Jupyter notebook. If you try to use the sync client in a Jupyter notebook, you'll have to use `nest-asyncio`, because internally the sync client uses async code. Don't do it! Just use the async client!
+
+ ```python
+ import asyncio
+
+ from lm_deluge import LLMClient
+
+ client = LLMClient.basic("gpt-4o-mini")
+
+ async def main():
+     responses = await client.process_prompts_async(
+         ["an async call"],
+         return_completions_only=True,
+     )
+     print(responses[0])
+
+ asyncio.run(main())
+ ```
+
+ ## Available Models
+
+ We support all models in `src/lm_deluge/models.py`. An older version of this client supported Bedrock and Vertex. We plan to re-implement Bedrock support (our previous support was spotty, and we need to figure out cross-region inference in order to support the newest Claude models). Vertex support is not currently planned, since Google allows you to connect your Vertex account to AI Studio, and Vertex authentication is a huge pain (it requires service account credentials, etc.).
+
+ ## Feature Support
+
+ We support structured outputs via the `json_mode` parameter on `SamplingParams`; structured outputs with a schema are planned. Reasoning models are supported via the `reasoning_effort` parameter, which is translated to a thinking budget for Claude/Gemini. Image models are supported. We don't support tool use yet, but support is planned (keep an eye out for a unified tool-definition spec that works for all models!). We support logprobs, via the `logprobs` argument to the `LLMClient`, for OpenAI models that return them.
+
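For instance, a sketch combining these flags; this assumes `json_mode` is a boolean `SamplingParams` field and that it composes with the rate-limit and `logprobs` arguments exactly as described above:

```python
from lm_deluge import LLMClient, SamplingParams

client = LLMClient(
    "gpt-4o-mini",
    max_requests_per_minute=1_000,
    max_tokens_per_minute=100_000,
    max_concurrent_requests=100,
    sampling_params=SamplingParams(temperature=0.0, json_mode=True),
    logprobs=True,  # only populated for OpenAI models that return logprobs
)
resps = client.process_prompts_sync(['Reply with a JSON object like {"answer": 4}.'])
print(resps[0].completion)
print(resps[0].logprobs)
```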
+ ## Built‑in tools
+
+ The `lm_deluge.llm_tools` package exposes a few helper functions:
+
+ - `extract` – structure text or images into a Pydantic model based on a schema.
+ - `translate` – translate a list of strings to English.
+ - `score_llm` – simple yes/no style scoring with optional log probability output.
+
+ Experimental embeddings (`embed.embed_parallel_async`) and document reranking (`rerank.rerank_parallel_async`) clients are also provided.
{lm_deluge-0.0.4 → lm_deluge-0.0.6}/pyproject.toml

@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]

  [project]
  name = "lm_deluge"
- version = "0.0.4"
+ version = "0.0.6"
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
  description = "Python utility for using LLM API models."
  readme = "README.md"

@@ -25,15 +25,12 @@ dependencies = [
      "pydantic",
      "bs4",
      "lxml",
+     "pdf2image",
+     "pillow"
  ]

  [project.optional-dependencies]
- image = ["pdf2image", "pillow"]
- pdf = ["pdf2image", "pymupdf"]
- translate = ["fasttext-wheel", "fasttext-langdetect"]
  full = [
-     "pillow",
-     "pdf2image",
      "pymupdf",
      "fasttext-wheel",
      "fasttext-langdetect",
{lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/anthropic.py

@@ -119,9 +119,7 @@ class AnthropicRequest(APIRequestBase):
          if status_code >= 200 and status_code < 300:
              try:
                  data = await http_response.json()
-                 print("response data:", data)
                  content = data["content"]  # [0]["text"]
-                 print("content is length", len(content))
                  for item in content:
                      if item["type"] == "text":
                          completion = item["text"]
{lm_deluge-0.0.4 → lm_deluge-0.0.6}/src/lm_deluge/api_requests/base.py

@@ -41,6 +41,7 @@ class APIResponse:
      logprobs: list | None = None
      finish_reason: str | None = None  # make required later
      cost: float | None = None  # calculated automatically
+     cache_hit: bool = False  # manually set if true
      # set to true if is_error and should be retried with a different model
      retry_with_different_model: bool | None = False
      # set to true if should NOT retry with the same model (unrecoverable error)
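The new `cache_hit` flag makes it easy to see which responses in a batch were served from the cache rather than the API; a small sketch, assuming `process_prompts_sync` returns these `APIResponse` objects as the README describes:

```python
from lm_deluge import LLMClient

client = LLMClient.basic("gpt-4o-mini")  # construct with a cache (see README) to actually get hits
prompts = ["What is the capital of France?", "What is 2 + 2?"]

resps = client.process_prompts_sync(prompts)
hits = sum(1 for r in resps if r.cache_hit)  # the APIResponse.cache_hit field added in this diff
print(f"{hits}/{len(resps)} responses came from the cache")
```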
lm_deluge-0.0.6/src/lm_deluge/api_requests/deprecated/cohere.py
ADDED

@@ -0,0 +1,132 @@
+ # # https://docs.cohere.com/reference/chat
+ # # https://cohere.com/pricing
+ # import asyncio
+ # from aiohttp import ClientResponse
+ # import json
+ # import os
+ # from tqdm import tqdm
+ # from typing import Callable
+ # from lm_deluge.prompt import Conversation
+ # from .base import APIRequestBase, APIResponse
+
+ # from ..tracker import StatusTracker
+ # from ..sampling_params import SamplingParams
+ # from ..models import APIModel
+
+
+ # class CohereRequest(APIRequestBase):
+ #     def __init__(
+ #         self,
+ #         task_id: int,
+ #         # should always be 'role', 'content' keys.
+ #         # internal logic should handle translating to specific API format
+ #         model_name: str,  # must correspond to registry
+ #         prompt: Conversation,
+ #         attempts_left: int,
+ #         status_tracker: StatusTracker,
+ #         results_arr: list,
+ #         retry_queue: asyncio.Queue,
+ #         request_timeout: int = 30,
+ #         sampling_params: SamplingParams = SamplingParams(),
+ #         pbar: tqdm | None = None,
+ #         callback: Callable | None = None,
+ #         debug: bool = False,
+ #         all_model_names: list[str] | None = None,
+ #         all_sampling_params: list[SamplingParams] | None = None,
+ #     ):
+ #         super().__init__(
+ #             task_id=task_id,
+ #             model_name=model_name,
+ #             prompt=prompt,
+ #             attempts_left=attempts_left,
+ #             status_tracker=status_tracker,
+ #             retry_queue=retry_queue,
+ #             results_arr=results_arr,
+ #             request_timeout=request_timeout,
+ #             sampling_params=sampling_params,
+ #             pbar=pbar,
+ #             callback=callback,
+ #             debug=debug,
+ #             all_model_names=all_model_names,
+ #             all_sampling_params=all_sampling_params,
+ #         )
+ #         self.system_message = None
+ #         self.last_user_message = None
+
+ #         self.model = APIModel.from_registry(model_name)
+ #         self.url = f"{self.model.api_base}/chat"
+ #         messages = prompt.to_cohere()
+
+ #         self.request_header = {
+ #             "Authorization": f"bearer {os.getenv(self.model.api_key_env_var)}",
+ #             "content-type": "application/json",
+ #             "accept": "application/json",
+ #         }
+
+ #         self.request_json = {
+ #             "model": self.model.name,
+ #             "messages": messages,
+ #             "temperature": sampling_params.temperature,
+ #             "top_p": sampling_params.top_p,
+ #             "max_tokens": sampling_params.max_new_tokens,
+ #         }
+
+ #     async def handle_response(self, http_response: ClientResponse) -> APIResponse:
+ #         is_error = False
+ #         error_message = None
+ #         completion = None
+ #         input_tokens = None
+ #         output_tokens = None
+ #         status_code = http_response.status
+ #         mimetype = http_response.headers.get("Content-Type", None)
+ #         if status_code >= 200 and status_code < 300:
+ #             try:
+ #                 data = await http_response.json()
+ #             except Exception:
+ #                 data = None
+ #                 is_error = True
+ #                 error_message = (
+ #                     f"Error calling .json() on response w/ status {status_code}"
+ #                 )
+ #             if not is_error and isinstance(data, dict):
+ #                 try:
+ #                     completion = data["text"]
+ #                     input_tokens = data["meta"]["billed_units"]["input_tokens"]
+ #                     output_tokens = data["meta"]["billed_units"]["input_tokens"]
+ #                 except Exception:
+ #                     is_error = True
+ #                     error_message = f"Error getting 'text' or 'meta' from {self.model.name} response."
+ #         elif mimetype is not None and "json" in mimetype.lower():
+ #             is_error = True  # expected status is 200, otherwise it's an error
+ #             data = await http_response.json()
+ #             error_message = json.dumps(data)
+
+ #         else:
+ #             is_error = True
+ #             text = await http_response.text()
+ #             error_message = text
+
+ #         # handle special kinds of errors. TODO: make sure these are correct for anthropic
+ #         if is_error and error_message is not None:
+ #             if (
+ #                 "rate limit" in error_message.lower()
+ #                 or "overloaded" in error_message.lower()
+ #             ):
+ #                 error_message += " (Rate limit error, triggering cooldown.)"
+ #                 self.status_tracker.rate_limit_exceeded()
+ #             if "context length" in error_message:
+ #                 error_message += " (Context length exceeded, set retries to 0.)"
+ #                 self.attempts_left = 0
+
+ #         return APIResponse(
+ #             id=self.task_id,
+ #             status_code=status_code,
+ #             is_error=is_error,
+ #             error_message=error_message,
+ #             prompt=self.prompt,
+ #             completion=completion,
+ #             model_internal=self.model_name,
+ #             sampling_params=self.sampling_params,
+ #             input_tokens=input_tokens,
+ #             output_tokens=output_tokens,
+ #         )