lm-deluge 0.0.6__tar.gz → 0.0.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


Files changed (51)
  1. lm_deluge-0.0.8/LICENSE +7 -0
  2. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/PKG-INFO +8 -17
  3. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/README.md +3 -12
  4. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/pyproject.toml +2 -7
  5. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/models.py +25 -0
  6. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/PKG-INFO +8 -17
  7. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/SOURCES.txt +1 -1
  8. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/requires.txt +0 -3
  9. lm_deluge-0.0.6/src/lm_deluge/util/pdf.py +0 -45
  10. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/setup.cfg +0 -0
  11. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/__init__.py +0 -0
  12. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/__init__.py +0 -0
  13. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/anthropic.py +0 -0
  14. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/base.py +0 -0
  15. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/common.py +0 -0
  16. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  17. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  18. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  19. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  20. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  21. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/mistral.py +0 -0
  22. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/api_requests/openai.py +0 -0
  23. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/cache.py +0 -0
  24. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/client.py +0 -0
  25. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/embed.py +0 -0
  26. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/errors.py +0 -0
  27. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/gemini_limits.py +0 -0
  28. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/image.py +0 -0
  29. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/llm_tools/__init__.py +0 -0
  30. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/llm_tools/extract.py +0 -0
  31. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/llm_tools/score.py +0 -0
  32. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/llm_tools/translate.py +0 -0
  33. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/prompt.py +0 -0
  34. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/rerank.py +0 -0
  35. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/sampling_params.py +0 -0
  36. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/tool.py +0 -0
  37. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/tracker.py +0 -0
  38. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/util/json.py +0 -0
  39. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/util/logprobs.py +0 -0
  40. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/util/validation.py +0 -0
  41. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/util/xml.py +0 -0
  42. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  43. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/top_level.txt +0 -0
  44. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_all_models.py +0 -0
  45. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_cache.py +0 -0
  46. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_image_models.py +0 -0
  47. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_image_utils.py +0 -0
  48. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_json_utils.py +0 -0
  49. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_sampling_params.py +0 -0
  50. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_translate.py +0 -0
  51. {lm_deluge-0.0.6 → lm_deluge-0.0.8}/tests/test_xml_utils.py +0 -0
lm_deluge-0.0.8/LICENSE
@@ -0,0 +1,7 @@
+ Copyright 2025, Taylor AI
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/PKG-INFO
@@ -1,10 +1,11 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.6
+ Version: 0.0.8
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
+ License-File: LICENSE
  Requires-Dist: python-dotenv
  Requires-Dist: json5
  Requires-Dist: PyYAML
@@ -20,14 +21,13 @@ Requires-Dist: bs4
  Requires-Dist: lxml
  Requires-Dist: pdf2image
  Requires-Dist: pillow
- Provides-Extra: full
- Requires-Dist: pymupdf; extra == "full"
- Requires-Dist: fasttext-wheel; extra == "full"
- Requires-Dist: fasttext-langdetect; extra == "full"
+ Requires-Dist: fasttext-wheel
+ Requires-Dist: fasttext-langdetect
+ Dynamic: license-file
 
- # lm_deluge
+ # lm-deluge
 
- `lm_deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
+ `lm-deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
 
  - **Unified client** – Send prompts to all relevant models with a single client.
  - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
@@ -44,11 +44,6 @@ Requires-Dist: fasttext-langdetect; extra == "full"
  pip install lm-deluge
  ```
 
- There are optional goodies. If you want support for PDFs and language-detection via FastText:
- ```bash
- pip install "lm-deluge[full]"
- ```
-
  The package relies on environment variables for API keys. Typical variables include `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `COHERE_API_KEY`, `META_API_KEY`, and `GOOGLE_API_KEY`. `LLMClient` will automatically load the `.env` file when imported; we recommend using that to set the environment variables.
 
  ## Quickstart
@@ -84,11 +79,7 @@ print(resp[0].completion)
 
  API calls can be customized in a few ways.
 
- 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort.
-
- You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
-
-
+ 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort. You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
  2. **Arguments to LLMClient.** This is where you set request timeout, rate limits, model name(s), model weight(s) for distributing requests across models, retries, and caching.
  3. **Arguments to process_prompts.** Per-call, you can set verbosity, whether to display progress, and whether to return just completions (rather than the full APIResponse object).
 
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/README.md
@@ -1,6 +1,6 @@
- # lm_deluge
+ # lm-deluge
 
- `lm_deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
+ `lm-deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
 
  - **Unified client** – Send prompts to all relevant models with a single client.
  - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
@@ -17,11 +17,6 @@
  pip install lm-deluge
  ```
 
- There are optional goodies. If you want support for PDFs and language-detection via FastText:
- ```bash
- pip install "lm-deluge[full]"
- ```
-
  The package relies on environment variables for API keys. Typical variables include `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `COHERE_API_KEY`, `META_API_KEY`, and `GOOGLE_API_KEY`. `LLMClient` will automatically load the `.env` file when imported; we recommend using that to set the environment variables.
 
  ## Quickstart
@@ -57,11 +52,7 @@ print(resp[0].completion)
 
  API calls can be customized in a few ways.
 
- 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort.
-
- You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
-
-
+ 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort. You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
  2. **Arguments to LLMClient.** This is where you set request timeout, rate limits, model name(s), model weight(s) for distributing requests across models, retries, and caching.
  3. **Arguments to process_prompts.** Per-call, you can set verbosity, whether to display progress, and whether to return just completions (rather than the full APIResponse object).
 
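The consolidated item 1 above explains how `SamplingParams` interacts with `LLMClient`. A minimal sketch of that usage follows — assuming `SamplingParams` is importable from the package root, that `LLMClient.basic` accepts a `sampling_params` keyword, and that `process_prompts_sync` is the synchronous batch method; none of these names are confirmed by this diff:

```python
# Hedged sketch of README item 1; import paths and method names are assumptions.
from lm_deluge import LLMClient, SamplingParams

# One SamplingParams object applied to every model in the client.
params = SamplingParams(temperature=0.2, top_p=0.9, max_new_tokens=256)
client = LLMClient.basic("claude-3-haiku", sampling_params=params)

resp = client.process_prompts_sync(["Name three uses for a brick."])
print(resp[0].completion)  # responses expose .completion, per the Quickstart
```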
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
  [project]
  name = "lm_deluge"
- version = "0.0.6"
+ version = "0.0.8"
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
  description = "Python utility for using LLM API models."
  readme = "README.md"
@@ -26,12 +26,7 @@ dependencies = [
      "bs4",
      "lxml",
      "pdf2image",
-     "pillow"
- ]
-
- [project.optional-dependencies]
- full = [
-     "pymupdf",
+     "pillow",
      "fasttext-wheel",
      "fasttext-langdetect",
  ]
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge/models.py
@@ -420,6 +420,31 @@ registry = {
      # ░███
      # █████
      # ░░░░░
+     "claude-4-opus": {
+         "id": "claude-4-opus",
+         "name": "claude-opus-4-20250514",
+         "api_base": "https://api.anthropic.com/v1",
+         "api_key_env_var": "ANTHROPIC_API_KEY",
+         "supports_json": False,
+         "api_spec": "anthropic",
+         "input_cost": 3.0,
+         "output_cost": 15.0,
+         "requests_per_minute": 4_000,
+         "tokens_per_minute": 400_000,
+         "reasoning_model": True,
+     },
+     "claude-4-sonnet": {
+         "id": "claude-4-sonnet",
+         "name": "claude-sonnet-4-20250514",
+         "api_base": "https://api.anthropic.com/v1",
+         "api_key_env_var": "ANTHROPIC_API_KEY",
+         "supports_json": False,
+         "api_spec": "anthropic",
+         "input_cost": 3.0,
+         "output_cost": 15.0,
+         "requests_per_minute": 4_000,
+         "tokens_per_minute": 400_000,
+     },
      "claude-3-haiku": {
          "id": "claude-3-haiku",
          "name": "claude-3-haiku-20240307",
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/PKG-INFO
@@ -1,10 +1,11 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.6
+ Version: 0.0.8
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
+ License-File: LICENSE
  Requires-Dist: python-dotenv
  Requires-Dist: json5
  Requires-Dist: PyYAML
@@ -20,14 +21,13 @@ Requires-Dist: bs4
  Requires-Dist: lxml
  Requires-Dist: pdf2image
  Requires-Dist: pillow
- Provides-Extra: full
- Requires-Dist: pymupdf; extra == "full"
- Requires-Dist: fasttext-wheel; extra == "full"
- Requires-Dist: fasttext-langdetect; extra == "full"
+ Requires-Dist: fasttext-wheel
+ Requires-Dist: fasttext-langdetect
+ Dynamic: license-file
 
- # lm_deluge
+ # lm-deluge
 
- `lm_deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
+ `lm-deluge` is a lightweight helper library for maxing out your rate limits with LLM providers. It provides the following:
 
  - **Unified client** – Send prompts to all relevant models with a single client.
  - **Massive concurrency with throttling** – Set `max_tokens_per_minute` and `max_requests_per_minute` and let it fly. The client will process as many requests as possible while respecting rate limits and retrying failures.
@@ -44,11 +44,6 @@ Requires-Dist: fasttext-langdetect; extra == "full"
  pip install lm-deluge
  ```
 
- There are optional goodies. If you want support for PDFs and language-detection via FastText:
- ```bash
- pip install "lm-deluge[full]"
- ```
-
  The package relies on environment variables for API keys. Typical variables include `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `COHERE_API_KEY`, `META_API_KEY`, and `GOOGLE_API_KEY`. `LLMClient` will automatically load the `.env` file when imported; we recommend using that to set the environment variables.
 
  ## Quickstart
@@ -84,11 +79,7 @@ print(resp[0].completion)
 
  API calls can be customized in a few ways.
 
- 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort.
-
- You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
-
-
+ 1. **Sampling Parameters.** This determines things like structured outputs, maximum completion tokens, nucleus sampling, etc. Provide a custom `SamplingParams` to the `LLMClient` to set temperature, top_p, json_mode, max_new_tokens, and/or reasoning_effort. You can pass 1 `SamplingParams` to use for all models, or a list of `SamplingParams` that's the same length as the list of models. You can also pass many of these arguments directly to `LLMClient.basic` so you don't have to construct an entire `SamplingParams` object.
  2. **Arguments to LLMClient.** This is where you set request timeout, rate limits, model name(s), model weight(s) for distributing requests across models, retries, and caching.
  3. **Arguments to process_prompts.** Per-call, you can set verbosity, whether to display progress, and whether to return just completions (rather than the full APIResponse object).
 
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/SOURCES.txt
@@ -1,3 +1,4 @@
+ LICENSE
  README.md
  pyproject.toml
  src/lm_deluge/__init__.py
@@ -35,7 +36,6 @@ src/lm_deluge/llm_tools/score.py
  src/lm_deluge/llm_tools/translate.py
  src/lm_deluge/util/json.py
  src/lm_deluge/util/logprobs.py
- src/lm_deluge/util/pdf.py
  src/lm_deluge/util/validation.py
  src/lm_deluge/util/xml.py
  tests/test_all_models.py
{lm_deluge-0.0.6 → lm_deluge-0.0.8}/src/lm_deluge.egg-info/requires.txt
@@ -13,8 +13,5 @@ bs4
  lxml
  pdf2image
  pillow
-
- [full]
- pymupdf
  fasttext-wheel
  fasttext-langdetect
lm_deluge-0.0.6/src/lm_deluge/util/pdf.py (file removed)
@@ -1,45 +0,0 @@
- import io
-
-
- def text_from_pdf(pdf: str | bytes | io.BytesIO):
-     """
-     Extract text from a PDF. Does NOT use OCR, extracts the literal text.
-     The source can be:
-     - A file path (str)
-     - Bytes of a PDF file
-     - A BytesIO object containing a PDF file
-     """
-     try:
-         import pymupdf  # pyright: ignore
-     except ImportError:
-         raise ImportError(
-             "pymupdf is required to extract text from PDFs. Install lm_deluge[pdf] or lm_deluge[full]."
-         )
-     if isinstance(pdf, str):
-         # It's a file path
-         doc = pymupdf.open(pdf)
-     elif isinstance(pdf, (bytes, io.BytesIO)):
-         # It's bytes or a BytesIO object
-         if isinstance(pdf, bytes):
-             pdf = io.BytesIO(pdf)
-         doc = pymupdf.open(stream=pdf, filetype="pdf")
-     else:
-         raise ValueError("Unsupported pdf_source type. Must be str, bytes, or BytesIO.")
-
-     text_content = []
-     for page in doc:
-         blocks = page.get_text("blocks", sort=True)
-         for block in blocks:
-             # block[4] contains the text content
-             text_content.append(block[4].strip())
-         text_content.append("\n")  # Add extra newlines between blocks
-
-     # Join all text content with newlines
-     full_text = "\n".join(text_content).strip()
-     # Replace multiple consecutive spaces with a single space
-     full_text = " ".join(full_text.split())
-     # Clean up any resulting double spaces or newlines
-     full_text = " ".join([x for x in full_text.split(" ") if x])
-     full_text = "\n".join([x for x in full_text.split("\n") if x])
-
-     return full_text
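Version 0.0.8 removes this helper outright, and with the `full` extra gone, pymupdf is no longer referenced anywhere in the package. For anyone who depended on `text_from_pdf`, a standalone stand-in distilled from the deleted code is sketched below; it is not part of lm-deluge, and pymupdf must now be installed separately (`pip install pymupdf`):

```python
# Vendored sketch of the removed lm_deluge.util.pdf.text_from_pdf.
# Behavior mirrors the deleted helper, with simplified whitespace cleanup.
import io

import pymupdf  # install separately; lm-deluge 0.0.8 no longer declares it


def text_from_pdf(pdf: str | bytes | io.BytesIO) -> str:
    """Extract literal text from a PDF (no OCR), as the removed utility did."""
    if isinstance(pdf, str):
        doc = pymupdf.open(pdf)  # file path
    elif isinstance(pdf, (bytes, io.BytesIO)):
        stream = io.BytesIO(pdf) if isinstance(pdf, bytes) else pdf
        doc = pymupdf.open(stream=stream, filetype="pdf")
    else:
        raise ValueError("pdf must be str, bytes, or BytesIO")

    blocks_text: list[str] = []
    for page in doc:
        # "blocks" yields (x0, y0, x1, y1, text, block_no, block_type) tuples;
        # index 4 holds the text, as the original code noted.
        for block in page.get_text("blocks", sort=True):
            blocks_text.append(block[4].strip())
    return "\n".join(t for t in blocks_text if t)
```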