lm-deluge 0.0.28.tar.gz → 0.0.30.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

Files changed (62)
  1. {lm_deluge-0.0.28/src/lm_deluge.egg-info → lm_deluge-0.0.30}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/file.py +1 -1
  4. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/models.py +32 -4
  5. {lm_deluge-0.0.28 → lm_deluge-0.0.30/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  6. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/LICENSE +0 -0
  7. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/README.md +0 -0
  8. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/setup.cfg +0 -0
  9. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/__init__.py +0 -0
  10. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/agent.py +0 -0
  11. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/common.py +0 -0
  16. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  17. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  18. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  19. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  20. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  21. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/gemini.py +0 -0
  22. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/mistral.py +0 -0
  23. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/openai.py +0 -0
  24. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/api_requests/response.py +0 -0
  25. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/batches.py +0 -0
  26. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  27. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  28. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  29. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  30. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/base.py +0 -0
  31. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/built_in_tools/openai.py +0 -0
  32. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/cache.py +0 -0
  33. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/client.py +0 -0
  34. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/config.py +0 -0
  35. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/embed.py +0 -0
  36. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/errors.py +0 -0
  37. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/gemini_limits.py +0 -0
  38. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/image.py +0 -0
  39. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/__init__.py +0 -0
  40. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/classify.py +0 -0
  41. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/extract.py +0 -0
  42. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/locate.py +0 -0
  43. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/ocr.py +0 -0
  44. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/score.py +0 -0
  45. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/llm_tools/translate.py +0 -0
  46. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/prompt.py +0 -0
  47. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/request_context.py +0 -0
  48. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/rerank.py +0 -0
  49. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/tool.py +0 -0
  50. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/tracker.py +0 -0
  51. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/usage.py +0 -0
  52. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/util/json.py +0 -0
  53. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/util/logprobs.py +0 -0
  54. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/util/spatial.py +0 -0
  55. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/util/validation.py +0 -0
  56. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge/util/xml.py +0 -0
  57. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  58. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  59. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge.egg-info/requires.txt +0 -0
  60. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/src/lm_deluge.egg-info/top_level.txt +0 -0
  61. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/tests/test_builtin_tools.py +0 -0
  62. {lm_deluge-0.0.28 → lm_deluge-0.0.30}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.28
+Version: 0.0.30
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.28"
+version = "0.0.30"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"

src/lm_deluge/file.py
@@ -9,7 +9,7 @@ from dataclasses import dataclass, field
 from pathlib import Path
 
 
-@dataclass(slots=True)
+@dataclass
 class File:
     # raw bytes, pathlike, http url, base64 data url, or file_id
     data: bytes | io.BytesIO | Path | str

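The only change to src/lm_deluge/file.py is dropping slots=True from the File dataclass. The diff doesn't state a reason, but the observable difference is that a slotted dataclass generates __slots__ and omits the per-instance __dict__, so callers cannot attach attributes beyond the declared fields. A minimal, self-contained sketch of that difference (the Attachment classes below are illustrative stand-ins, not the package's File class):

from dataclasses import dataclass
from pathlib import Path

@dataclass(slots=True)          # requires Python 3.10+, matching Requires-Python >=3.10
class SlottedAttachment:
    data: bytes | Path | str    # declared fields become __slots__ entries; no __dict__

@dataclass                      # the 0.0.30 behavior: a regular dataclass
class PlainAttachment:
    data: bytes | Path | str    # instances keep a normal __dict__

plain = PlainAttachment(data="https://example.com/report.pdf")
plain.mime_type = "application/pdf"          # fine: stored in the instance __dict__

slotted = SlottedAttachment(data="https://example.com/report.pdf")
try:
    slotted.mime_type = "application/pdf"    # AttributeError: no slot named mime_type
except AttributeError as err:
    print("slots=True rejects ad-hoc attributes:", err)

A plain @dataclass gives up the small memory and attribute-lookup savings of __slots__ in exchange for that flexibility (and for compatibility with helpers such as functools.cached_property, which need a __dict__).
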
src/lm_deluge/models.py
@@ -145,7 +145,7 @@ BUILTIN_MODELS = {
     },
     "gemini-2.5-pro": {
         "id": "gemini-2.5-pro",
-        "name": "gemini-2.5-pro-preview-05-06",
+        "name": "gemini-2.5-pro",
         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
         "api_key_env_var": "GEMINI_API_KEY",
         "supports_json": True,

@@ -159,7 +159,21 @@ BUILTIN_MODELS = {
     },
     "gemini-2.5-flash": {
         "id": "gemini-2.5-flash",
-        "name": "gemini-2.5-flash-preview-05-20",
+        "name": "gemini-2.5-flash",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "openai",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": True,
+    },
+    "gemini-2.5-flash-lite": {
+        "id": "gemini-2.5-flash-lite",
+        "name": "gemini-2.5-flash-lite",
         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
         "api_key_env_var": "GEMINI_API_KEY",
         "supports_json": True,

@@ -202,7 +216,7 @@ BUILTIN_MODELS = {
     },
     "gemini-2.5-pro-gemini": {
         "id": "gemini-2.5-pro-gemini",
-        "name": "gemini-2.5-pro-preview-05-06",
+        "name": "gemini-2.5-pro",
         "api_base": "https://generativelanguage.googleapis.com/v1beta",
         "api_key_env_var": "GEMINI_API_KEY",
         "supports_json": True,

@@ -216,7 +230,21 @@ BUILTIN_MODELS = {
     },
     "gemini-2.5-flash-gemini": {
         "id": "gemini-2.5-flash-gemini",
-        "name": "gemini-2.5-flash-preview-05-20",
+        "name": "gemini-2.5-flash",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 0.1,
+        "output_cost": 0.4,
+        "requests_per_minute": 20,
+        "tokens_per_minute": 100_000,
+        "reasoning_model": True,
+    },
+    "gemini-2.5-flash-lite-gemini": {
+        "id": "gemini-2.5-flash-lite-gemini",
+        "name": "gemini-2.5-flash-lite",
         "api_base": "https://generativelanguage.googleapis.com/v1beta",
         "api_key_env_var": "GEMINI_API_KEY",
         "supports_json": True,

src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.28
+Version: 0.0.30
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
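
Returning to the models.py hunks above: they retire the preview identifiers (gemini-2.5-pro-preview-05-06, gemini-2.5-flash-preview-05-20) in favor of the stable names and register gemini-2.5-flash-lite twice, once against the OpenAI-compatible endpoint (api_spec "openai") and once against the native Gemini API (api_spec "gemini"). The sketch below shows how a registry entry of that shape can be consumed; the gemini-2.5-flash dict is copied verbatim from the diff, while the resolve_model helper and the per-million-token reading of input_cost/output_cost are illustrative assumptions, not lm_deluge's documented interface.

import os

# Entry copied verbatim from the 0.0.30 diff; the surrounding helper is a sketch.
BUILTIN_MODELS = {
    "gemini-2.5-flash": {
        "id": "gemini-2.5-flash",
        "name": "gemini-2.5-flash",
        "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
        "api_key_env_var": "GEMINI_API_KEY",
        "supports_json": True,
        "supports_logprobs": False,
        "api_spec": "openai",
        "input_cost": 0.1,
        "output_cost": 0.4,
        "requests_per_minute": 20,
        "tokens_per_minute": 100_000,
        "reasoning_model": True,
    },
}

def resolve_model(model_id: str) -> dict:
    """Look up a registry entry and warn if its API key env var is unset (illustrative helper)."""
    entry = BUILTIN_MODELS[model_id]
    if not os.environ.get(entry["api_key_env_var"]):
        print(f"warning: {entry['api_key_env_var']} is not set")
    return entry

entry = resolve_model("gemini-2.5-flash")
# Assuming input_cost/output_cost are USD per million tokens, a call with
# 10,000 prompt tokens and 2,000 completion tokens would cost roughly:
estimate = 10_000 / 1e6 * entry["input_cost"] + 2_000 / 1e6 * entry["output_cost"]
print(f"{entry['name']} via {entry['api_base']}: ~${estimate:.4f}")

The -gemini variants added in the later hunks carry the same fields but point api_base at https://generativelanguage.googleapis.com/v1beta with api_spec "gemini".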