lm-deluge 0.0.60__tar.gz → 0.0.62__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (82)
  1. {lm_deluge-0.0.60/src/lm_deluge.egg-info → lm_deluge-0.0.62}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/anthropic.py +8 -0
  4. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/client.py +79 -36
  5. lm_deluge-0.0.62/src/lm_deluge/file.py +527 -0
  6. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/prompt.py +33 -3
  7. {lm_deluge-0.0.60 → lm_deluge-0.0.62/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  8. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge.egg-info/SOURCES.txt +3 -1
  9. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/tests/test_builtin_tools.py +2 -2
  10. lm_deluge-0.0.62/tests/test_file_upload.py +627 -0
  11. lm_deluge-0.0.62/tests/test_openrouter_generic.py +238 -0
  12. lm_deluge-0.0.60/src/lm_deluge/file.py +0 -158
  13. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/LICENSE +0 -0
  14. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/README.md +0 -0
  15. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/setup.cfg +0 -0
  16. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/__init__.py +0 -0
  17. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/__init__.py +0 -0
  18. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/base.py +0 -0
  19. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/bedrock.py +0 -0
  20. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/common.py +0 -0
  21. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  22. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  23. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  24. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  25. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  26. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/gemini.py +0 -0
  27. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/mistral.py +0 -0
  28. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/openai.py +0 -0
  29. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/api_requests/response.py +0 -0
  30. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/batches.py +0 -0
  31. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  32. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  33. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  34. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  35. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/base.py +0 -0
  36. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/built_in_tools/openai.py +0 -0
  37. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/cache.py +0 -0
  38. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/cli.py +0 -0
  39. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/config.py +0 -0
  40. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/embed.py +0 -0
  41. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/errors.py +0 -0
  42. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/image.py +0 -0
  43. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/__init__.py +0 -0
  44. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/classify.py +0 -0
  45. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/extract.py +0 -0
  46. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/locate.py +0 -0
  47. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/ocr.py +0 -0
  48. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/score.py +0 -0
  49. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/llm_tools/translate.py +0 -0
  50. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/__init__.py +0 -0
  51. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/anthropic.py +0 -0
  52. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/bedrock.py +0 -0
  53. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/cerebras.py +0 -0
  54. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/cohere.py +0 -0
  55. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/deepseek.py +0 -0
  56. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/fireworks.py +0 -0
  57. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/google.py +0 -0
  58. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/grok.py +0 -0
  59. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/groq.py +0 -0
  60. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/meta.py +0 -0
  61. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/mistral.py +0 -0
  62. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/openai.py +0 -0
  63. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/openrouter.py +0 -0
  64. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/models/together.py +0 -0
  65. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/presets/cerebras.py +0 -0
  66. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/presets/meta.py +0 -0
  67. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/request_context.py +0 -0
  68. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/rerank.py +0 -0
  69. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/tool.py +0 -0
  70. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/tracker.py +0 -0
  71. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/usage.py +0 -0
  72. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/harmony.py +0 -0
  73. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/json.py +0 -0
  74. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/logprobs.py +0 -0
  75. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/spatial.py +0 -0
  76. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/validation.py +0 -0
  77. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/util/xml.py +0 -0
  78. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge/warnings.py +0 -0
  79. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  80. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge.egg-info/requires.txt +0 -0
  81. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/src/lm_deluge.egg-info/top_level.txt +0 -0
  82. {lm_deluge-0.0.60 → lm_deluge-0.0.62}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.60
+Version: 0.0.62
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.60"
+version = "0.0.62"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/api_requests/anthropic.py
@@ -42,6 +42,14 @@ def _build_anthropic_request(
         "content-type": "application/json",
     }
 
+    # Check if any messages contain uploaded files (file_id)
+    # If so, add the files-api beta header
+    for msg in prompt.messages:
+        for file in msg.files:
+            if file.is_remote and file.remote_provider == "anthropic":
+                _add_beta(base_headers, "files-api-2025-04-14")
+                break
+
     request_json = {
         "model": model.name,
         "messages": messages,
src/lm_deluge/client.py
@@ -3,6 +3,7 @@ from typing import (
     Any,
     AsyncGenerator,
     Callable,
+    ClassVar,
     Literal,
     Self,
     Sequence,
@@ -31,7 +32,7 @@ from lm_deluge.tool import MCPServer, Tool
 
 from .api_requests.base import APIResponse
 from .config import SamplingParams
-from .models import APIModel, registry
+from .models import APIModel, register_model, registry
 from .request_context import RequestContext
 from .tracker import StatusTracker
 
@@ -43,6 +44,12 @@ class _LLMClient(BaseModel):
     Keeps all validation, serialization, and existing functionality.
     """
 
+    _REASONING_SUFFIXES: ClassVar[dict[str, Literal["low", "medium", "high"]]] = {
+        "-low": "low",
+        "-medium": "medium",
+        "-high": "high",
+    }
+
     model_names: str | list[str] = ["gpt-4.1-mini"]
     name: str | None = None
     max_requests_per_minute: int = 1_000
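The `ClassVar` annotation (hence the new `typing` import above) matters here: `_LLMClient` is a pydantic `BaseModel`, and `ClassVar` keeps the suffix table a plain class attribute that classmethods can read via `cls._REASONING_SUFFIXES` instead of being captured by pydantic's private-attribute machinery. A minimal sketch of the pattern, assuming pydantic v2 semantics; `Demo` and its names are illustrative only:

    from typing import ClassVar
    from pydantic import BaseModel

    class Demo(BaseModel):
        # ClassVar-annotated attributes are ignored by pydantic's field
        # handling and stay ordinary class attributes.
        _SUFFIXES: ClassVar[dict[str, str]] = {"-low": "low", "-high": "high"}

        @classmethod
        def effort_for(cls, suffix: str) -> str | None:
            return cls._SUFFIXES.get(suffix)

    print(Demo.effort_for("-high"))  # -> "high"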
@@ -143,23 +150,15 @@ class _LLMClient(BaseModel):
     def _normalize_model_names(
         self, models: list[str]
     ) -> tuple[list[str], list[Literal["low", "medium", "high"] | None]]:
-        reasoning_effort_suffixes: dict[str, Literal["low", "medium", "high"]] = {
-            "-low": "low",
-            "-medium": "medium",
-            "-high": "high",
-        }
         normalized: list[str] = []
         efforts: list[Literal["low", "medium", "high"] | None] = []
 
         for name in models:
-            base_name = name
-            effort: Literal["low", "medium", "high"] | None = None
-            for suffix, candidate in reasoning_effort_suffixes.items():
-                if name.endswith(suffix) and len(name) > len(suffix):
-                    base_name = name[: -len(suffix)]
-                    effort = candidate
-                    break
-            normalized.append(base_name)
+            base_name = self._preprocess_openrouter_model(name)
+            trimmed_name, effort = self.__class__._strip_reasoning_suffix_if_registered(
+                base_name
+            )
+            normalized.append(trimmed_name)
             efforts.append(effort)
 
         return normalized, efforts
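The rewritten `_normalize_model_names` delegates to two helpers introduced in the hunks below: `openrouter:` prefixes are resolved first, then a reasoning suffix is stripped only if the trimmed name is actually registered. A standalone sketch of the resulting two-step behavior, using a stand-in set instead of the package's real registry:

    # Stand-in for lm_deluge's model registry; the names are illustrative.
    known_models = {"gpt-5", "gpt-4.1-mini"}

    def normalize(name: str) -> tuple[str, str | None]:
        if name.startswith("openrouter:"):  # step 1: resolve the prefix
            name = "openrouter-" + name.split(":", 1)[1].replace("/", "-")
            known_models.add(name)  # the real code calls register_model here
        for suffix in ("-low", "-medium", "-high"):  # step 2: registry-gated suffix
            if name.endswith(suffix) and len(name) > len(suffix):
                base = name[: -len(suffix)]
                if base in known_models:
                    return base, suffix[1:]
        return name, None

    print(normalize("gpt-5-high"))               # ('gpt-5', 'high')
    print(normalize("openrouter:qwen/qwq-32b"))  # ('openrouter-qwen-qwq-32b', None)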
@@ -254,31 +253,63 @@ class _LLMClient(BaseModel):
     def models(self):
         return self.model_names  # why? idk
 
+    @staticmethod
+    def _preprocess_openrouter_model(model_name: str) -> str:
+        """Process openrouter: prefix and register model if needed."""
+        if model_name.startswith("openrouter:"):
+            slug = model_name.split(":", 1)[1]  # Everything after "openrouter:"
+            # Create a unique id by replacing slashes with hyphens
+            model_id = f"openrouter-{slug.replace('/', '-')}"
+
+            # Register the model if not already in registry
+            if model_id not in registry:
+                register_model(
+                    id=model_id,
+                    name=slug,  # The full slug sent to OpenRouter API (e.g., "openrouter/andromeda-alpha")
+                    api_base="https://openrouter.ai/api/v1",
+                    api_key_env_var="OPENROUTER_API_KEY",
+                    api_spec="openai",
+                    supports_json=True,
+                    supports_logprobs=False,
+                    supports_responses=False,
+                    input_cost=0,  # Unknown costs for generic models
+                    cached_input_cost=0,
+                    cache_write_cost=0,
+                    output_cost=0,
+                )
+
+            return model_id
+        return model_name
+
     @model_validator(mode="before")
     @classmethod
     def fix_lists(cls, data) -> "_LLMClient":
-        # Parse reasoning effort from model name suffixes (e.g., "gpt-5-high")
-        # Only applies when a single model string is provided
-        if isinstance(data.get("model_names"), str):
-            model_name = data["model_names"]
-            reasoning_effort_suffixes = {
-                "-low": "low",
-                "-medium": "medium",
-                "-high": "high",
-            }
-
-            for suffix, effort in reasoning_effort_suffixes.items():
-                if model_name.endswith(suffix):
-                    # Extract base model name by removing suffix
-                    base_model = model_name[: -len(suffix)]
-                    data["model_names"] = base_model
-
-                    # Set reasoning_effort if not already explicitly set
-                    if data.get("reasoning_effort") is None:
-                        data["reasoning_effort"] = effort
-                    break
-
-            data["model_names"] = [data["model_names"]]
+        # Process model_names - handle both strings and lists
+        model_names = data.get("model_names")
+
+        if isinstance(model_names, str):
+            # Single model as string
+            # First, handle OpenRouter prefix
+            model_name = cls._preprocess_openrouter_model(model_names)
+
+            # Then handle reasoning effort suffix (e.g., "gpt-5-high")
+            model_name, effort = cls._strip_reasoning_suffix_if_registered(model_name)
+            if effort and data.get("reasoning_effort") is None:
+                data["reasoning_effort"] = effort
+
+            data["model_names"] = [model_name]
+
+        elif isinstance(model_names, list):
+            # List of models - process each one
+            processed_models = []
+            for model_name in model_names:
+                # Handle OpenRouter prefix for each model
+                processed_model = cls._preprocess_openrouter_model(model_name)
+                processed_model, _ = cls._strip_reasoning_suffix_if_registered(
+                    processed_model
+                )
+                processed_models.append(processed_model)
+            data["model_names"] = processed_models
 
         if not isinstance(data.get("sampling_params", []), list):
            data["sampling_params"] = [data["sampling_params"]]
@@ -298,6 +329,18 @@ class _LLMClient(BaseModel):
         data["sampling_params"] = data["sampling_params"] * len(data["model_names"])
         return data
 
+    @classmethod
+    def _strip_reasoning_suffix_if_registered(
+        cls, model_name: str
+    ) -> tuple[str, Literal["low", "medium", "high"] | None]:
+        """Remove reasoning suffix only when the trimmed model already exists."""
+        for suffix, effort in cls._REASONING_SUFFIXES.items():
+            if model_name.endswith(suffix) and len(model_name) > len(suffix):
+                candidate = model_name[: -len(suffix)]
+                if candidate in registry:
+                    return candidate, effort
+        return model_name, None
+
     @model_validator(mode="after")
     def validate_client(self) -> Self:
         if isinstance(self.model_names, str):
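The registry check is the behavioral change relative to 0.0.60, where the suffix was stripped unconditionally for single-string model names: a name is now trimmed only when the trimmed form is a registered model. For example, with `gpt-5` in the registry, `"gpt-5-high"` resolves to `("gpt-5", "high")`, while a hypothetical `"my-model-high"` whose base is unregistered comes back unchanged as `("my-model-high", None)`, so model ids that merely happen to end in `-low`, `-medium`, or `-high` are no longer mangled.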