lm-deluge 0.0.45.tar.gz → 0.0.47.tar.gz

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.

This release of lm-deluge has been flagged as potentially problematic.

Files changed (80)
  1. {lm_deluge-0.0.45/src/lm_deluge.egg-info → lm_deluge-0.0.47}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/image.py +9 -1
  4. lm_deluge-0.0.47/src/lm_deluge/presets/cerebras.py +17 -0
  5. lm_deluge-0.0.47/src/lm_deluge/presets/meta.py +13 -0
  6. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/prompt.py +19 -2
  7. {lm_deluge-0.0.45 → lm_deluge-0.0.47/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  8. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge.egg-info/SOURCES.txt +2 -0
  9. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/LICENSE +0 -0
  10. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/README.md +0 -0
  11. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/setup.cfg +0 -0
  12. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/__init__.py +0 -0
  13. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/agent.py +0 -0
  14. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/__init__.py +0 -0
  15. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/anthropic.py +0 -0
  16. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/base.py +0 -0
  17. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/bedrock.py +0 -0
  18. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/common.py +0 -0
  19. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  20. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  21. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  22. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  23. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  24. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/gemini.py +0 -0
  25. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/mistral.py +0 -0
  26. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/openai.py +0 -0
  27. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/api_requests/response.py +0 -0
  28. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/batches.py +0 -0
  29. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  30. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  31. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  32. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  33. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/base.py +0 -0
  34. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/built_in_tools/openai.py +0 -0
  35. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/cache.py +0 -0
  36. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/cli.py +0 -0
  37. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/client.py +0 -0
  38. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/config.py +0 -0
  39. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/embed.py +0 -0
  40. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/errors.py +0 -0
  41. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/file.py +0 -0
  42. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/gemini_limits.py +0 -0
  43. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/__init__.py +0 -0
  44. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/classify.py +0 -0
  45. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/extract.py +0 -0
  46. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/locate.py +0 -0
  47. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/ocr.py +0 -0
  48. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/score.py +0 -0
  49. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/llm_tools/translate.py +0 -0
  50. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/__init__.py +0 -0
  51. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/anthropic.py +0 -0
  52. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/bedrock.py +0 -0
  53. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/cerebras.py +0 -0
  54. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/cohere.py +0 -0
  55. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/deepseek.py +0 -0
  56. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/fireworks.py +0 -0
  57. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/google.py +0 -0
  58. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/grok.py +0 -0
  59. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/groq.py +0 -0
  60. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/meta.py +0 -0
  61. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/mistral.py +0 -0
  62. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/openai.py +0 -0
  63. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/openrouter.py +0 -0
  64. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/models/together.py +0 -0
  65. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/request_context.py +0 -0
  66. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/rerank.py +0 -0
  67. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/tool.py +0 -0
  68. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/tracker.py +0 -0
  69. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/usage.py +0 -0
  70. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/harmony.py +0 -0
  71. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/json.py +0 -0
  72. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/logprobs.py +0 -0
  73. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/spatial.py +0 -0
  74. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/validation.py +0 -0
  75. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge/util/xml.py +0 -0
  76. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.45 → lm_deluge-0.0.47}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.45
+Version: 0.0.47
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.45"
+version = "0.0.47"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/image.py
@@ -76,7 +76,15 @@ class Image:
             header, encoded = self.data.split(",", 1)
             return base64.b64decode(encoded)
         else:
-            raise ValueError(f"unreadable image format. type: {type(self.data)}")
+            if isinstance(self.data, str):
+                content = self.data[:1_000]
+            elif isinstance(self.data, bytes):
+                content = "[raw bytes]"
+            else:
+                content = f"[raw {type(self.data)}]"
+            raise ValueError(
+                f"unreadable image format. type: {type(self.data)}. content: {content}"
+            )
 
     def _mime(self) -> str:
         if self.media_type:
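The change above replaces the bare "unreadable image format" error with one that includes a truncated preview of the offending value. A minimal standalone sketch of that error-path logic; describe_unreadable is a hypothetical name, and data stands in for self.data inside the real Image method:

def describe_unreadable(data) -> str:
    # Mirrors the new error path: show up to 1,000 characters of a bad string,
    # but never dump raw bytes or arbitrary objects into the message.
    if isinstance(data, str):
        content = data[:1_000]
    elif isinstance(data, bytes):
        content = "[raw bytes]"
    else:
        content = f"[raw {type(data)}]"
    return f"unreadable image format. type: {type(data)}. content: {content}"

# e.g. describe_unreadable("not-a-data-url") surfaces the bad prefix,
# while describe_unreadable(b"\x89PNG") reports only "[raw bytes]".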
src/lm_deluge/presets/cerebras.py (new file)
@@ -0,0 +1,17 @@
+from lm_deluge import LLMClient
+
+mixture_of_cerebras = LLMClient(
+    [
+        "gpt-oss-120b-cerebras",
+        "llama-4-scout-cerebras",
+        "llama-3.3-70b-cerebras",
+        "qwen-3-32b-cerebras",
+        "llama-4-maverick-cerebras",
+        "qwen-3-235b-instruct-cerebras",
+        "qwen-3-235b-thinking-cerebras",
+        "qwen-3-coder-cerebras",
+    ],
+    model_weights=[3, 3, 3, 3, 3, 3, 3, 1],
+    max_requests_per_minute=250,
+    max_tokens_per_minute=1_000_000,
+)
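The new preset defines a single client that spreads traffic across eight Cerebras-hosted models, with qwen-3-coder weighted 1 against 3 for the others. A hedged usage sketch, assuming the presets module is importable and that LLMClient exposes process_prompts_sync and a .completion field on responses (both taken from the project's README-style examples, not from this diff):

from lm_deluge.presets.cerebras import mixture_of_cerebras  # assumes presets is importable

# Prompts are distributed across the listed models according to model_weights.
responses = mixture_of_cerebras.process_prompts_sync(
    ["Summarize the plot of Hamlet in two sentences."]
)
print(responses[0].completion)  # assumed response attribute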
src/lm_deluge/presets/meta.py (new file)
@@ -0,0 +1,13 @@
+from lm_deluge import LLMClient
+
+mixture_of_llamas = LLMClient(
+    ["llama-4-scout", "llama-4-maverick", "llama-3.3-70b", "llama-3.3-8b"],
+    max_requests_per_minute=12_000,
+    max_tokens_per_minute=4_000_000,
+)
+
+multimodal_llamas = LLMClient(
+    ["llama-4-scout", "llama-4-maverick"],
+    max_requests_per_minute=6_000,
+    max_tokens_per_minute=2_000_000,
+)
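meta.py follows the same pattern with two Llama-only clients: one for high-throughput text work and one restricted to the multimodal Llama 4 models. The constructor shape shown above (a model list plus per-minute request and token caps) is all a preset needs, so a user-defined variant looks the same; budget_llamas below is a hypothetical example, not part of the package:

from lm_deluge import LLMClient

# Hypothetical user-defined preset reusing the same constructor shape as
# mixture_of_llamas, with a single cheap model and tighter rate limits.
budget_llamas = LLMClient(
    ["llama-3.3-8b"],
    max_requests_per_minute=1_000,
    max_tokens_per_minute=500_000,
)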
src/lm_deluge/prompt.py
@@ -333,6 +333,23 @@ class Message:
         """
         Return a JSON-serialisable dict that fully captures the message.
         """
+        def _json_safe(value):
+            if isinstance(value, (str, int, float, bool)) or value is None:
+                return value
+            if isinstance(value, list):
+                return [_json_safe(v) for v in value]
+            if isinstance(value, dict):
+                return {k: _json_safe(v) for k, v in value.items()}
+            if isinstance(value, Text):
+                return {"type": "text", "text": value.text}
+            if isinstance(value, Image):
+                w, h = value.size
+                return {"type": "image", "tag": f"<Image ({w}×{h})>"}
+            if isinstance(value, File):
+                size = value.size
+                return {"type": "file", "tag": f"<File ({size} bytes)>"}
+            return repr(value)
+
         content_blocks: list[dict] = []
         for p in self.parts:
             if isinstance(p, Text):
@@ -349,7 +366,7 @@ class Message:
                         "type": "tool_call",
                         "id": p.id,
                         "name": p.name,
-                        "arguments": p.arguments,
+                        "arguments": _json_safe(p.arguments),
                     }
                 )
             elif isinstance(p, ToolResult):
@@ -357,7 +374,7 @@ class Message:
                     {
                         "type": "tool_result",
                         "tool_call_id": p.tool_call_id,
-                        "result": p.result,
+                        "result": _json_safe(p.result),
                     }
                 )
             elif isinstance(p, Thinking):
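The new _json_safe helper recursively converts tool-call arguments and tool results into JSON-serialisable structures: primitives pass through, lists and dicts are walked, Text/Image/File parts become small tag dicts, and anything else falls back to repr(). A standalone sketch of that behaviour using a stub File type, since the real Text, Image, and File classes live in lm_deluge.prompt:

import json
from dataclasses import dataclass

@dataclass
class StubFile:
    size: int  # stands in for File, which exposes a size in bytes

def json_safe(value):
    # Same shape as the helper above, restricted to the stub type.
    if isinstance(value, (str, int, float, bool)) or value is None:
        return value
    if isinstance(value, list):
        return [json_safe(v) for v in value]
    if isinstance(value, dict):
        return {k: json_safe(v) for k, v in value.items()}
    if isinstance(value, StubFile):
        return {"type": "file", "tag": f"<File ({value.size} bytes)>"}
    return repr(value)  # anything unknown degrades to its repr

payload = {"ok": True, "attachments": [StubFile(size=2048)], "handle": object()}
print(json.dumps(json_safe(payload)))  # serialises without a TypeError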
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.45
+Version: 0.0.47
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
src/lm_deluge.egg-info/SOURCES.txt
@@ -66,6 +66,8 @@ src/lm_deluge/models/mistral.py
 src/lm_deluge/models/openai.py
 src/lm_deluge/models/openrouter.py
 src/lm_deluge/models/together.py
+src/lm_deluge/presets/cerebras.py
+src/lm_deluge/presets/meta.py
 src/lm_deluge/util/harmony.py
 src/lm_deluge/util/json.py
 src/lm_deluge/util/logprobs.py