lm-deluge 0.0.61__tar.gz → 0.0.63__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

Files changed (81)
  1. {lm_deluge-0.0.61/src/lm_deluge.egg-info → lm_deluge-0.0.63}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/client.py +4 -4
  4. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/config.py +1 -1
  5. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/prompt.py +33 -3
  6. {lm_deluge-0.0.61 → lm_deluge-0.0.63/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  7. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/LICENSE +0 -0
  8. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/README.md +0 -0
  9. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/setup.cfg +0 -0
  10. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/__init__.py +0 -0
  11. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/common.py +0 -0
  16. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  17. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  18. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  19. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  20. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  21. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/gemini.py +0 -0
  22. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/mistral.py +0 -0
  23. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/openai.py +0 -0
  24. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/api_requests/response.py +0 -0
  25. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/batches.py +0 -0
  26. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  27. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  28. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  29. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  30. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/base.py +0 -0
  31. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/built_in_tools/openai.py +0 -0
  32. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/cache.py +0 -0
  33. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/cli.py +0 -0
  34. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/embed.py +0 -0
  35. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/errors.py +0 -0
  36. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/file.py +0 -0
  37. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/image.py +0 -0
  38. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/__init__.py +0 -0
  39. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/classify.py +0 -0
  40. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/extract.py +0 -0
  41. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/locate.py +0 -0
  42. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/ocr.py +0 -0
  43. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/score.py +0 -0
  44. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/llm_tools/translate.py +0 -0
  45. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/__init__.py +0 -0
  46. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/anthropic.py +0 -0
  47. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/bedrock.py +0 -0
  48. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/cerebras.py +0 -0
  49. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/cohere.py +0 -0
  50. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/deepseek.py +0 -0
  51. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/fireworks.py +0 -0
  52. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/google.py +0 -0
  53. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/grok.py +0 -0
  54. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/groq.py +0 -0
  55. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/meta.py +0 -0
  56. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/mistral.py +0 -0
  57. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/openai.py +0 -0
  58. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/openrouter.py +0 -0
  59. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/models/together.py +0 -0
  60. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/presets/cerebras.py +0 -0
  61. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/presets/meta.py +0 -0
  62. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/request_context.py +0 -0
  63. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/rerank.py +0 -0
  64. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/tool.py +0 -0
  65. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/tracker.py +0 -0
  66. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/usage.py +0 -0
  67. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/harmony.py +0 -0
  68. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/json.py +0 -0
  69. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/logprobs.py +0 -0
  70. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/spatial.py +0 -0
  71. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/validation.py +0 -0
  72. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/util/xml.py +0 -0
  73. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge/warnings.py +0 -0
  74. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  75. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  76. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge.egg-info/requires.txt +0 -0
  77. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/src/lm_deluge.egg-info/top_level.txt +0 -0
  78. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/tests/test_builtin_tools.py +0 -0
  79. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/tests/test_file_upload.py +0 -0
  80. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/tests/test_native_mcp_server.py +0 -0
  81. {lm_deluge-0.0.61 → lm_deluge-0.0.63}/tests/test_openrouter_generic.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.61
+Version: 0.0.63
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.61"
+version = "0.0.63"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"

src/lm_deluge/client.py
@@ -70,7 +70,7 @@ class _LLMClient(BaseModel):
     top_p: float = 1.0
     json_mode: bool = False
     max_new_tokens: int = 512
-    reasoning_effort: Literal["low", "medium", "high", None] = None
+    reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None
     logprobs: bool = False
     top_logprobs: int | None = None
     force_local_mcp: bool = False

@@ -957,7 +957,7 @@ def LLMClient(
     top_p: float = 1.0,
     json_mode: bool = False,
     max_new_tokens: int = 512,
-    reasoning_effort: Literal["low", "medium", "high", None] = None,
+    reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,

@@ -986,7 +986,7 @@ def LLMClient(
     top_p: float = 1.0,
     json_mode: bool = False,
     max_new_tokens: int = 512,
-    reasoning_effort: Literal["low", "medium", "high", None] = None,
+    reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,

@@ -1014,7 +1014,7 @@ def LLMClient(
     top_p: float = 1.0,
     json_mode: bool = False,
     max_new_tokens: int = 512,
-    reasoning_effort: Literal["low", "medium", "high", None] = None,
+    reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,

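The client.py hunks above widen the accepted reasoning_effort values from three levels to five, on both the _LLMClient config model and every LLMClient overload. A minimal usage sketch follows; the top-level import and the model name are illustrative assumptions, not taken from this diff:

    from lm_deluge import LLMClient

    # "minimal" and the literal string "none" are now accepted
    # alongside "low"/"medium"/"high". The model name is a placeholder.
    client = LLMClient("gpt-5-mini", reasoning_effort="minimal")
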
src/lm_deluge/config.py
@@ -8,7 +8,7 @@ class SamplingParams(BaseModel):
     top_p: float = 1.0
     json_mode: bool = False
     max_new_tokens: int = 512
-    reasoning_effort: Literal["low", "medium", "high", "none", None] = None
+    reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None
     logprobs: bool = False
     top_logprobs: int | None = None
 
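The same widening lands on SamplingParams in config.py, which previously accepted "none" but not "minimal". A sketch at that layer, using only the fields shown in the hunk above (the import path is inferred from the files-changed list):

    from lm_deluge.config import SamplingParams

    # Pydantic model; fields not passed here keep their defaults.
    params = SamplingParams(reasoning_effort="minimal")
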
src/lm_deluge/prompt.py
@@ -474,22 +474,28 @@ class Message:
 
     def with_file(
         self,
-        data: bytes | str | Path | io.BytesIO,
+        data: bytes | str | Path | io.BytesIO | File,
         *,
         media_type: str | None = None,
         filename: str | None = None,
+        # remote: bool = False,
+        # provider: Literal["openai", "anthropic", "google"] | None = None,
     ) -> "Message":
         """
         Append a file block and return self for chaining.
         """
-        file = File(data, media_type=media_type, filename=filename)
+        if not isinstance(data, File):
+            file = File(data, media_type=media_type, filename=filename)
+        else:
+            file = data
+
         self.parts.append(file)
         return self
 
     @deprecated("with_file")
     def add_file(
         self,
-        data: bytes | str | Path | io.BytesIO,
+        data: bytes | str | Path | io.BytesIO | File,
         *,
         media_type: str | None = None,
         filename: str | None = None,
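with_file (and the deprecated add_file) now also accept a pre-built File instance, which is appended as-is rather than wrapped in a new File. A hedged sketch; the Message.user constructor and the import paths are assumptions based on the package layout, not shown in this diff:

    from lm_deluge.file import File
    from lm_deluge.prompt import Message

    # Construct the File once and reuse it across messages.
    report = File("report.pdf", media_type="application/pdf")
    msg = Message.user("Summarize the attached report.").with_file(report)
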
@@ -499,6 +505,30 @@ class Message:
         """
         return self.with_file(data, media_type=media_type, filename=filename)
 
+    async def with_remote_file(
+        self,
+        data: bytes | str | Path | io.BytesIO | File,
+        *,
+        media_type: str | None = None,
+        filename: str | None = None,
+        provider: Literal["openai", "anthropic", "google"] = "openai",
+    ):
+        if not isinstance(data, File):
+            file = File(data, media_type=media_type, filename=filename)
+        else:
+            file = data
+
+        if not file.is_remote:
+            file = await file.as_remote(provider=provider)
+        else:
+            if file.remote_provider != provider:
+                raise ValueError(
+                    f"File is already remote with provider {file.remote_provider}, cannot change provider"
+                )
+
+        self.parts.append(file)
+        return self
+
     def with_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
         """Append a tool call block and return self for chaining."""
         self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
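The new with_remote_file coroutine is async because it may upload the file to the chosen provider's file store (via File.as_remote) before attaching it, so the request carries a remote reference instead of inline bytes; a File that is already remote under a different provider raises ValueError rather than being silently re-uploaded. A usage sketch under the same assumptions as above:

    import asyncio

    from lm_deluge.prompt import Message

    async def main():
        msg = Message.user("What are the key terms in this contract?")
        # Uploads contract.pdf to Anthropic's file store (if it is not
        # already remote there) and appends the remote File reference.
        msg = await msg.with_remote_file("contract.pdf", provider="anthropic")

    asyncio.run(main())
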
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.61
+Version: 0.0.63
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10