lm-deluge 0.0.84.tar.gz → 0.0.86.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. {lm_deluge-0.0.84/src/lm_deluge.egg-info → lm_deluge-0.0.86}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/client.py +1 -1
  4. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/openai.py +28 -0
  5. lm_deluge-0.0.86/src/lm_deluge/tool/builtin/openai.py +74 -0
  6. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/web_search.py +7 -3
  7. {lm_deluge-0.0.84 → lm_deluge-0.0.86/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  8. lm_deluge-0.0.84/src/lm_deluge/tool/builtin/openai.py +0 -28
  9. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/LICENSE +0 -0
  10. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/README.md +0 -0
  11. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/setup.cfg +0 -0
  12. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/__init__.py +0 -0
  13. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/__init__.py +0 -0
  14. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/anthropic.py +0 -0
  15. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/base.py +0 -0
  16. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/bedrock.py +0 -0
  17. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
  18. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/common.py +0 -0
  19. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  20. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  21. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  22. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  23. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  24. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/gemini.py +0 -0
  25. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/mistral.py +0 -0
  26. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/openai.py +0 -0
  27. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/api_requests/response.py +0 -0
  28. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/batches.py +0 -0
  29. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/cache.py +0 -0
  30. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/cli.py +0 -0
  31. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/config.py +0 -0
  32. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/embed.py +0 -0
  33. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/errors.py +0 -0
  34. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/file.py +0 -0
  35. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/image.py +0 -0
  36. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/mock_openai.py +0 -0
  37. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/__init__.py +0 -0
  38. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/anthropic.py +0 -0
  39. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/arcee.py +0 -0
  40. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/bedrock.py +0 -0
  41. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/cerebras.py +0 -0
  42. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/cohere.py +0 -0
  43. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/deepseek.py +0 -0
  44. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/fireworks.py +0 -0
  45. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/google.py +0 -0
  46. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/grok.py +0 -0
  47. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/groq.py +0 -0
  48. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/kimi.py +0 -0
  49. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/meta.py +0 -0
  50. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/minimax.py +0 -0
  51. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/mistral.py +0 -0
  52. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/openrouter.py +0 -0
  53. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/together.py +0 -0
  54. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/zai.py +0 -0
  55. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/__init__.py +0 -0
  56. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/classify.py +0 -0
  57. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/extract.py +0 -0
  58. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/locate.py +0 -0
  59. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/ocr.py +0 -0
  60. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/score.py +0 -0
  61. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/pipelines/translate.py +0 -0
  62. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/prompt.py +0 -0
  63. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/request_context.py +0 -0
  64. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/rerank.py +0 -0
  65. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/__init__.py +0 -0
  66. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/anthropic/__init__.py +0 -0
  67. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/anthropic/bash.py +0 -0
  68. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/anthropic/computer_use.py +0 -0
  69. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/anthropic/editor.py +0 -0
  70. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/base.py +0 -0
  71. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/builtin/gemini.py +0 -0
  72. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/__init__.py +0 -0
  73. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/actions.py +0 -0
  74. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/base.py +0 -0
  75. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/batch.py +0 -0
  76. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/converters.py +0 -0
  77. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/kernel.py +0 -0
  78. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/cua/trycua.py +0 -0
  79. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/__init__.py +0 -0
  80. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/batch_tool.py +0 -0
  81. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/docs.py +0 -0
  82. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/email.py +0 -0
  83. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/filesystem.py +0 -0
  84. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/memory.py +0 -0
  85. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/otc/__init__.py +0 -0
  86. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/otc/executor.py +0 -0
  87. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/otc/parse.py +0 -0
  88. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/random.py +0 -0
  89. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/sandbox.py +0 -0
  90. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/sheets.py +0 -0
  91. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/subagents.py +0 -0
  92. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/todos.py +0 -0
  93. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/tool_search.py +0 -0
  94. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tracker.py +0 -0
  95. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/usage.py +0 -0
  96. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/harmony.py +0 -0
  97. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/json.py +0 -0
  98. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/logprobs.py +0 -0
  99. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/schema.py +0 -0
  100. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/spatial.py +0 -0
  101. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/validation.py +0 -0
  102. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/util/xml.py +0 -0
  103. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/warnings.py +0 -0
  104. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  105. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  106. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge.egg-info/requires.txt +0 -0
  107. {lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge.egg-info/top_level.txt +0 -0
{lm_deluge-0.0.84/src/lm_deluge.egg-info → lm_deluge-0.0.86}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.84
+Version: 0.0.86
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
{lm_deluge-0.0.84 → lm_deluge-0.0.86}/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.84"
+version = "0.0.86"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
{lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/client.py
@@ -918,7 +918,7 @@ class _LLMClient(BaseModel):
 if not isinstance(result, (str, dict, list)):
     result = str(result)
 
-conversation.with_tool_result(call.id, result)  # type: ignore
+conversation = conversation.with_tool_result(call.id, result)  # type: ignore
 
 if response is None:
     raise RuntimeError("model did not return a response")
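The client.py fix above implies that Conversation.with_tool_result returns a new conversation rather than mutating in place, so the 0.0.84 code silently discarded every tool result. A minimal toy stand-in (not lm_deluge's actual Conversation class, just a sketch of why the reassignment matters):

from dataclasses import dataclass

@dataclass(frozen=True)
class Conversation:
    """Toy immutable conversation mirroring the pattern the fix assumes."""
    messages: tuple = ()

    def with_tool_result(self, call_id: str, result):
        # Returns a NEW Conversation instead of mutating self.
        return Conversation(self.messages + ((call_id, result),))

conv = Conversation()
conv.with_tool_result("call_1", "42")           # 0.0.84 behavior: return value ignored, result lost
conv = conv.with_tool_result("call_1", "42")    # 0.0.86 behavior: result actually recorded
assert conv.messages == (("call_1", "42"),)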
{lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/models/openai.py
@@ -149,6 +149,34 @@ OPENAI_MODELS = {
         "output_cost": 6.0,
         "reasoning_model": True,
     },
+    "o4-mini-deep-research": {
+        "id": "o4-mini-deep-research",
+        "name": "o4-mini-deep-research",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 2,
+        "cached_input_cost": 0.5,
+        "output_cost": 8.0,
+        "reasoning_model": True,
+    },
+    "o3-deep-research": {
+        "id": "o3-deep-research",
+        "name": "o3-deep-research",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 10,
+        "cached_input_cost": 2.50,
+        "output_cost": 40.0,
+        "reasoning_model": True,
+    },
     "o3": {
         "id": "o3",
         "name": "o3-2025-04-16",
lm_deluge-0.0.86/src/lm_deluge/tool/builtin/openai.py (new file)
@@ -0,0 +1,74 @@
+def image_generation_openai():
+    # TODO: handle result properly
+    return {"type": "image_generation"}
+
+
+def code_interpreter_openai(container: dict | None = None):
+    if container is None:
+        container = {"type": "auto"}
+    return {"type": "code_interpreter", "container": container}
+
+
+def local_shell_openai():
+    return {"type": "local_shell"}
+
+
+def web_search_openai(
+    preview: bool = False,
+    user_location: dict | None = None,
+    allowed_domains: list[str] | None = None,
+    search_context_size: str | None = None,
+):
+    """OpenAI's built-in web search tool for the Responses API.
+
+    Args:
+        preview: If True, use web_search_preview. If False (default), use
+            the GA web_search tool.
+        user_location: Optional approximate user location to refine search results.
+            Should be a dict with "type": "approximate" and an "approximate" key
+            containing any of: country (ISO code), city, region, timezone.
+            Note: Not supported for deep research models.
+        allowed_domains: Optional list of domains to restrict search results to.
+            Up to 100 URLs, without http/https prefix (e.g. "openai.com").
+            Only available with web_search (not preview).
+        search_context_size: Controls how much context from web search results
+            is provided to the model. Options: "low", "medium" (default), "high".
+            Higher values use more tokens but may improve response quality.
+
+    Returns:
+        A dict representing the web search tool configuration.
+    """
+    tool: dict = {}
+    if preview:
+        tool["type"] = "web_search_preview"
+        if user_location:
+            tool["user_location"] = user_location
+        if search_context_size:
+            tool["search_context_size"] = search_context_size
+        return tool
+
+    # GA web_search tool
+    tool["type"] = "web_search"
+
+    if user_location:
+        tool["user_location"] = user_location
+
+    if search_context_size:
+        tool["search_context_size"] = search_context_size
+
+    # Domain filtering uses a nested filters structure
+    if allowed_domains:
+        tool["filters"] = {"allowed_domains": allowed_domains}
+
+    return tool
+
+
+def computer_use_openai(
+    display_width: int = 1024, display_height: int = 768, environment: str = "browser"
+):
+    return {
+        "type": "computer_use_preview",
+        "display_width": display_width,
+        "display_height": display_height,
+        "environment": environment,
+    }
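Since the whole module is new, a usage sketch follows directly from the code above: the helpers only build the tool dicts the Responses API expects, and the domain filter applies only on the GA web_search path. How the resulting dict is attached to a request is up to the caller and not shown in this diff:

from lm_deluge.tool.builtin.openai import web_search_openai

# GA web_search with domain filtering and a larger context budget.
tool = web_search_openai(
    allowed_domains=["openai.com", "arxiv.org"],
    search_context_size="high",
)
# {'type': 'web_search', 'search_context_size': 'high',
#  'filters': {'allowed_domains': ['openai.com', 'arxiv.org']}}

# The preview variant is still reachable behind a flag; note that
# allowed_domains is ignored on this code path.
preview_tool = web_search_openai(preview=True)
# {'type': 'web_search_preview'}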
{lm_deluge-0.0.84 → lm_deluge-0.0.86}/src/lm_deluge/tool/prefab/web_search.py
@@ -38,8 +38,8 @@ class AbstractWebSearchManager(abc.ABC):
            return self._tools
 
        self._tools = [
-           Tool.from_function(self._search),
-           Tool.from_function(self._fetch),
+           Tool.from_function(self._search, name=self.search_tool_name),
+           Tool.from_function(self._fetch, name=self.fetch_tool_name),
        ]
 
        return self._tools
@@ -76,12 +76,14 @@ class ExaWebSearchManager(AbstractWebSearchManager):
        search_tool_name: str = "web_search",
        fetch_tool_name: str = "web_fetch",
        timeout: int = 30,
+       max_contents_chars: int = 20_000,
    ):
        super().__init__(
            search_tool_name=search_tool_name,
            fetch_tool_name=fetch_tool_name,
            timeout=timeout,
        )
+       self.max_contents_chars = max_contents_chars
 
    async def _search(  # type: ignore
        self,
@@ -146,7 +148,9 @@ class ExaWebSearchManager(AbstractWebSearchManager):
            raise ValueError("EXA_API_KEY environment variable not set")
        data = {
            "urls": [url],
-           "text": True,
+           "text": {
+               "maxCharacters": self.max_contents_chars,
+           },
        }
 
        headers = {
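Taken together, the three web_search.py hunks mean the configured tool names now actually reach the model (Tool.from_function receives name=) and fetched page text is capped through Exa's text.maxCharacters parameter instead of being returned in full. A hedged construction example, assuming __init__ takes no required arguments beyond those visible in the hunks above:

from lm_deluge.tool.prefab.web_search import ExaWebSearchManager

# Custom tool names are what the model now sees, and page content
# returned by the fetch tool is truncated at 10k characters instead
# of the 20k default.
manager = ExaWebSearchManager(
    search_tool_name="exa_search",
    fetch_tool_name="exa_fetch",
    timeout=30,
    max_contents_chars=10_000,
)
# EXA_API_KEY must be set in the environment before the search/fetch
# tools actually run; otherwise they raise ValueError.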
{lm_deluge-0.0.84 → lm_deluge-0.0.86/src/lm_deluge.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.84
+Version: 0.0.86
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
lm_deluge-0.0.84/src/lm_deluge/tool/builtin/openai.py (removed)
@@ -1,28 +0,0 @@
-def image_generation_openai():
-    # TODO: handle result properly
-    return {"type": "image_generation"}
-
-
-def code_interpreter_openai(container: dict | None = None):
-    if container is None:
-        container = {"type": "auto"}
-    return {"type": "code_interpreter", "container": container}
-
-
-def local_shell_openai():
-    return {"type": "local_shell"}
-
-
-def web_search_openai():
-    return {"type": "web_search_preview"}
-
-
-def computer_use_openai(
-    display_width: int = 1024, display_height: int = 768, environment: str = "browser"
-):
-    return {
-        "type": "computer_use_preview",
-        "display_width": display_width,
-        "display_height": display_height,
-        "environment": environment,
-    }