lm-deluge 0.0.16__tar.gz → 0.0.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.16/src/lm_deluge.egg-info → lm_deluge-0.0.18}/PKG-INFO +11 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/README.md +10 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/pyproject.toml +1 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/anthropic.py +10 -7
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/base.py +23 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/bedrock.py +9 -6
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/gemini.py +4 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/mistral.py +4 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/openai.py +15 -2
- lm_deluge-0.0.16/src/lm_deluge/built_in_tools/anthropic.py → lm_deluge-0.0.18/src/lm_deluge/built_in_tools/anthropic/__init__.py +3 -1
- lm_deluge-0.0.18/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- lm_deluge-0.0.18/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- lm_deluge-0.0.18/src/lm_deluge/built_in_tools/anthropic/editor.py +559 -0
- lm_deluge-0.0.18/src/lm_deluge/built_in_tools/base.py +9 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/client.py +88 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/image.py +9 -6
- lm_deluge-0.0.18/src/lm_deluge/llm_tools/classify.py +1 -0
- lm_deluge-0.0.18/src/lm_deluge/llm_tools/locate.py +162 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/prompt.py +2 -2
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/request_context.py +1 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/tool.py +4 -0
- lm_deluge-0.0.18/src/lm_deluge/util/spatial.py +139 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18/src/lm_deluge.egg-info}/PKG-INFO +11 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge.egg-info/SOURCES.txt +8 -1
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/LICENSE +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/setup.cfg +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/agent.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/models.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.16 → lm_deluge-0.0.18}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.16
+Version: 0.0.18
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
@@ -211,6 +211,16 @@ for tool_call in resps[0].tool_calls:
     # this is dumb sorry will make it better
     tool_to_call = [x for x in tools if x.name == tool_call.name][0]
     tool_to_call.call(**tool_call.arguments) # in async code, use .acall()
+
+# or use the built-in agent loop to handle this automatically
+import asyncio
+
+async def main():
+    conv = Conversation.user("List the files in the current directory")
+    conv, resp = await client.run_agent_loop(conv, tools=tools)
+    print(resp.content.completion)
+
+asyncio.run(main())
 ```
 
 ### Prompt Caching (Anthropic)
README.md
@@ -184,6 +184,16 @@ for tool_call in resps[0].tool_calls:
     # this is dumb sorry will make it better
     tool_to_call = [x for x in tools if x.name == tool_call.name][0]
     tool_to_call.call(**tool_call.arguments) # in async code, use .acall()
+
+# or use the built-in agent loop to handle this automatically
+import asyncio
+
+async def main():
+    conv = Conversation.user("List the files in the current directory")
+    conv, resp = await client.run_agent_loop(conv, tools=tools)
+    print(resp.content.completion)
+
+asyncio.run(main())
 ```
 
 ### Prompt Caching (Anthropic)
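For reference while reading the hunk above: the manual dispatch in the README's context lines can also be done asynchronously with `.acall()`, as its comment suggests. A minimal sketch (the `tools` list and `tool_call` object are assumed to exist exactly as in the README example):

```python
import asyncio

async def dispatch(tool_call, tools):
    # find the matching tool by name and await its async entry point
    tool = next(t for t in tools if t.name == tool_call.name)
    return await tool.acall(**tool_call.arguments)

# driven the same way as the README example, e.g.:
# asyncio.run(dispatch(resps[0].tool_calls[0], tools))
```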
src/lm_deluge/api_requests/anthropic.py
@@ -36,7 +36,7 @@ def _build_anthropic_request(
     cache_pattern: CachePattern | None = None,
 ):
     system_message, messages = prompt.to_anthropic(cache_pattern=cache_pattern)
-    request_header = {
+    base_headers = {
         "x-api-key": os.getenv(model.api_key_env_var),
         "anthropic-version": "2023-06-01",
         "content-type": "application/json",
@@ -83,13 +83,13 @@ def _build_anthropic_request(
                 "text_editor_20241022",
                 "bash_20241022",
             ]:
-                _add_beta(request_header, "computer-use-2024-10-22")
+                _add_beta(base_headers, "computer-use-2024-10-22")
             elif tool["type"] == "computer_20250124":
-                _add_beta(request_header, "computer-use-2025-01-24")
+                _add_beta(base_headers, "computer-use-2025-01-24")
             elif tool["type"] == "code_execution_20250522":
-                _add_beta(request_header, "code-execution-2025-05-22")
+                _add_beta(base_headers, "code-execution-2025-05-22")
             elif isinstance(tool, MCPServer):
-                _add_beta(request_header, "mcp-client-2025-04-04")
+                _add_beta(base_headers, "mcp-client-2025-04-04")
                 mcp_servers.append(tool.for_anthropic())
 
     # Add cache control to last tool if tools_only caching is specified
@@ -100,7 +100,7 @@ def _build_anthropic_request(
     if len(mcp_servers) > 0:
         request_json["mcp_servers"] = mcp_servers
 
-    return request_json, request_header
+    return request_json, base_headers
 
 
 class AnthropicRequest(APIRequestBase):
@@ -114,13 +114,16 @@ class AnthropicRequest(APIRequestBase):
         if self.context.cache is not None:
             self.context.prompt.lock_images_as_bytes()
 
-        self.request_json, self.request_header = _build_anthropic_request(
+        self.request_json, base_headers = _build_anthropic_request(
             self.model,
             self.context.prompt,
             self.context.tools,
             self.context.sampling_params,
             self.context.cache,
         )
+        self.request_header = self.merge_headers(
+            base_headers, exclude_patterns=["openai", "gemini", "mistral"]
+        )
 
     async def handle_response(self, http_response: ClientResponse) -> APIResponse:
         data = None
src/lm_deluge/api_requests/base.py
@@ -46,6 +46,29 @@ class APIRequestBase(ABC):
         # the APIResponse in self.result includes all the information
         self.context.callback(self.result[-1], self.context.status_tracker)
 
+    def merge_headers(
+        self, base_headers: dict[str, str], exclude_patterns: list[str] | None = None
+    ) -> dict[str, str]:
+        """Merge extra_headers with base headers, giving priority to extra_headers."""
+        if not self.context.extra_headers:
+            return base_headers
+
+        # Filter out headers that match exclude patterns
+        filtered_extra = {}
+        if exclude_patterns:
+            for key, value in self.context.extra_headers.items():
+                if not any(
+                    pattern.lower() in key.lower() for pattern in exclude_patterns
+                ):
+                    filtered_extra[key] = value
+        else:
+            filtered_extra = dict(self.context.extra_headers)
+
+        # Start with base headers, then overlay filtered extra headers (extra takes precedence)
+        merged = dict(base_headers)
+        merged.update(filtered_extra)
+        return merged
+
     def handle_success(self, data):
         self.call_callback()
         if self.context.status_tracker:
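The merge semantics above are worth spelling out: user-supplied extra headers always win over base headers, and exclusion is a case-insensitive substring match on the header name. A standalone sketch of the same algorithm, extracted for illustration (not the library's API):

```python
def merge(base: dict[str, str], extra: dict[str, str], exclude: list[str]) -> dict[str, str]:
    # drop extra headers whose name contains any excluded pattern (case-insensitive)
    kept = {k: v for k, v in extra.items()
            if not any(p.lower() in k.lower() for p in exclude)}
    merged = dict(base)
    merged.update(kept)  # surviving extra headers take precedence over base headers
    return merged

# "anthropic-beta" survives an Anthropic request's exclude list, which names
# only the other providers:
assert merge(
    {"x-api-key": "k"},
    {"anthropic-beta": "tools"},
    ["openai", "gemini", "mistral"],
) == {"x-api-key": "k", "anthropic-beta": "tools"}
```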
src/lm_deluge/api_requests/bedrock.py
@@ -85,7 +85,7 @@ def _build_anthropic_bedrock_request(
     )
 
     # Setup basic headers (AWS4Auth will add the Authorization header)
-    request_header = {
+    base_headers = {
         "Content-Type": "application/json",
     }
 
@@ -115,11 +115,11 @@ def _build_anthropic_bedrock_request(
                 "text_editor_20241022",
                 "bash_20241022",
             ]:
-                _add_beta(request_header, "computer-use-2024-10-22")
+                _add_beta(base_headers, "computer-use-2024-10-22")
             elif tool["type"] == "computer_20250124":
-                _add_beta(request_header, "computer-use-2025-01-24")
+                _add_beta(base_headers, "computer-use-2025-01-24")
             elif tool["type"] == "code_execution_20250522":
-                _add_beta(request_header, "code-execution-2025-05-22")
+                _add_beta(base_headers, "code-execution-2025-05-22")
             elif isinstance(tool, MCPServer):
                 raise ValueError("bedrock doesn't support MCP connector right now")
                 # _add_beta(request_header, "mcp-client-2025-04-04")
@@ -133,7 +133,7 @@ def _build_anthropic_bedrock_request(
     if len(mcp_servers) > 0:
         request_json["mcp_servers"] = mcp_servers
 
-    return request_json, request_header, auth, url
+    return request_json, base_headers, auth, url
 
 
 class BedrockRequest(APIRequestBase):
@@ -147,7 +147,7 @@ class BedrockRequest(APIRequestBase):
         if self.context.cache is not None:
             self.context.prompt.lock_images_as_bytes()
 
-        self.request_json, self.request_header, self.auth, self.url = (
+        self.request_json, base_headers, self.auth, self.url = (
             _build_anthropic_bedrock_request(
                 self.model,
                 context.prompt,
@@ -156,6 +156,9 @@ class BedrockRequest(APIRequestBase):
                 context.cache,
             )
         )
+        self.request_header = self.merge_headers(
+            base_headers, exclude_patterns=["anthropic", "openai", "gemini", "mistral"]
+        )
 
     async def execute_once(self) -> APIResponse:
         """Override execute_once to handle AWS4Auth signing."""
src/lm_deluge/api_requests/gemini.py
@@ -77,9 +77,12 @@ class GeminiRequest(APIRequestBase):
         self.model = APIModel.from_registry(self.context.model_name)
         # Gemini API endpoint format: https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent
         self.url = f"{self.model.api_base}/models/{self.model.name}:generateContent"
-        self.request_header = {
+        base_headers = {
             "Content-Type": "application/json",
         }
+        self.request_header = self.merge_headers(
+            base_headers, exclude_patterns=["anthropic", "openai", "mistral"]
+        )
 
         # Add API key as query parameter for Gemini
         api_key = os.getenv(self.model.api_key_env_var)
src/lm_deluge/api_requests/mistral.py
@@ -22,9 +22,12 @@ class MistralRequest(APIRequestBase):
         )
         self.model = APIModel.from_registry(self.context.model_name)
         self.url = f"{self.model.api_base}/chat/completions"
-        self.request_header = {
+        base_headers = {
             "Authorization": f"Bearer {os.getenv(self.model.api_key_env_var)}"
         }
+        self.request_header = self.merge_headers(
+            base_headers, exclude_patterns=["anthropic", "openai", "gemini"]
+        )
         self.request_json = {
             "model": self.model.name,
             "messages": self.context.prompt.to_mistral(),
src/lm_deluge/api_requests/openai.py
@@ -73,9 +73,12 @@ class OpenAIRequest(APIRequestBase):
         )
         self.model = APIModel.from_registry(self.context.model_name)
         self.url = f"{self.model.api_base}/chat/completions"
-        self.request_header = {
+        base_headers = {
             "Authorization": f"Bearer {os.getenv(self.model.api_key_env_var)}"
         }
+        self.request_header = self.merge_headers(
+            base_headers, exclude_patterns=["anthropic"]
+        )
 
         self.request_json = _build_oa_chat_request(
             self.model,
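Taken together, each provider request now builds its own base headers and folds in user-supplied `extra_headers`, dropping keys that name another provider: per the hunks above, Anthropic excludes `openai`/`gemini`/`mistral`, OpenAI excludes only `anthropic`, Gemini excludes `anthropic`/`openai`/`mistral`, Mistral excludes `anthropic`/`openai`/`gemini`, and Bedrock excludes all four. An illustration of what that filtering means for one `extra_headers` dict (the header values here are made up for the example):

```python
extra_headers = {
    "anthropic-beta": "code-execution-2025-05-22",  # reaches only Anthropic requests
    "x-trace-id": "abc123",                         # matches no provider name, reaches all of them
}
# hypothetical outcome by provider, given the exclude lists above:
# Anthropic: both keys kept
# OpenAI:    "anthropic-beta" dropped
# Bedrock:   "anthropic-beta" dropped (all four provider names excluded)
```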
@@ -432,6 +435,7 @@ async def stream_chat(
     sampling_params: SamplingParams = SamplingParams(),
     tools: list | None = None,
     cache: CachePattern | None = None,
+    extra_headers: dict[str, str] | None = None,
 ):
     if cache is not None:
         warnings.warn(
@@ -442,7 +446,16 @@ async def stream_chat(
     if model.api_spec != "openai":
         raise ValueError("streaming only supported on openai models for now")
     url = f"{model.api_base}/chat/completions"
-    request_header = {"Authorization": f"Bearer {os.getenv(model.api_key_env_var)}"}
+    base_headers = {"Authorization": f"Bearer {os.getenv(model.api_key_env_var)}"}
+
+    # Merge extra headers, filtering out anthropic headers
+    request_header = dict(base_headers)
+    if extra_headers:
+        filtered_extra = {
+            k: v for k, v in extra_headers.items() if "anthropic" not in k.lower()
+        }
+        request_header.update(filtered_extra)
+
     request_json = _build_oa_chat_request(model, prompt, tools, sampling_params)
     request_json["stream"] = True
 
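`stream_chat` does the same filtering inline rather than through `merge_headers`, since it is a free function with no `self.context`. A minimal standalone sketch of that inline merge, under the same assumptions as the hunk above:

```python
import os

def build_stream_headers(api_key_env_var: str,
                         extra_headers: dict[str, str] | None) -> dict[str, str]:
    # base Authorization header, as in the hunk above
    headers = {"Authorization": f"Bearer {os.getenv(api_key_env_var)}"}
    if extra_headers:
        # this path is OpenAI-only, so any header naming "anthropic" is dropped
        headers.update({k: v for k, v in extra_headers.items()
                        if "anthropic" not in k.lower()})
    return headers
```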
src/lm_deluge/built_in_tools/anthropic/__init__.py
@@ -1,5 +1,7 @@
 from typing import Literal
 
+# from lm_deluge.prompt import ToolCall
+
 ToolVersion = Literal["2024-10-22", "2025-01-24", "2025-04-29"]
 ToolType = Literal["bash", "computer", "editor"]
 
@@ -102,7 +104,7 @@ def web_search_tool(max_uses: int = 5):
         "type": "web_search_20250305",
         "name": "web_search",
         # Optional: Limit the number of searches per request
-        "max_uses": 5,
+        "max_uses": max_uses,
         # You can use either allowed_domains or blocked_domains, but not both in the same request.
         # Optional: Only include results from these domains
         # "allowed_domains": ["example.com", "trusteddomain.org"],
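The `max_uses` hunk is a one-line bug fix: the parameter was previously dropped in favor of a hardcoded value. A quick sketch of the fixed behavior (dict shape taken from the hunk; unrelated keys omitted, so this is an illustrative reduction of the real helper):

```python
def web_search_tool(max_uses: int = 5):
    return {
        "type": "web_search_20250305",
        "name": "web_search",
        "max_uses": max_uses,  # previously hardcoded, ignoring the argument
    }

# the argument now flows through to the tool definition
assert web_search_tool(3)["max_uses"] == 3
```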