lm-deluge 0.0.71__tar.gz → 0.0.73__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.71/src/lm_deluge.egg-info → lm_deluge-0.0.73}/PKG-INFO +1 -1
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/pyproject.toml +1 -1
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/base.py +34 -11
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/openai.py +18 -8
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/client.py +23 -5
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/openai.py +42 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/warnings.py +2 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/LICENSE +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/README.md +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/setup.cfg +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/mock_openai.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/kimi.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/minimax.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/presets/cerebras.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/presets/meta.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/tests/test_file_upload.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/tests/test_mock_openai.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/tests/test_native_mcp_server.py +0 -0
- {lm_deluge-0.0.71 → lm_deluge-0.0.73}/tests/test_openrouter_generic.py +0 -0
{lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/base.py

```diff
@@ -90,9 +90,32 @@ class APIRequestBase(ABC):
         start -> poll -> result style of request.
         """
         assert self.context.status_tracker, "no status tracker"
-
+        poll_interval = 5.0
+        attempt_start = time.monotonic()
+        deadline = attempt_start + self.context.request_timeout
+        response_id: str | None = None
+        last_status: str | None = None
+
         async with aiohttp.ClientSession() as session:
-
+
+            async def cancel_response(reason: str) -> None:
+                nonlocal response_id
+                if not response_id:
+                    return
+                cancel_url = f"{self.url}/{response_id}/cancel"
+                try:
+                    async with session.post(
+                        url=cancel_url,
+                        headers=self.request_header,
+                    ) as cancel_response:
+                        cancel_response.raise_for_status()
+                        print(f"Background req {response_id} cancelled: {reason}")
+                except (
+                    Exception
+                ) as cancel_err:  # pragma: no cover - best effort logging
+                    print(
+                        f"Failed to cancel background req {response_id}: {cancel_err}"
+                    )

             try:
                 self.context.status_tracker.total_requests += 1
@@ -109,14 +132,11 @@ class APIRequestBase(ABC):
                 last_status = data["status"]

                 while True:
-
-
-
-
-
-                    ) as http_response:
-                        http_response.raise_for_status()
-
+                    now = time.monotonic()
+                    remaining = deadline - now
+                    if remaining <= 0:
+                        elapsed = now - attempt_start
+                        await cancel_response(f"timed out after {elapsed:.1f}s")
                         return APIResponse(
                             id=self.context.task_id,
                             model_internal=self.context.model_name,
@@ -128,8 +148,9 @@ class APIRequestBase(ABC):
                             content=None,
                             usage=None,
                         )
+
                     # poll for the response
-                    await asyncio.sleep(
+                    await asyncio.sleep(min(poll_interval, max(remaining, 0)))
                     async with session.get(
                         url=f"{self.url}/{response_id}",
                         headers=self.request_header,
@@ -146,6 +167,8 @@ class APIRequestBase(ABC):
                         return await self.handle_response(http_response)

             except Exception as e:
+                if response_id:
+                    await cancel_response(f"errored: {type(e).__name__}")
                 raise_if_modal_exception(e)
                 tb = traceback.format_exc()
                 print(tb)
```
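The base.py changes above add a deadline-aware polling loop with best-effort cancellation for background (start → poll → result) requests. Below is a minimal standalone sketch of that pattern, assuming a Responses-style endpoint that exposes `GET {base}/{id}` and `POST {base}/{id}/cancel`; the helper name and return shape are illustrative, not the library's API.

```python
import asyncio
import time

import aiohttp


async def poll_background_response(
    base_url: str,
    headers: dict[str, str],
    response_id: str,
    timeout: float = 300.0,
    poll_interval: float = 5.0,
) -> dict | None:
    """Poll a background response until it finishes or the deadline passes."""
    deadline = time.monotonic() + timeout
    async with aiohttp.ClientSession() as session:
        while True:
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                # best-effort cancel, mirroring the POST .../{id}/cancel call in the diff
                try:
                    async with session.post(
                        f"{base_url}/{response_id}/cancel", headers=headers
                    ) as resp:
                        resp.raise_for_status()
                except Exception as err:
                    print(f"failed to cancel {response_id}: {err}")
                return None  # caller treats None as a timeout
            # never sleep longer than the time we have left
            await asyncio.sleep(min(poll_interval, max(remaining, 0)))
            async with session.get(
                f"{base_url}/{response_id}", headers=headers
            ) as resp:
                resp.raise_for_status()
                data = await resp.json()
                if data.get("status") in ("completed", "failed", "cancelled"):
                    return data
```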
{lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/api_requests/openai.py

```diff
@@ -67,10 +67,12 @@ async def _build_oa_chat_request(
             effort = "minimal"
         else:
             effort = "low"
-
-
-
-
+        # GPT-5.1 models don't support 'minimal', they support 'none' instead
+        if effort == "minimal" and "gpt-5.1" in model.id:
+            maybe_warn("WARN_MINIMAL_TO_NONE", model_name=context.model_name)
+            effort = "none"
+        elif effort == "minimal" and "gpt-5" not in model.id:
+            maybe_warn("WARN_MINIMAL_TO_LOW", model_name=context.model_name)
             effort = "low"
         request_json["reasoning_effort"] = effort
     else:
@@ -271,16 +273,24 @@ async def _build_oa_responses_request(
         request_json["max_output_tokens"] = sampling_params.max_new_tokens

     if model.reasoning_model:
-
+        effort = sampling_params.reasoning_effort
+        if effort in [None, "none"]:
             # gemini models can switch reasoning off
             if "gemini" in model.id:
-
+                effort = "none"
             else:
-
+                effort = "low"
+        # GPT-5.1 models don't support 'minimal', they support 'none' instead
+        if effort == "minimal" and "gpt-5.1" in model.id:
+            maybe_warn("WARN_MINIMAL_TO_NONE", model_name=context.model_name)
+            effort = "none"
+        elif effort == "minimal" and "gpt-5" not in model.id:
+            maybe_warn("WARN_MINIMAL_TO_LOW", model_name=context.model_name)
+            effort = "low"
         request_json["temperature"] = 1.0
         request_json["top_p"] = 1.0
         request_json["reasoning"] = {
-            "effort":
+            "effort": effort,
             "summary": "auto",
         }
     else:
```
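Both request builders now normalize the reasoning effort the same way: 'minimal' becomes 'none' on GPT-5.1 models and 'low' on non-GPT-5 models, emitting a warning either way. A hedged sketch of that rule as a standalone function follows; the function name is illustrative, since the library applies this inline on `model.id` rather than through a helper.

```python
from typing import Literal

Effort = Literal["low", "medium", "high", "minimal", "none"]


def normalize_effort(effort: Effort | None, model_id: str) -> Effort:
    if effort in (None, "none"):
        # gemini models can switch reasoning off entirely
        effort = "none" if "gemini" in model_id else "low"
    if effort == "minimal" and "gpt-5.1" in model_id:
        # GPT-5.1 accepts 'none' instead of 'minimal'
        return "none"
    if effort == "minimal" and "gpt-5" not in model_id:
        # only gpt-5 models accept 'minimal'; fall back to 'low'
        return "low"
    return effort


assert normalize_effort("minimal", "gpt-5.1-codex") == "none"
assert normalize_effort("minimal", "gpt-4.1-mini") == "low"
assert normalize_effort(None, "gemini-2.5-flash") == "none"
```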
{lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/client.py

```diff
@@ -44,10 +44,14 @@ class _LLMClient(BaseModel):
     Keeps all validation, serialization, and existing functionality.
     """

-    _REASONING_SUFFIXES: ClassVar[
+    _REASONING_SUFFIXES: ClassVar[
+        dict[str, Literal["low", "medium", "high", "minimal", "none"]]
+    ] = {
         "-low": "low",
         "-medium": "medium",
         "-high": "high",
+        "-minimal": "minimal",
+        "-none": "none",
     }

     model_names: str | list[str] = ["gpt-4.1-mini"]
@@ -149,9 +153,11 @@ class _LLMClient(BaseModel):

     def _normalize_model_names(
         self, models: list[str]
-    ) -> tuple[
+    ) -> tuple[
+        list[str], list[Literal["low", "medium", "high", "minimal", "none"] | None]
+    ]:
         normalized: list[str] = []
-        efforts: list[Literal["low", "medium", "high"] | None] = []
+        efforts: list[Literal["low", "medium", "high", "minimal", "none"] | None] = []

         for name in models:
             base_name = self._preprocess_openrouter_model(name)
@@ -164,7 +170,10 @@ class _LLMClient(BaseModel):
         return normalized, efforts

     def _align_sampling_params(
-        self,
+        self,
+        per_model_efforts: list[
+            Literal["low", "medium", "high", "minimal", "none"] | None
+        ],
     ) -> None:
         if len(per_model_efforts) < len(self.model_names):
             per_model_efforts = per_model_efforts + [None] * (
@@ -332,7 +341,7 @@ class _LLMClient(BaseModel):
     @classmethod
     def _strip_reasoning_suffix_if_registered(
         cls, model_name: str
-    ) -> tuple[str, Literal["low", "medium", "high"] | None]:
+    ) -> tuple[str, Literal["low", "medium", "high", "minimal", "none"] | None]:
         """Remove reasoning suffix only when the trimmed model already exists."""
         for suffix, effort in cls._REASONING_SUFFIXES.items():
             if model_name.endswith(suffix) and len(model_name) > len(suffix):
@@ -364,6 +373,15 @@ class _LLMClient(BaseModel):
         assert (
             self.use_responses_api
         ), "background mode only allowed for responses api"
+
+        # codex models require responses api
+        for model_name in self.model_names:
+            if "codex" in model_name.lower() and not self.use_responses_api:
+                raise ValueError(
+                    f"Model '{model_name}' requires use_responses_api=True. "
+                    "Codex models are only available via the Responses API."
+                )
+
         # Auto-generate name if not provided
         if self.name is None:
             if len(self.model_names) == 1:
```
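The client changes extend the reasoning-suffix table with `-minimal` and `-none`, so a model name can carry its reasoning effort as a suffix. The sketch below shows only the stripping step in isolation; the real `_strip_reasoning_suffix_if_registered` additionally requires the trimmed name to be a registered model, which is omitted here.

```python
REASONING_SUFFIXES = {
    "-low": "low",
    "-medium": "medium",
    "-high": "high",
    "-minimal": "minimal",
    "-none": "none",
}


def strip_reasoning_suffix(model_name: str) -> tuple[str, str | None]:
    """Split a trailing effort suffix off a model name, if present."""
    for suffix, effort in REASONING_SUFFIXES.items():
        if model_name.endswith(suffix) and len(model_name) > len(suffix):
            return model_name[: -len(suffix)], effort
    return model_name, None


print(strip_reasoning_suffix("gpt-5.1-none"))    # ("gpt-5.1", "none")
print(strip_reasoning_suffix("gpt-5.1-codex"))   # ("gpt-5.1-codex", None) -- "-codex" is not an effort suffix
print(strip_reasoning_suffix("gpt-4.1-mini"))    # ("gpt-4.1-mini", None)
```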
{lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/models/openai.py

```diff
@@ -10,6 +10,48 @@ OPENAI_MODELS = {
     # ░███
     # █████
     # ░░░░░
+    "gpt-5.1": {
+        "id": "gpt-5.1",
+        "name": "gpt-5.1",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": False,
+        "supports_logprobs": True,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 1.25,
+        "cached_input_cost": 0.125,
+        "output_cost": 10.0,
+        "reasoning_model": True,
+    },
+    "gpt-5.1-codex": {
+        "id": "gpt-5.1-codex",
+        "name": "gpt-5.1-codex",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": False,
+        "supports_logprobs": True,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 1.25,
+        "cached_input_cost": 0.125,
+        "output_cost": 10.0,
+        "reasoning_model": True,
+    },
+    "gpt-5.1-codex-mini": {
+        "id": "gpt-5.1-codex-mini",
+        "name": "gpt-5.1-codex-mini",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": False,
+        "supports_logprobs": True,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 0.25,
+        "cached_input_cost": 0.025,
+        "output_cost": 2.0,
+        "reasoning_model": True,
+    },
     "gpt-5-codex": {
         "id": "gpt-5-codex",
         "name": "gpt-5-codex",
```
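The registry gains gpt-5.1, gpt-5.1-codex, and gpt-5.1-codex-mini entries. Assuming the cost fields are USD per million tokens (a common convention, not something stated in the diff itself), a quick estimate with the gpt-5.1 numbers looks like this:

```python
# Subset of the new gpt-5.1 entry; cached input is billed at a 90% discount.
GPT_51 = {"input_cost": 1.25, "cached_input_cost": 0.125, "output_cost": 10.0}


def estimate_cost(entry: dict, input_toks: int, cached_toks: int, output_toks: int) -> float:
    """Estimate request cost, splitting input tokens into fresh vs. cache hits."""
    fresh = input_toks - cached_toks
    return (
        fresh * entry["input_cost"]
        + cached_toks * entry["cached_input_cost"]
        + output_toks * entry["output_cost"]
    ) / 1_000_000


# 100k input tokens (half of them cache hits) plus 5k output tokens ≈ $0.12
print(f"${estimate_cost(GPT_51, 100_000, 50_000, 5_000):.2f}")
```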
{lm_deluge-0.0.71 → lm_deluge-0.0.73}/src/lm_deluge/warnings.py

```diff
@@ -7,6 +7,8 @@ WARNINGS: dict[str, str] = {
     "WARN_REASONING_UNSUPPORTED": "Ignoring reasoning_effort param for non-reasoning model: {model_name}.",
     "WARN_CACHING_UNSUPPORTED": "Cache parameter '{cache_param}' is not supported, ignoring for {model_name}.",
     "WARN_LOGPROBS_UNSUPPORTED": "Ignoring logprobs param for non-logprobs model: {model_name}",
+    "WARN_MINIMAL_TO_LOW": "'minimal' reasoning effort only allowed for gpt-5 models. Setting to 'low' for {model_name}.",
+    "WARN_MINIMAL_TO_NONE": "GPT-5.1 models don't support 'minimal' reasoning effort. Converting to 'none' for {model_name}.",
 }

```
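Two warning templates back the new effort conversions. `maybe_warn` does exist in the package (the openai.py hunks call it), but the lookup-and-format step below is only an illustrative assumption of how such templates are typically consumed, not the library's implementation.

```python
import warnings

WARNINGS = {
    "WARN_MINIMAL_TO_LOW": "'minimal' reasoning effort only allowed for gpt-5 models. Setting to 'low' for {model_name}.",
    "WARN_MINIMAL_TO_NONE": "GPT-5.1 models don't support 'minimal' reasoning effort. Converting to 'none' for {model_name}.",
}


def maybe_warn(key: str, **fmt: str) -> None:
    # format the registered template with the caller-supplied fields
    warnings.warn(WARNINGS[key].format(**fmt))


maybe_warn("WARN_MINIMAL_TO_NONE", model_name="gpt-5.1-codex")
```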