lm-deluge 0.0.22__tar.gz → 0.0.24__tar.gz
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lm-deluge might be problematic.
- {lm_deluge-0.0.22/src/lm_deluge.egg-info → lm_deluge-0.0.24}/PKG-INFO +1 -1
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/pyproject.toml +1 -1
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/batches.py +25 -5
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/client.py +5 -1
- {lm_deluge-0.0.22 → lm_deluge-0.0.24/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/LICENSE +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/README.md +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/setup.cfg +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/agent.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/extract.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/models.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.22 → lm_deluge-0.0.24}/tests/test_native_mcp_server.py +0 -0
{lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/batches.py

@@ -16,6 +16,7 @@ from rich.spinner import Spinner
 from rich.table import Table
 from rich.text import Text
 from lm_deluge.models import registry
+from lm_deluge.request_context import RequestContext


 def _create_batch_status_display(
@@ -165,8 +166,10 @@ async def submit_batches_oa(
     model: str,
     sampling_params: SamplingParams,
     prompts: Sequence[str | list[dict] | Conversation],
+    batch_size: int = 50_000,
 ):
     """Write OpenAI batch requests to a file and submit."""
+    BATCH_SIZE = batch_size

     prompts = prompts_to_conversations(prompts)
     if any(p is None for p in prompts):
@@ -174,7 +177,6 @@ async def submit_batches_oa(

     model_obj = APIModel.from_registry(model)

-    BATCH_SIZE = 50_000
     tasks = []

     for start in range(0, len(prompts), BATCH_SIZE):
@@ -182,11 +184,17 @@ async def submit_batches_oa(
         with tempfile.NamedTemporaryFile(mode="w+", suffix=".jsonl", delete=False) as f:
             for idx, prompt in enumerate(batch_prompts, start=start):
                 assert isinstance(prompt, Conversation)
+                context = RequestContext(
+                    task_id=idx,
+                    model_name=model,
+                    prompt=prompt,
+                    sampling_params=sampling_params,
+                )
                 request = {
                     "custom_id": str(idx),
                     "method": "POST",
                     "url": "/v1/chat/completions",
-                    "body": _build_oa_chat_request(model_obj,
+                    "body": await _build_oa_chat_request(model_obj, context),
                 }
                 json.dump(request, f)
                 f.write("\n")
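For context on what the new batch_size parameter does in submit_batches_oa: prompts are sliced into chunks of at most batch_size, and each chunk is written to its own JSONL file of batch requests. Below is a minimal, self-contained sketch of that chunking pattern, based only on the structure visible in the hunks above; write_request_files and the placeholder request body are illustrative names, not lm_deluge API.

import json
import tempfile

def write_request_files(prompts, batch_size=50_000):
    """Split prompts into slices of at most batch_size and write one JSONL file per slice."""
    file_paths = []
    for start in range(0, len(prompts), batch_size):
        batch = prompts[start : start + batch_size]
        with tempfile.NamedTemporaryFile(
            mode="w+", suffix=".jsonl", delete=False
        ) as f:
            for idx, prompt in enumerate(batch, start=start):
                # one request object per line, keyed by a stable custom_id
                request = {
                    "custom_id": str(idx),
                    "method": "POST",
                    "url": "/v1/chat/completions",
                    "body": {"messages": [{"role": "user", "content": prompt}]},
                }
                json.dump(request, f)
                f.write("\n")
            file_paths.append(f.name)
    return file_paths

# e.g. 5 prompts with batch_size=2 -> 3 files (2, 2, 1 requests)
paths = write_request_files([f"prompt {i}" for i in range(5)], batch_size=2)
print(len(paths))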
@@ -208,6 +216,7 @@ async def submit_batches_anthropic(
     prompts: Sequence[str | list[dict] | Conversation],
     *,
     cache: CachePattern | None = None,
+    batch_size=100_000,
 ):
     """Submit a batch job to Anthropic's Message Batches API.

@@ -225,7 +234,7 @@ async def submit_batches_anthropic(
     prompts = prompts_to_conversations(prompts)

     request_headers = None
-    BATCH_SIZE =
+    BATCH_SIZE = batch_size
     batch_tasks = []

     for start in range(0, len(prompts), BATCH_SIZE):
@@ -233,15 +242,26 @@ async def submit_batches_anthropic(
         with tempfile.NamedTemporaryFile(mode="w+", suffix=".jsonl", delete=False) as f:
             for idx, prompt in enumerate(batch_prompts, start=start):
                 assert isinstance(prompt, Conversation)
+                context = RequestContext(
+                    task_id=idx,
+                    model_name=model,
+                    prompt=prompt,
+                    sampling_params=sampling_params,
+                    cache=cache,
+                )
                 request_body, request_headers = _build_anthropic_request(
-                    APIModel.from_registry(model),
+                    APIModel.from_registry(model), context
                 )
                 json.dump({"custom_id": str(idx), "params": request_body}, f)
                 f.write("\n")

         file_path = f.name

-        batch_tasks.append(
+        batch_tasks.append(
+            asyncio.create_task(
+                _submit_anthropic_batch(file_path, request_headers, model)  # type: ignore
+            )
+        )

     batch_ids = await asyncio.gather(*batch_tasks)

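The reworked batch_tasks.append(...) block now wraps each per-file submission in asyncio.create_task before the ids are collected with asyncio.gather. A minimal sketch of that concurrency pattern follows; fake_submit stands in for _submit_anthropic_batch (whose body is not shown in this diff) and is purely illustrative.

import asyncio

async def fake_submit(file_path: str) -> str:
    # placeholder for the HTTP upload that returns a batch id
    await asyncio.sleep(0.1)
    return f"batch-id-for-{file_path}"

async def main() -> None:
    file_paths = ["part-0.jsonl", "part-1.jsonl", "part-2.jsonl"]
    # schedule every submission immediately, then wait for all ids
    tasks = [asyncio.create_task(fake_submit(p)) for p in file_paths]
    batch_ids = await asyncio.gather(*tasks)
    print(batch_ids)

asyncio.run(main())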
{lm_deluge-0.0.22 → lm_deluge-0.0.24}/src/lm_deluge/client.py

@@ -562,6 +562,7 @@ class LLMClient(BaseModel):
         *,
         tools: list[Tool] | None = None,
         cache: CachePattern | None = None,
+        batch_size: int = 50_000,
     ):
         """Submit a batch job asynchronously, automatically detecting the provider based on model.

@@ -581,13 +582,16 @@ class LLMClient(BaseModel):
         api_spec = registry[model].api_spec

         if api_spec == "openai":
-            return await submit_batches_oa(
+            return await submit_batches_oa(
+                model, self.sampling_params[0], prompts, batch_size=batch_size
+            )
         elif api_spec == "anthropic":
             return await submit_batches_anthropic(
                 model,
                 self.sampling_params[0],
                 prompts,
                 cache=cache,
+                batch_size=batch_size,
             )
         else:
             raise ValueError(f"Batch processing not supported for API spec: {api_spec}")
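Taken together, the client now threads batch_size through to both provider paths, with defaults of 50_000 per batch for the OpenAI path and 100_000 for the Anthropic path. A quick, purely illustrative check of how many batch jobs a given prompt count produces under those defaults (not lm_deluge code):

import math

n_prompts = 250_000
print(math.ceil(n_prompts / 50_000))   # OpenAI default:    5 batch jobs
print(math.ceil(n_prompts / 100_000))  # Anthropic default: 3 batch jobs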
|