lm-deluge 0.0.22__tar.gz → 0.0.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lm-deluge might be problematic.

Files changed (62)
  1. {lm_deluge-0.0.22/src/lm_deluge.egg-info → lm_deluge-0.0.23}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/batches.py +25 -5
  4. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/client.py +5 -1
  5. {lm_deluge-0.0.22 → lm_deluge-0.0.23/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  6. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/LICENSE +0 -0
  7. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/README.md +0 -0
  8. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/setup.cfg +0 -0
  9. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/__init__.py +0 -0
  10. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/agent.py +0 -0
  11. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/common.py +0 -0
  16. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  17. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  18. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  19. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  20. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  21. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/gemini.py +0 -0
  22. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/mistral.py +0 -0
  23. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/openai.py +0 -0
  24. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/api_requests/response.py +0 -0
  25. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  26. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  27. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  28. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  29. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/base.py +0 -0
  30. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/built_in_tools/openai.py +0 -0
  31. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/cache.py +0 -0
  32. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/config.py +0 -0
  33. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/embed.py +0 -0
  34. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/errors.py +0 -0
  35. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/file.py +0 -0
  36. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/gemini_limits.py +0 -0
  37. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/image.py +0 -0
  38. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/__init__.py +0 -0
  39. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/classify.py +0 -0
  40. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/extract.py +0 -0
  41. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/locate.py +0 -0
  42. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/ocr.py +0 -0
  43. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/score.py +0 -0
  44. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/llm_tools/translate.py +0 -0
  45. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/models.py +0 -0
  46. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/prompt.py +0 -0
  47. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/request_context.py +0 -0
  48. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/rerank.py +0 -0
  49. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/tool.py +0 -0
  50. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/tracker.py +0 -0
  51. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/usage.py +0 -0
  52. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/util/json.py +0 -0
  53. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/util/logprobs.py +0 -0
  54. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/util/spatial.py +0 -0
  55. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/util/validation.py +0 -0
  56. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/util/xml.py +0 -0
  57. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  58. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  59. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge.egg-info/requires.txt +0 -0
  60. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge.egg-info/top_level.txt +0 -0
  61. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/tests/test_builtin_tools.py +0 -0
  62. {lm_deluge-0.0.22 → lm_deluge-0.0.23}/tests/test_native_mcp_server.py +0 -0
{lm_deluge-0.0.22/src/lm_deluge.egg-info → lm_deluge-0.0.23}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.22
+Version: 0.0.23
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
{lm_deluge-0.0.22 → lm_deluge-0.0.23}/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.22"
+version = "0.0.23"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
{lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/batches.py
@@ -16,6 +16,7 @@ from rich.spinner import Spinner
 from rich.table import Table
 from rich.text import Text
 from lm_deluge.models import registry
+from lm_deluge.request_context import RequestContext
 
 
 def _create_batch_status_display(
@@ -165,8 +166,10 @@ async def submit_batches_oa(
     model: str,
     sampling_params: SamplingParams,
     prompts: Sequence[str | list[dict] | Conversation],
+    batch_size: int = 50_000,
 ):
     """Write OpenAI batch requests to a file and submit."""
+    BATCH_SIZE = batch_size
 
     prompts = prompts_to_conversations(prompts)
     if any(p is None for p in prompts):
@@ -174,7 +177,6 @@ async def submit_batches_oa(
 
     model_obj = APIModel.from_registry(model)
 
-    BATCH_SIZE = 50_000
     tasks = []
 
     for start in range(0, len(prompts), BATCH_SIZE):
@@ -182,11 +184,17 @@ async def submit_batches_oa(
         with tempfile.NamedTemporaryFile(mode="w+", suffix=".jsonl", delete=False) as f:
             for idx, prompt in enumerate(batch_prompts, start=start):
                 assert isinstance(prompt, Conversation)
+                context = RequestContext(
+                    task_id=idx,
+                    model_name=model,
+                    prompt=prompt,
+                    sampling_params=sampling_params,
+                )
                 request = {
                     "custom_id": str(idx),
                     "method": "POST",
                     "url": "/v1/chat/completions",
-                    "body": _build_oa_chat_request(model_obj, prompt, [], sampling_params),
+                    "body": _build_oa_chat_request(model_obj, context),
                 }
                 json.dump(request, f)
                 f.write("\n")
@@ -208,6 +216,7 @@ async def submit_batches_anthropic(
     prompts: Sequence[str | list[dict] | Conversation],
     *,
     cache: CachePattern | None = None,
+    batch_size=100_000,
 ):
     """Submit a batch job to Anthropic's Message Batches API.
 
@@ -225,7 +234,7 @@ async def submit_batches_anthropic(
     prompts = prompts_to_conversations(prompts)
 
     request_headers = None
-    BATCH_SIZE = 100_000
+    BATCH_SIZE = batch_size
     batch_tasks = []
 
     for start in range(0, len(prompts), BATCH_SIZE):
@@ -233,15 +242,26 @@ async def submit_batches_anthropic(
         with tempfile.NamedTemporaryFile(mode="w+", suffix=".jsonl", delete=False) as f:
             for idx, prompt in enumerate(batch_prompts, start=start):
                 assert isinstance(prompt, Conversation)
+                context = RequestContext(
+                    task_id=idx,
+                    model_name=model,
+                    prompt=prompt,
+                    sampling_params=sampling_params,
+                    cache=cache,
+                )
                 request_body, request_headers = _build_anthropic_request(
-                    APIModel.from_registry(model), prompt, [], sampling_params, cache
+                    APIModel.from_registry(model), context
                 )
                 json.dump({"custom_id": str(idx), "params": request_body}, f)
                 f.write("\n")
 
         file_path = f.name
 
-        batch_tasks.append(asyncio.create_task(_submit_anthropic_batch(file_path, request_headers, model)))
+        batch_tasks.append(
+            asyncio.create_task(
+                _submit_anthropic_batch(file_path, request_headers, model)  # type: ignore
+            )
+        )
 
     batch_ids = await asyncio.gather(*batch_tasks)
 
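Note on the batches.py hunks above: both providers' request builders now take a single RequestContext object in place of the separate prompt, tool, sampling-params, and cache arguments. The Python sketch below is a simplified stand-in for that pattern, not the library's actual class (the real one lives in src/lm_deluge/request_context.py); the field names mirror only the constructor calls visible in this diff.

# Simplified stand-in for the RequestContext pattern used above. The real
# class in src/lm_deluge/request_context.py may define more fields and
# behavior; the fields here mirror only the constructor calls in this diff.
from dataclasses import dataclass
from typing import Any

@dataclass
class RequestContextSketch:
    task_id: int
    model_name: str
    prompt: Any                  # Conversation in the real library
    sampling_params: Any         # SamplingParams in the real library
    cache: Any | None = None     # CachePattern | None in the real library

def build_request_stub(ctx: RequestContextSketch) -> dict:
    # A builder that accepts one context object keeps a stable signature as
    # new per-request options (e.g. cache) are added; only the context grows.
    return {"custom_id": str(ctx.task_id), "model": ctx.model_name}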
{lm_deluge-0.0.22 → lm_deluge-0.0.23}/src/lm_deluge/client.py
@@ -562,6 +562,7 @@ class LLMClient(BaseModel):
         *,
         tools: list[Tool] | None = None,
         cache: CachePattern | None = None,
+        batch_size: int = 50_000,
     ):
         """Submit a batch job asynchronously, automatically detecting the provider based on model.
 
@@ -581,13 +582,16 @@ class LLMClient(BaseModel):
         api_spec = registry[model].api_spec
 
         if api_spec == "openai":
-            return await submit_batches_oa(model, self.sampling_params[0], prompts)
+            return await submit_batches_oa(
+                model, self.sampling_params[0], prompts, batch_size=batch_size
+            )
         elif api_spec == "anthropic":
             return await submit_batches_anthropic(
                 model,
                 self.sampling_params[0],
                 prompts,
                 cache=cache,
+                batch_size=batch_size,
             )
         else:
             raise ValueError(f"Batch processing not supported for API spec: {api_spec}")
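Note on the client.py hunks above: the user-facing change is a new batch_size keyword on the client's batch-submission method, forwarded to both providers (OpenAI defaults to 50,000 requests per batch file, Anthropic to 100,000). A hedged usage sketch follows; the method name submit_batch_job and the LLMClient constructor call are assumptions, since this diff shows only the added parameter.

# Hedged usage sketch. The method name `submit_batch_job` and the LLMClient
# constructor call are assumptions for illustration; the diff above shows
# only that a `batch_size` keyword was added to the submission method.
import asyncio
from lm_deluge import LLMClient

async def main() -> None:
    client = LLMClient("gpt-4o-mini")  # model name illustrative
    prompts = [f"Summarize record {i}" for i in range(120_000)]
    # With batch_size=50_000, 120k prompts become three batch submissions
    # (50k + 50k + 20k requests) instead of one oversized file.
    batch_ids = await client.submit_batch_job(prompts, batch_size=50_000)
    print(batch_ids)

asyncio.run(main())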
{lm_deluge-0.0.22 → lm_deluge-0.0.23/src/lm_deluge.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.22
+Version: 0.0.23
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10