lm-deluge 0.0.51.tar.gz → 0.0.53.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (80)
  1. {lm_deluge-0.0.51/src/lm_deluge.egg-info → lm_deluge-0.0.53}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/anthropic.py +10 -1
  4. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/openai.py +11 -1
  5. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/response.py +1 -1
  6. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/client.py +40 -5
  7. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/anthropic.py +12 -0
  8. {lm_deluge-0.0.51 → lm_deluge-0.0.53/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  9. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/LICENSE +0 -0
  10. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/README.md +0 -0
  11. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/setup.cfg +0 -0
  12. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/__init__.py +0 -0
  13. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/agent.py +0 -0
  14. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/__init__.py +0 -0
  15. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/base.py +0 -0
  16. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/bedrock.py +0 -0
  17. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/common.py +0 -0
  18. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  19. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  20. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  21. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  22. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  23. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/gemini.py +0 -0
  24. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/api_requests/mistral.py +0 -0
  25. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/batches.py +0 -0
  26. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  27. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  28. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  29. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  30. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/base.py +0 -0
  31. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/built_in_tools/openai.py +0 -0
  32. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/cache.py +0 -0
  33. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/cli.py +0 -0
  34. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/config.py +0 -0
  35. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/embed.py +0 -0
  36. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/errors.py +0 -0
  37. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/file.py +0 -0
  38. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/gemini_limits.py +0 -0
  39. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/image.py +0 -0
  40. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/__init__.py +0 -0
  41. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/classify.py +0 -0
  42. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/extract.py +0 -0
  43. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/locate.py +0 -0
  44. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/ocr.py +0 -0
  45. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/score.py +0 -0
  46. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/llm_tools/translate.py +0 -0
  47. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/__init__.py +0 -0
  48. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/bedrock.py +0 -0
  49. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/cerebras.py +0 -0
  50. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/cohere.py +0 -0
  51. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/deepseek.py +0 -0
  52. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/fireworks.py +0 -0
  53. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/google.py +0 -0
  54. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/grok.py +0 -0
  55. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/groq.py +0 -0
  56. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/meta.py +0 -0
  57. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/mistral.py +0 -0
  58. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/openai.py +0 -0
  59. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/openrouter.py +0 -0
  60. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/models/together.py +0 -0
  61. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/presets/cerebras.py +0 -0
  62. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/presets/meta.py +0 -0
  63. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/prompt.py +0 -0
  64. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/request_context.py +0 -0
  65. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/rerank.py +0 -0
  66. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/tool.py +0 -0
  67. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/tracker.py +0 -0
  68. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/usage.py +0 -0
  69. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/harmony.py +0 -0
  70. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/json.py +0 -0
  71. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/logprobs.py +0 -0
  72. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/spatial.py +0 -0
  73. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/validation.py +0 -0
  74. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge/util/xml.py +0 -0
  75. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  76. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.51 → lm_deluge-0.0.53}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.51
+Version: 0.0.53
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.51"
+version = "0.0.53"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/api_requests/anthropic.py
@@ -60,7 +60,8 @@ def _build_anthropic_request(
             "type": "enabled",
             "budget_tokens": budget,
         }
-        request_json.pop("top_p")
+        if "top_p" in request_json:
+            request_json["top_p"] = max(request_json["top_p"], 0.95)
         request_json["temperature"] = 1.0
         request_json["max_tokens"] += budget
     else:
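
Context for the change above: with extended thinking enabled, the Anthropic API requires temperature to be 1 and only accepts top_p values in the 0.95-1 range, so clamping upward keeps a caller-supplied top_p valid instead of dropping the key as 0.0.51 did. A minimal sketch of the resulting payload; the budget, max_tokens, and top_p values are hypothetical illustrations, not library defaults:

# Sketch of the post-adjustment payload with hypothetical inputs.
budget = 2048
request_json = {"max_tokens": 1024, "temperature": 0.7, "top_p": 0.9}

request_json["thinking"] = {"type": "enabled", "budget_tokens": budget}
if "top_p" in request_json:
    # clamp into the range the API accepts alongside extended thinking
    request_json["top_p"] = max(request_json["top_p"], 0.95)
request_json["temperature"] = 1.0  # thinking requires temperature == 1
request_json["max_tokens"] += budget  # thinking tokens count toward max_tokens

assert request_json["top_p"] == 0.95
assert request_json["max_tokens"] == 3072
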
@@ -70,6 +71,11 @@ def _build_anthropic_request(
     if system_message is not None:
         request_json["system"] = system_message
 
+    # handle temp + top_p for opus 4.1/sonnet 4.5
+    if model.name in ["claude-sonnet-4-5-20250929", "claude-opus-4-1-20250805"]:
+        if "temperature" in request_json and "top_p" in request_json:
+            request_json.pop("top_p")
+
     if tools:
         mcp_servers = []
         tool_definitions = []
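
These newer Anthropic models reject requests that set both temperature and top_p, so the block above keeps temperature and drops top_p. A hypothetical standalone rendering of the same precedence rule (sanitize and the set name are illustrative, not names from the library):

NO_COMBINED_SAMPLING = {"claude-sonnet-4-5-20250929", "claude-opus-4-1-20250805"}

def sanitize(request_json: dict, model_name: str) -> dict:
    # temperature wins; top_p is dropped only when both are present
    if model_name in NO_COMBINED_SAMPLING:
        if "temperature" in request_json and "top_p" in request_json:
            request_json.pop("top_p")
    return request_json

print(sanitize({"temperature": 0.7, "top_p": 0.9}, "claude-sonnet-4-5-20250929"))
# {'temperature': 0.7}
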
@@ -89,6 +95,9 @@ def _build_anthropic_request(
                 _add_beta(base_headers, "computer-use-2025-01-24")
             elif tool["type"] == "code_execution_20250522":
                 _add_beta(base_headers, "code-execution-2025-05-22")
+            elif tool["type"] in ["memory_20250818", "clear_tool_uses_20250919"]:
+                _add_beta(base_headers, "context-management-2025-06-27")
+
         elif isinstance(tool, MCPServer):
             _add_beta(base_headers, "mcp-client-2025-04-04")
             mcp_servers.append(tool.for_anthropic())
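
_add_beta itself is outside this diff; a plausible sketch, assuming it deduplicates flags into the comma-separated anthropic-beta header the API expects:

# Hypothetical sketch of an _add_beta helper; the real implementation
# lives in code not shown in this diff.
def _add_beta(headers: dict[str, str], beta: str) -> None:
    existing = [b for b in headers.get("anthropic-beta", "").split(",") if b]
    if beta not in existing:
        existing.append(beta)
    headers["anthropic-beta"] = ",".join(existing)

headers: dict[str, str] = {}
_add_beta(headers, "context-management-2025-06-27")
_add_beta(headers, "context-management-2025-06-27")  # idempotent
assert headers["anthropic-beta"] == "context-management-2025-06-27"
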
src/lm_deluge/api_requests/openai.py
@@ -1,5 +1,6 @@
 import json
 import os
+import traceback as tb
 import warnings
 from types import SimpleNamespace
 
@@ -341,7 +342,13 @@ class OpenAIResponsesRequest(APIRequestBase):
                 elif content_item.get("type") == "refusal":
                     parts.append(Text(content_item["refusal"]))
             elif item.get("type") == "reasoning":
-                parts.append(Thinking(item["summary"]["text"]))
+                summary = item["summary"]
+                if not summary:
+                    continue
+                if isinstance(summary, list) and len(summary) > 0:
+                    summary = summary[0]
+                assert isinstance(summary, dict), "summary isn't a dict"
+                parts.append(Thinking(summary["text"]))
             elif item.get("type") == "function_call":
                 parts.append(
                     ToolCall(
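
The rewrite above guards against the summary shapes this code expects from the Responses API: empty (no summary emitted), a list of summary parts, or a single dict. A standalone sketch of the same normalization (summary_text is a hypothetical helper name):

def summary_text(item: dict) -> str | None:
    summary = item["summary"]
    if not summary:               # [] or None: model emitted no summary
        return None
    if isinstance(summary, list) and len(summary) > 0:
        summary = summary[0]      # keep only the first summary part
    assert isinstance(summary, dict), "summary isn't a dict"
    return summary["text"]

assert summary_text({"summary": []}) is None
assert summary_text({"summary": [{"type": "summary_text", "text": "hi"}]}) == "hi"
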
@@ -432,6 +439,9 @@ class OpenAIResponsesRequest(APIRequestBase):
        except Exception as e:
            is_error = True
            error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+           print("got data:", data)
+           traceback = tb.format_exc()
+           print(f"Error details:\n{traceback}")
 
    elif mimetype and "json" in mimetype.lower():
        print("is_error True, json response")
src/lm_deluge/api_requests/response.py
@@ -14,7 +14,7 @@ class APIResponse:
    # request information
    id: int  # should be unique to the request within a given prompt-processing call
    model_internal: str  # our internal model tag
-   prompt: Conversation | dict
+   prompt: Conversation | dict  # dict if converted to log
    sampling_params: SamplingParams
 
    # http response information
src/lm_deluge/client.py
@@ -357,6 +357,8 @@ class _LLMClient(BaseModel):
         prompts = prompts_to_conversations(prompts)
         ids = list(range(len(prompts)))
         results: list[APIResponse | None] = [None for _ in range(len(prompts))]
+        contexts: list[RequestContext | None] = [None for _ in range(len(prompts))]
+        inflight_tasks: set[asyncio.Task[None]] = set()
         # Use existing tracker if client has been opened; otherwise open/close automatically
         tracker: StatusTracker
         tracker_preopened = self._tracker is not None
@@ -419,12 +421,14 @@
                )
 
            # Launch simplified request processing
+           contexts[next_context.task_id] = next_context
+
            async def process_and_store(ctx: RequestContext):
                try:
                    response = await self.process_single_request(ctx, retry_queue)
                    results[ctx.task_id] = response
-               except Exception as e:
-                   # Create an error response for validation errors and other exceptions
+               except BaseException as exc:
+                   # Capture cancellations and other BaseExceptions before fallback response fires.
                    error_response = APIResponse(
                        id=ctx.task_id,
                        model_internal=ctx.model_name,
@@ -432,14 +436,17 @@
                        sampling_params=ctx.sampling_params,
                        status_code=None,
                        is_error=True,
-                       error_message=str(e),
+                       error_message=f"{type(exc).__name__}: {exc}",
+                       raw_response={"exception_repr": repr(exc)},
                    )
                    results[ctx.task_id] = error_response
-                   # Mark task as completed so the main loop can finish
                    if ctx.status_tracker:
                        ctx.status_tracker.task_failed(ctx.task_id)
+                   raise
 
-           asyncio.create_task(process_and_store(next_context))
+           task = asyncio.create_task(process_and_store(next_context))
+           inflight_tasks.add(task)
+           task.add_done_callback(inflight_tasks.discard)
            next_context = None  # Reset after successful dispatch
            next_is_retry = False
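
The add/discard bookkeeping above matches the pattern the asyncio docs recommend: the event loop holds only weak references to tasks, so a fire-and-forget create_task result can be garbage-collected mid-flight unless the caller keeps a strong reference. A minimal self-contained sketch:

import asyncio

inflight: set[asyncio.Task[None]] = set()

async def work(i: int) -> None:
    await asyncio.sleep(0.01)

async def main() -> None:
    for i in range(3):
        task = asyncio.create_task(work(i))
        inflight.add(task)                        # strong ref keeps task alive
        task.add_done_callback(inflight.discard)  # drop ref once it finishes
    await asyncio.gather(*inflight)               # drain whatever is still running

asyncio.run(main())
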
 
@@ -456,9 +463,37 @@
            # Yield briefly to allow in-flight tasks to progress
            await asyncio.sleep(min(0.01, seconds_to_sleep_each_loop))
 
+       if inflight_tasks:
+           await asyncio.gather(*inflight_tasks, return_exceptions=True)
+
        if not tracker_preopened:
            self.close()
 
+       for idx, response in enumerate(results):
+           if response is None:
+               ctx = contexts[idx]
+               prompt = ctx.prompt if ctx else prompts[idx]
+               sampling_params = (
+                   ctx.sampling_params
+                   if ctx
+                   else self.sampling_params[0]
+                   if self.sampling_params
+                   else SamplingParams()
+               )
+               model_name = ctx.model_name if ctx else self.model_names[0]
+               assert isinstance(
+                   prompt, Conversation
+               ), "expected prompt to be a conversation"
+               results[idx] = APIResponse(
+                   id=idx,
+                   model_internal=model_name,
+                   prompt=prompt,
+                   sampling_params=sampling_params,
+                   status_code=None,
+                   is_error=True,
+                   error_message="Internal error: no response produced.",
+               )
+
        if return_completions_only:
            return [r.completion if r is not None else None for r in results]
src/lm_deluge/models/anthropic.py
@@ -10,6 +10,18 @@ ANTHROPIC_MODELS = {
    # ░███
    # █████
    #
+   "claude-4.5-sonnet": {
+       "id": "claude-4.5-sonnet",
+       "name": "claude-sonnet-4-5-20250929",
+       "api_base": "https://api.anthropic.com/v1",
+       "api_key_env_var": "ANTHROPIC_API_KEY",
+       "supports_json": False,
+       "api_spec": "anthropic",
+       "input_cost": 3.0,
+       "output_cost": 15.0,
+       "requests_per_minute": 4_000,
+       "tokens_per_minute": 400_000,
+   },
    "claude-4.1-opus": {
        "id": "claude-4.1-opus",
        "name": "claude-opus-4-1-20250805",
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.51
+Version: 0.0.53
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10