lm-deluge 0.0.51__tar.gz → 0.0.52__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. {lm_deluge-0.0.51/src/lm_deluge.egg-info → lm_deluge-0.0.52}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/openai.py +11 -1
  4. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/response.py +1 -1
  5. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/client.py +35 -1
  6. {lm_deluge-0.0.51 → lm_deluge-0.0.52/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  7. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/LICENSE +0 -0
  8. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/README.md +0 -0
  9. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/setup.cfg +0 -0
  10. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/__init__.py +0 -0
  11. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/agent.py +0 -0
  12. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/__init__.py +0 -0
  13. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/anthropic.py +0 -0
  14. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/base.py +0 -0
  15. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/bedrock.py +0 -0
  16. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/common.py +0 -0
  17. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  18. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  19. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  20. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  21. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  22. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/gemini.py +0 -0
  23. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/api_requests/mistral.py +0 -0
  24. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/batches.py +0 -0
  25. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  26. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  27. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  28. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  29. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/base.py +0 -0
  30. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/built_in_tools/openai.py +0 -0
  31. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/cache.py +0 -0
  32. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/cli.py +0 -0
  33. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/config.py +0 -0
  34. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/embed.py +0 -0
  35. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/errors.py +0 -0
  36. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/file.py +0 -0
  37. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/gemini_limits.py +0 -0
  38. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/image.py +0 -0
  39. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/__init__.py +0 -0
  40. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/classify.py +0 -0
  41. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/extract.py +0 -0
  42. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/locate.py +0 -0
  43. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/ocr.py +0 -0
  44. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/score.py +0 -0
  45. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/llm_tools/translate.py +0 -0
  46. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/__init__.py +0 -0
  47. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/anthropic.py +0 -0
  48. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/bedrock.py +0 -0
  49. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/cerebras.py +0 -0
  50. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/cohere.py +0 -0
  51. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/deepseek.py +0 -0
  52. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/fireworks.py +0 -0
  53. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/google.py +0 -0
  54. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/grok.py +0 -0
  55. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/groq.py +0 -0
  56. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/meta.py +0 -0
  57. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/mistral.py +0 -0
  58. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/openai.py +0 -0
  59. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/openrouter.py +0 -0
  60. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/models/together.py +0 -0
  61. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/presets/cerebras.py +0 -0
  62. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/presets/meta.py +0 -0
  63. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/prompt.py +0 -0
  64. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/request_context.py +0 -0
  65. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/rerank.py +0 -0
  66. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/tool.py +0 -0
  67. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/tracker.py +0 -0
  68. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/usage.py +0 -0
  69. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/harmony.py +0 -0
  70. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/json.py +0 -0
  71. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/logprobs.py +0 -0
  72. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/spatial.py +0 -0
  73. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/validation.py +0 -0
  74. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge/util/xml.py +0 -0
  75. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  76. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.51 → lm_deluge-0.0.52}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.51
+Version: 0.0.52
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.51"
+version = "0.0.52"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/api_requests/openai.py
@@ -1,5 +1,6 @@
 import json
 import os
+import traceback as tb
 import warnings
 from types import SimpleNamespace
 
@@ -341,7 +342,13 @@ class OpenAIResponsesRequest(APIRequestBase):
                     elif content_item.get("type") == "refusal":
                         parts.append(Text(content_item["refusal"]))
                 elif item.get("type") == "reasoning":
-                    parts.append(Thinking(item["summary"]["text"]))
+                    summary = item["summary"]
+                    if not summary:
+                        continue
+                    if isinstance(summary, list) and len(summary) > 0:
+                        summary = summary[0]
+                    assert isinstance(summary, dict), "summary isn't a dict"
+                    parts.append(Thinking(summary["text"]))
                 elif item.get("type") == "function_call":
                     parts.append(
                         ToolCall(
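
Note on the reasoning change above: a Responses API reasoning item can carry an empty summary (none was generated) or a summary delivered as a list of parts rather than a single dict; the old code indexed item["summary"]["text"] directly and raised in both cases. A minimal standalone sketch of the tolerant parse, with item shapes assumed from the diff rather than taken from a documented schema:

    # Hypothetical reasoning items, shaped the way the new code expects.
    items = [
        {"type": "reasoning", "summary": []},  # no summary -> skipped
        {
            "type": "reasoning",
            "summary": [{"type": "summary_text", "text": "Weighed both options."}],
        },
    ]

    for item in items:
        summary = item["summary"]
        if not summary:  # empty list (or None): nothing to extract
            continue
        if isinstance(summary, list) and len(summary) > 0:
            summary = summary[0]  # take the first summary part
        assert isinstance(summary, dict), "summary isn't a dict"
        print(summary["text"])  # -> "Weighed both options."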
@@ -432,6 +439,9 @@ class OpenAIResponsesRequest(APIRequestBase):
             except Exception as e:
                 is_error = True
                 error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+                print("got data:", data)
+                traceback = tb.format_exc()
+                print(f"Error details:\n{traceback}")
 
         elif mimetype and "json" in mimetype.lower():
             print("is_error True, json response")
src/lm_deluge/api_requests/response.py
@@ -14,7 +14,7 @@ class APIResponse:
     # request information
     id: int  # should be unique to the request within a given prompt-processing call
     model_internal: str  # our internal model tag
-    prompt: Conversation | dict
+    prompt: Conversation | dict  # dict if converted to log
     sampling_params: SamplingParams
 
     # http response information
src/lm_deluge/client.py
@@ -357,6 +357,8 @@ class _LLMClient(BaseModel):
         prompts = prompts_to_conversations(prompts)
         ids = list(range(len(prompts)))
         results: list[APIResponse | None] = [None for _ in range(len(prompts))]
+        contexts: list[RequestContext | None] = [None for _ in range(len(prompts))]
+        inflight_tasks: set[asyncio.Task[None]] = set()
         # Use existing tracker if client has been opened; otherwise open/close automatically
         tracker: StatusTracker
         tracker_preopened = self._tracker is not None
@@ -419,6 +421,8 @@ class _LLMClient(BaseModel):
                 )
 
             # Launch simplified request processing
+            contexts[next_context.task_id] = next_context
+
             async def process_and_store(ctx: RequestContext):
                 try:
                     response = await self.process_single_request(ctx, retry_queue)
@@ -439,7 +443,9 @@
                     if ctx.status_tracker:
                         ctx.status_tracker.task_failed(ctx.task_id)
 
-            asyncio.create_task(process_and_store(next_context))
+            task = asyncio.create_task(process_and_store(next_context))
+            inflight_tasks.add(task)
+            task.add_done_callback(inflight_tasks.discard)
             next_context = None  # Reset after successful dispatch
             next_is_retry = False
 
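Holding each task in inflight_tasks follows the reference-keeping pattern from the asyncio.create_task documentation: the event loop keeps only weak references to tasks, so a fire-and-forget task can be garbage-collected before it finishes; the add_done_callback(set.discard) half stops the set from growing without bound. A standalone sketch of the pattern (names are illustrative):

    import asyncio

    async def work(n: int) -> None:
        await asyncio.sleep(0.01)
        print(f"task {n} done")

    async def main() -> None:
        inflight: set[asyncio.Task[None]] = set()
        for n in range(3):
            task = asyncio.create_task(work(n))
            inflight.add(task)                        # strong ref keeps the task alive
            task.add_done_callback(inflight.discard)  # drop the ref once it finishes
        # wait for stragglers; return_exceptions=True collects failures
        # instead of raising the first one
        await asyncio.gather(*inflight, return_exceptions=True)

    asyncio.run(main())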
@@ -456,9 +462,37 @@
             # Yield briefly to allow in-flight tasks to progress
             await asyncio.sleep(min(0.01, seconds_to_sleep_each_loop))
 
+        if inflight_tasks:
+            await asyncio.gather(*inflight_tasks, return_exceptions=True)
+
         if not tracker_preopened:
             self.close()
 
+        for idx, response in enumerate(results):
+            if response is None:
+                ctx = contexts[idx]
+                prompt = ctx.prompt if ctx else prompts[idx]
+                sampling_params = (
+                    ctx.sampling_params
+                    if ctx
+                    else self.sampling_params[0]
+                    if self.sampling_params
+                    else SamplingParams()
+                )
+                model_name = ctx.model_name if ctx else self.model_names[0]
+                assert isinstance(
+                    prompt, Conversation
+                ), "expected prompt to be a conversation"
+                results[idx] = APIResponse(
+                    id=idx,
+                    model_internal=model_name,
+                    prompt=prompt,
+                    sampling_params=sampling_params,
+                    status_code=None,
+                    is_error=True,
+                    error_message="Internal error: no response produced.",
+                )
+
         if return_completions_only:
             return [r.completion if r is not None else None for r in results]
 
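A note on the backfill above: after this loop every slot in results holds a real APIResponse, so callers get an explicit error object rather than a silent None. The sampling_params expression chains two conditional expressions, which associate to the right; a small sketch with placeholder values showing the equivalent parenthesization:

    # x = a if c1 else b if c2 else d  parses as  a if c1 else (b if c2 else d)
    ctx = None                    # placeholder: no context was recorded
    client_params = ["fallback"]  # placeholder stand-in for self.sampling_params

    params = (
        ctx.sampling_params
        if ctx
        else client_params[0]
        if client_params
        else "default"
    )
    assert params == "fallback"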
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.51
+Version: 0.0.52
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10