lm-deluge 0.0.66__tar.gz → 0.0.68__tar.gz

Files changed (81)
  1. {lm_deluge-0.0.66/src/lm_deluge.egg-info → lm_deluge-0.0.68}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/openai.py +120 -107
  4. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/client.py +9 -8
  5. {lm_deluge-0.0.66 → lm_deluge-0.0.68/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  6. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/LICENSE +0 -0
  7. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/README.md +0 -0
  8. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/setup.cfg +0 -0
  9. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/__init__.py +0 -0
  10. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/__init__.py +0 -0
  11. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/anthropic.py +0 -0
  12. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/base.py +0 -0
  13. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/bedrock.py +0 -0
  14. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/common.py +0 -0
  15. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  16. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  17. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  18. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  19. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  20. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/gemini.py +0 -0
  21. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/mistral.py +0 -0
  22. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/response.py +0 -0
  23. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/batches.py +0 -0
  24. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  25. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  26. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  27. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  28. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/base.py +0 -0
  29. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/built_in_tools/openai.py +0 -0
  30. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/cache.py +0 -0
  31. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/cli.py +0 -0
  32. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/config.py +0 -0
  33. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/embed.py +0 -0
  34. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/errors.py +0 -0
  35. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/file.py +0 -0
  36. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/image.py +0 -0
  37. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/__init__.py +0 -0
  38. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/classify.py +0 -0
  39. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/extract.py +0 -0
  40. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/locate.py +0 -0
  41. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/ocr.py +0 -0
  42. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/score.py +0 -0
  43. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/llm_tools/translate.py +0 -0
  44. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/__init__.py +0 -0
  45. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/anthropic.py +0 -0
  46. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/bedrock.py +0 -0
  47. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/cerebras.py +0 -0
  48. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/cohere.py +0 -0
  49. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/deepseek.py +0 -0
  50. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/fireworks.py +0 -0
  51. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/google.py +0 -0
  52. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/grok.py +0 -0
  53. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/groq.py +0 -0
  54. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/meta.py +0 -0
  55. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/mistral.py +0 -0
  56. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/openai.py +0 -0
  57. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/openrouter.py +0 -0
  58. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/models/together.py +0 -0
  59. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/presets/cerebras.py +0 -0
  60. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/presets/meta.py +0 -0
  61. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/prompt.py +0 -0
  62. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/request_context.py +0 -0
  63. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/rerank.py +0 -0
  64. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/tool.py +0 -0
  65. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/tracker.py +0 -0
  66. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/usage.py +0 -0
  67. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/harmony.py +0 -0
  68. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/json.py +0 -0
  69. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/logprobs.py +0 -0
  70. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/spatial.py +0 -0
  71. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/validation.py +0 -0
  72. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/util/xml.py +0 -0
  73. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/warnings.py +0 -0
  74. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  75. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  76. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge.egg-info/requires.txt +0 -0
  77. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge.egg-info/top_level.txt +0 -0
  78. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/tests/test_builtin_tools.py +0 -0
  79. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/tests/test_file_upload.py +0 -0
  80. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/tests/test_native_mcp_server.py +0 -0
  81. {lm_deluge-0.0.66 → lm_deluge-0.0.68}/tests/test_openrouter_generic.py +0 -0
{lm_deluge-0.0.66/src/lm_deluge.egg-info → lm_deluge-0.0.68}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.66
+Version: 0.0.68
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
{lm_deluge-0.0.66 → lm_deluge-0.0.68}/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.66"
+version = "0.0.68"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
{lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/api_requests/openai.py
@@ -350,7 +350,8 @@ class OpenAIResponsesRequest(APIRequestBase):
         assert self.context.status_tracker
 
         if status_code == 500:
-            print("Internal Server Error: ", http_response.text())
+            res_text = await http_response.text()
+            print("Internal Server Error: ", res_text)
 
         if status_code >= 200 and status_code < 300:
             try:
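
The only change in this hunk fixes an un-awaited coroutine: with an aiohttp-style response object (which the `await` in the fix implies), `.text()` returns a coroutine, so the old line printed a coroutine repr rather than the body. A minimal sketch of the failure mode, using a hypothetical URL:

```python
import asyncio

import aiohttp


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        # Hypothetical endpoint, used only to obtain a response object.
        async with session.get("https://example.com") as resp:
            # Old behavior: prints "<coroutine object ClientResponse.text at 0x...>"
            # (and emits a "coroutine was never awaited" RuntimeWarning).
            print(resp.text())
            # Fixed behavior: awaits the coroutine and prints the actual body.
            print(await resp.text())


asyncio.run(main())
```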
@@ -362,126 +363,138 @@ class OpenAIResponsesRequest(APIRequestBase):
                 )
             if not is_error:
                 assert data is not None, "data is None"
-                try:
-                    # Parse Responses API format
-                    parts = []
 
-                    # Get the output array from the response
-                    output = data.get("output", [])
-                    if not output:
-                        is_error = True
-                        error_message = "No output in response"
-                    else:
-                        # Process each output item
-                        for item in output:
-                            if item.get("type") == "message":
-                                message_content = item.get("content", [])
-                                for content_item in message_content:
-                                    if content_item.get("type") == "output_text":
-                                        parts.append(Text(content_item["text"]))
-                                    elif content_item.get("type") == "refusal":
-                                        parts.append(Text(content_item["refusal"]))
-                            elif item.get("type") == "reasoning":
-                                summary = item["summary"]
-                                if not summary:
-                                    continue
-                                if isinstance(summary, list) and len(summary) > 0:
-                                    summary = summary[0]
-                                assert isinstance(summary, dict), "summary isn't a dict"
-                                parts.append(Thinking(summary["text"]))
-                            elif item.get("type") == "function_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
+                # Check if response is incomplete
+                if data.get("status") == "incomplete":
+                    is_error = True
+                    incomplete_reason = data.get("incomplete_details", {}).get(
+                        "reason", "unknown"
+                    )
+                    error_message = f"Response incomplete: {incomplete_reason}"
+
+                if not is_error:
+                    try:
+                        # Parse Responses API format
+                        parts = []
+
+                        # Get the output array from the response
+                        output = data.get("output", [])
+                        if not output:
+                            is_error = True
+                            error_message = "No output in response"
+                        else:
+                            # Process each output item
+                            for item in output:
+                                if item.get("type") == "message":
+                                    message_content = item.get("content", [])
+                                    for content_item in message_content:
+                                        if content_item.get("type") == "output_text":
+                                            parts.append(Text(content_item["text"]))
+                                        elif content_item.get("type") == "refusal":
+                                            parts.append(Text(content_item["refusal"]))
+                                elif item.get("type") == "reasoning":
+                                    summary = item["summary"]
+                                    if not summary:
+                                        continue
+                                    if isinstance(summary, list) and len(summary) > 0:
+                                        summary = summary[0]
+                                    assert isinstance(
+                                        summary, dict
+                                    ), "summary isn't a dict"
+                                    parts.append(Thinking(summary["text"]))
+                                elif item.get("type") == "function_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "mcp_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
-                                        built_in=True,
-                                        built_in_type="mcp_call",
-                                        extra_body={
-                                            "server_label": item["server_label"],
-                                            "error": item.get("error"),
-                                            "output": item.get("output"),
-                                        },
+                                elif item.get("type") == "mcp_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                            built_in=True,
+                                            built_in_type="mcp_call",
+                                            extra_body={
+                                                "server_label": item["server_label"],
+                                                "error": item.get("error"),
+                                                "output": item.get("output"),
+                                            },
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "computer_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name="computer_call",
-                                        arguments=item.get("action"),
-                                        built_in=True,
-                                        built_in_type="computer_call",
+                                elif item.get("type") == "computer_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name="computer_call",
+                                            arguments=item.get("action"),
+                                            built_in=True,
+                                            built_in_type="computer_call",
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "web_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="web_search_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="web_search_call",
-                                        extra_body={"status": item["status"]},
+                                elif item.get("type") == "web_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="web_search_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="web_search_call",
+                                            extra_body={"status": item["status"]},
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "file_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="file_search_call",
-                                        arguments={"queries": item["queries"]},
-                                        built_in=True,
-                                        built_in_type="file_search_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "results": item["results"],
-                                        },
+                                elif item.get("type") == "file_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="file_search_call",
+                                            arguments={"queries": item["queries"]},
+                                            built_in=True,
+                                            built_in_type="file_search_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "results": item["results"],
+                                            },
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "image_generation_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="image_generation_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="image_generation_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "result": item["result"],
-                                        },
+                                elif item.get("type") == "image_generation_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="image_generation_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="image_generation_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "result": item["result"],
+                                            },
+                                        )
                                     )
-                                )
 
-                    # Handle reasoning if present
-                    if "reasoning" in data and data["reasoning"].get("summary"):
-                        thinking = data["reasoning"]["summary"]
-                        parts.append(Thinking(thinking))
+                        # Handle reasoning if present
+                        if "reasoning" in data and data["reasoning"].get("summary"):
+                            thinking = data["reasoning"]["summary"]
+                            parts.append(Thinking(thinking))
 
-                    content = Message("assistant", parts)
+                        content = Message("assistant", parts)
 
-                    # Extract usage information
-                    if "usage" in data and data["usage"] is not None:
-                        usage = Usage.from_openai_usage(data["usage"])
+                        # Extract usage information
+                        if "usage" in data and data["usage"] is not None:
+                            usage = Usage.from_openai_usage(data["usage"])
 
-                except Exception as e:
-                    is_error = True
-                    error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
-                    print("got data:", data)
-                    traceback = tb.format_exc()
-                    print(f"Error details:\n{traceback}")
+                    except Exception as e:
+                        is_error = True
+                        error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+                        print("got data:", data)
+                        traceback = tb.format_exc()
+                        print(f"Error details:\n{traceback}")
 
         elif mimetype and "json" in mimetype.lower():
             print("is_error True, json response")
{lm_deluge-0.0.66 → lm_deluge-0.0.68}/src/lm_deluge/client.py
@@ -702,7 +702,7 @@ class _LLMClient(BaseModel):
 
     async def start(
         self,
-        prompt: str | Conversation,
+        prompt: Prompt,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
         cache: CachePattern | None = None,
@@ -780,12 +780,12 @@ class _LLMClient(BaseModel):
 
     async def stream(
         self,
-        prompt: str | Conversation,
+        prompt: Prompt,
         tools: list[Tool | dict | MCPServer] | None = None,
     ):
         model, sampling_params = self._select_model()
-        if isinstance(prompt, str):
-            prompt = Conversation.user(prompt)
+        prompt = prompts_to_conversations([prompt])[0]
+        assert isinstance(prompt, Conversation)
         async for item in stream_chat(
             model, prompt, sampling_params, tools, None, self.extra_headers
         ):
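
`stream` now routes its input through the same `prompts_to_conversations` helper instead of special-casing `str`. A self-contained sketch of the contract the trailing `assert` relies on — the stand-in class and `Prompt` alias below are illustrative; the real definitions live in `lm_deluge.prompt`:

```python
from __future__ import annotations

from typing import Union


class Conversation:
    """Stand-in for lm_deluge's Conversation, for illustration only."""

    def __init__(self, messages: list[dict]) -> None:
        self.messages = messages

    @classmethod
    def user(cls, text: str) -> Conversation:
        return cls([{"role": "user", "content": text}])


Prompt = Union[str, Conversation]  # assumed shape of the alias


def prompts_to_conversations(prompts: list[Prompt]) -> list[Conversation]:
    # Strings become single-user-message Conversations; Conversation
    # instances pass through unchanged, which is what the assert depends on.
    return [p if isinstance(p, Conversation) else Conversation.user(p) for p in prompts]


prompt = prompts_to_conversations(["Hello!"])[0]
assert isinstance(prompt, Conversation)  # mirrors the assert added in stream()
```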
@@ -799,7 +799,7 @@ class _LLMClient(BaseModel):
 
     async def run_agent_loop(
         self,
-        conversation: str | Conversation,
+        conversation: Prompt,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
         max_rounds: int = 5,
@@ -812,8 +812,9 @@ class _LLMClient(BaseModel):
         instances or built‑in tool dictionaries.
         """
 
-        if isinstance(conversation, str):
-            conversation = Conversation.user(conversation)
+        if not isinstance(conversation, Conversation):
+            conversation = prompts_to_conversations([conversation])[0]
+        assert isinstance(conversation, Conversation)
 
         # Expand MCPServer objects to their constituent tools for tool execution
         expanded_tools: list[Tool] = []
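
For callers, the widened `Prompt` signatures are backward compatible: a bare string still normalizes to a single user message. A hedged usage sketch; the `LLMClient` constructor form, model name, and printing of the returned conversation are assumptions for illustration, not shown in this diff:

```python
import asyncio

from lm_deluge import LLMClient  # assumed public export


async def main() -> None:
    client = LLMClient("gpt-4o-mini")  # constructor call and model illustrative
    # A plain str worked before and still does; any other Prompt variant now
    # takes the same prompts_to_conversations path before the loop starts.
    conversation = await client.run_agent_loop(
        "What is 2 + 2?",
        max_rounds=2,
    )
    print(conversation)


asyncio.run(main())
```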
@@ -870,7 +871,7 @@ class _LLMClient(BaseModel):
 
     def run_agent_loop_sync(
         self,
-        conversation: str | Conversation,
+        conversation: Prompt,
         *,
         tools: list[Tool | dict | MCPServer] | None = None,
         max_rounds: int = 5,
{lm_deluge-0.0.66 → lm_deluge-0.0.68/src/lm_deluge.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.66
+Version: 0.0.68
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10