lm-deluge 0.0.65__tar.gz → 0.0.67__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lm-deluge might be problematic.

Files changed (81)
  1. {lm_deluge-0.0.65/src/lm_deluge.egg-info → lm_deluge-0.0.67}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/anthropic.py +3 -0
  4. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/openai.py +122 -108
  5. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/prompt.py +12 -0
  6. {lm_deluge-0.0.65 → lm_deluge-0.0.67/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  7. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/LICENSE +0 -0
  8. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/README.md +0 -0
  9. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/setup.cfg +0 -0
  10. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/__init__.py +0 -0
  11. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/base.py +0 -0
  13. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/bedrock.py +0 -0
  14. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/common.py +0 -0
  15. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  16. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  17. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  18. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  19. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  20. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/gemini.py +0 -0
  21. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/mistral.py +0 -0
  22. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/api_requests/response.py +0 -0
  23. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/batches.py +0 -0
  24. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  25. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  26. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  27. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  28. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/base.py +0 -0
  29. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/built_in_tools/openai.py +0 -0
  30. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/cache.py +0 -0
  31. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/cli.py +0 -0
  32. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/client.py +0 -0
  33. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/config.py +0 -0
  34. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/embed.py +0 -0
  35. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/errors.py +0 -0
  36. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/file.py +0 -0
  37. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/image.py +0 -0
  38. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/__init__.py +0 -0
  39. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/classify.py +0 -0
  40. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/extract.py +0 -0
  41. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/locate.py +0 -0
  42. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/ocr.py +0 -0
  43. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/score.py +0 -0
  44. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/llm_tools/translate.py +0 -0
  45. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/__init__.py +0 -0
  46. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/anthropic.py +0 -0
  47. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/bedrock.py +0 -0
  48. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/cerebras.py +0 -0
  49. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/cohere.py +0 -0
  50. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/deepseek.py +0 -0
  51. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/fireworks.py +0 -0
  52. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/google.py +0 -0
  53. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/grok.py +0 -0
  54. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/groq.py +0 -0
  55. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/meta.py +0 -0
  56. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/mistral.py +0 -0
  57. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/openai.py +0 -0
  58. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/openrouter.py +0 -0
  59. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/models/together.py +0 -0
  60. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/presets/cerebras.py +0 -0
  61. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/presets/meta.py +0 -0
  62. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/request_context.py +0 -0
  63. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/rerank.py +0 -0
  64. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/tool.py +0 -0
  65. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/tracker.py +0 -0
  66. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/usage.py +0 -0
  67. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/harmony.py +0 -0
  68. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/json.py +0 -0
  69. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/logprobs.py +0 -0
  70. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/spatial.py +0 -0
  71. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/validation.py +0 -0
  72. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/util/xml.py +0 -0
  73. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge/warnings.py +0 -0
  74. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  75. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  76. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge.egg-info/requires.txt +0 -0
  77. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/src/lm_deluge.egg-info/top_level.txt +0 -0
  78. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/tests/test_builtin_tools.py +0 -0
  79. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/tests/test_file_upload.py +0 -0
  80. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/tests/test_native_mcp_server.py +0 -0
  81. {lm_deluge-0.0.65 → lm_deluge-0.0.67}/tests/test_openrouter_generic.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.65
+Version: 0.0.67
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.65"
+version = "0.0.67"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/api_requests/anthropic.py
@@ -90,6 +90,9 @@ def _build_anthropic_request(
     for tool in tools:
         if isinstance(tool, Tool):
             tool_definitions.append(tool.dump_for("anthropic"))
+        elif isinstance(tool, dict) and "url" in tool:
+            _add_beta(base_headers, "mcp-client-2025-04-04")
+            mcp_servers.append(tool)
         elif isinstance(tool, dict):
             tool_definitions.append(tool)
     # add betas if needed
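
Branch order matters here: the new "url" check must precede the generic dict branch, or an MCP server spec would be appended to tool_definitions instead of mcp_servers. A hypothetical sketch of the two dict shapes a caller might pass; every server field other than "url" is an assumption modeled on Anthropic's MCP connector, not taken from this diff:

# Hypothetical usage sketch; fields other than "url" are illustrative.
tools = [
    my_tool,  # a Tool instance, serialized via tool.dump_for("anthropic")
    {
        # Has a "url" key: routed to mcp_servers and enables the
        # "mcp-client-2025-04-04" beta header.
        "type": "url",
        "url": "https://example.com/mcp",
        "name": "example-server",
    },
    {
        # No "url" key: falls through to the plain tool-definition branch.
        "name": "get_weather",
        "description": "Get the weather for a city.",
        "input_schema": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
        },
    },
]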
src/lm_deluge/api_requests/openai.py
@@ -184,7 +184,8 @@ class OpenAIRequest(APIRequestBase):
 
         content = Message("assistant", parts)
 
-        usage = Usage.from_openai_usage(data["usage"])
+        if "usage" in data and data["usage"] is not None:
+            usage = Usage.from_openai_usage(data["usage"])
         if (
             self.context.sampling_params.logprobs
             and "logprobs" in data["choices"][0]
@@ -349,7 +350,8 @@ class OpenAIResponsesRequest(APIRequestBase):
         assert self.context.status_tracker
 
         if status_code == 500:
-            print("Internal Server Error: ", http_response.text())
+            res_text = await http_response.text()
+            print("Internal Server Error: ", res_text)
 
         if status_code >= 200 and status_code < 300:
             try:
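
The old line printed the unawaited coroutine object rather than the response body; with an async HTTP client such as aiohttp (an assumption, since the client library is outside this diff), text() is a coroutine method. A self-contained sketch of the bug class and the fix:

import asyncio

import aiohttp


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get("https://httpbin.org/status/500") as resp:
            if resp.status == 500:
                # Buggy pattern: print("...", resp.text()) emits
                # "<coroutine object ...>" and never awaits the body.
                res_text = await resp.text()  # fixed: await the coroutine
                print("Internal Server Error: ", res_text)


asyncio.run(main())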
@@ -361,126 +363,138 @@ class OpenAIResponsesRequest(APIRequestBase):
                 )
             if not is_error:
                 assert data is not None, "data is None"
-                try:
-                    # Parse Responses API format
-                    parts = []
 
-                    # Get the output array from the response
-                    output = data.get("output", [])
-                    if not output:
-                        is_error = True
-                        error_message = "No output in response"
-                    else:
-                        # Process each output item
-                        for item in output:
-                            if item.get("type") == "message":
-                                message_content = item.get("content", [])
-                                for content_item in message_content:
-                                    if content_item.get("type") == "output_text":
-                                        parts.append(Text(content_item["text"]))
-                                    elif content_item.get("type") == "refusal":
-                                        parts.append(Text(content_item["refusal"]))
-                            elif item.get("type") == "reasoning":
-                                summary = item["summary"]
-                                if not summary:
-                                    continue
-                                if isinstance(summary, list) and len(summary) > 0:
-                                    summary = summary[0]
-                                assert isinstance(summary, dict), "summary isn't a dict"
-                                parts.append(Thinking(summary["text"]))
-                            elif item.get("type") == "function_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
+                # Check if response is incomplete
+                if data.get("status") == "incomplete":
+                    is_error = True
+                    incomplete_reason = data.get("incomplete_details", {}).get(
+                        "reason", "unknown"
+                    )
+                    error_message = f"Response incomplete: {incomplete_reason}"
+
+                if not is_error:
+                    try:
+                        # Parse Responses API format
+                        parts = []
+
+                        # Get the output array from the response
+                        output = data.get("output", [])
+                        if not output:
+                            is_error = True
+                            error_message = "No output in response"
+                        else:
+                            # Process each output item
+                            for item in output:
+                                if item.get("type") == "message":
+                                    message_content = item.get("content", [])
+                                    for content_item in message_content:
+                                        if content_item.get("type") == "output_text":
+                                            parts.append(Text(content_item["text"]))
+                                        elif content_item.get("type") == "refusal":
+                                            parts.append(Text(content_item["refusal"]))
+                                elif item.get("type") == "reasoning":
+                                    summary = item["summary"]
+                                    if not summary:
+                                        continue
+                                    if isinstance(summary, list) and len(summary) > 0:
+                                        summary = summary[0]
+                                    assert isinstance(
+                                        summary, dict
+                                    ), "summary isn't a dict"
+                                    parts.append(Thinking(summary["text"]))
+                                elif item.get("type") == "function_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "mcp_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name=item["name"],
-                                        arguments=json.loads(item["arguments"]),
-                                        built_in=True,
-                                        built_in_type="mcp_call",
-                                        extra_body={
-                                            "server_label": item["server_label"],
-                                            "error": item.get("error"),
-                                            "output": item.get("output"),
-                                        },
+                                elif item.get("type") == "mcp_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name=item["name"],
+                                            arguments=json.loads(item["arguments"]),
+                                            built_in=True,
+                                            built_in_type="mcp_call",
+                                            extra_body={
+                                                "server_label": item["server_label"],
+                                                "error": item.get("error"),
+                                                "output": item.get("output"),
+                                            },
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "computer_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["call_id"],
-                                        name="computer_call",
-                                        arguments=item.get("action"),
-                                        built_in=True,
-                                        built_in_type="computer_call",
+                                elif item.get("type") == "computer_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["call_id"],
+                                            name="computer_call",
+                                            arguments=item.get("action"),
+                                            built_in=True,
+                                            built_in_type="computer_call",
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "web_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="web_search_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="web_search_call",
-                                        extra_body={"status": item["status"]},
+                                elif item.get("type") == "web_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="web_search_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="web_search_call",
+                                            extra_body={"status": item["status"]},
+                                        )
                                     )
-                                )
 
-                            elif item.get("type") == "file_search_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="file_search_call",
-                                        arguments={"queries": item["queries"]},
-                                        built_in=True,
-                                        built_in_type="file_search_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "results": item["results"],
-                                        },
+                                elif item.get("type") == "file_search_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="file_search_call",
+                                            arguments={"queries": item["queries"]},
+                                            built_in=True,
+                                            built_in_type="file_search_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "results": item["results"],
+                                            },
+                                        )
                                     )
-                                )
-                            elif item.get("type") == "image_generation_call":
-                                parts.append(
-                                    ToolCall(
-                                        id=item["id"],
-                                        name="image_generation_call",
-                                        arguments={},
-                                        built_in=True,
-                                        built_in_type="image_generation_call",
-                                        extra_body={
-                                            "status": item["status"],
-                                            "result": item["result"],
-                                        },
+                                elif item.get("type") == "image_generation_call":
+                                    parts.append(
+                                        ToolCall(
+                                            id=item["id"],
+                                            name="image_generation_call",
+                                            arguments={},
+                                            built_in=True,
+                                            built_in_type="image_generation_call",
+                                            extra_body={
+                                                "status": item["status"],
+                                                "result": item["result"],
+                                            },
+                                        )
                                     )
-                                )
 
-                    # Handle reasoning if present
-                    if "reasoning" in data and data["reasoning"].get("summary"):
-                        thinking = data["reasoning"]["summary"]
-                        parts.append(Thinking(thinking))
+                        # Handle reasoning if present
+                        if "reasoning" in data and data["reasoning"].get("summary"):
+                            thinking = data["reasoning"]["summary"]
+                            parts.append(Thinking(thinking))
 
-                    content = Message("assistant", parts)
+                        content = Message("assistant", parts)
 
-                    # Extract usage information
-                    if "usage" in data:
-                        usage = Usage.from_openai_usage(data["usage"])
+                        # Extract usage information
+                        if "usage" in data and data["usage"] is not None:
+                            usage = Usage.from_openai_usage(data["usage"])
 
-                except Exception as e:
-                    is_error = True
-                    error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
-                    print("got data:", data)
-                    traceback = tb.format_exc()
-                    print(f"Error details:\n{traceback}")
+                    except Exception as e:
+                        is_error = True
+                        error_message = f"Error parsing {self.model.name} responses API response: {str(e)}"
+                        print("got data:", data)
+                        traceback = tb.format_exc()
+                        print(f"Error details:\n{traceback}")
 
             elif mimetype and "json" in mimetype.lower():
                 print("is_error True, json response")
src/lm_deluge/prompt.py
@@ -329,6 +329,18 @@ class Message:
         """Get all thinking parts with proper typing."""
         return [part for part in self.parts if part.type == "thinking"]  # type: ignore
 
+    # @staticmethod
+    # def dump_part(part: Part):
+    #     if isinstance(value, Text):
+    #         return {"type": "text", "text": value.text}
+    #     if isinstance(value, Image):
+    #         w, h = value.size
+    #         return {"type": "image", "tag": f"<Image ({w}×{h})>"}
+    #     if isinstance(value, File):
+    #         size = value.size
+    #         return {"type": "file", "tag": f"<File ({size} bytes)>"}
+    #     return repr(value)
+
     def to_log(self) -> dict:
         """
         Return a JSON-serialisable dict that fully captures the message.
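
Note that the commented-out helper takes a parameter named part but references value throughout, so it would raise NameError if ever uncommented as-is. A corrected standalone sketch, assuming the Text, Image, and File part classes expose the .text and .size attributes the comments imply:

# Corrected sketch of the commented-out helper. The import location and the
# .text / .size attributes are assumptions based on the surrounding code.
from lm_deluge.prompt import File, Image, Text


def dump_part(part) -> dict | str:
    if isinstance(part, Text):
        return {"type": "text", "text": part.text}
    if isinstance(part, Image):
        w, h = part.size
        return {"type": "image", "tag": f"<Image ({w}×{h})>"}
    if isinstance(part, File):
        size = part.size
        return {"type": "file", "tag": f"<File ({size} bytes)>"}
    return repr(part)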
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.65
+Version: 0.0.67
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10