lm-deluge 0.0.47__tar.gz → 0.0.49__tar.gz

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of lm-deluge might be problematic.

Files changed (80)
  1. {lm_deluge-0.0.47/src/lm_deluge.egg-info → lm_deluge-0.0.49}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/anthropic.py +0 -4
  4. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/cache.py +10 -1
  5. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/client.py +6 -0
  6. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/image.py +21 -0
  7. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/prompt.py +516 -18
  8. {lm_deluge-0.0.47 → lm_deluge-0.0.49/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  9. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/LICENSE +0 -0
  10. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/README.md +0 -0
  11. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/setup.cfg +0 -0
  12. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/__init__.py +0 -0
  13. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/agent.py +0 -0
  14. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/__init__.py +0 -0
  15. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/base.py +0 -0
  16. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/bedrock.py +0 -0
  17. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/common.py +0 -0
  18. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  19. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  20. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  21. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  22. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  23. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/gemini.py +0 -0
  24. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/mistral.py +0 -0
  25. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/openai.py +0 -0
  26. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/api_requests/response.py +0 -0
  27. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/batches.py +0 -0
  28. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  29. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  30. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  31. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  32. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/base.py +0 -0
  33. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/built_in_tools/openai.py +0 -0
  34. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/cli.py +0 -0
  35. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/config.py +0 -0
  36. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/embed.py +0 -0
  37. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/errors.py +0 -0
  38. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/file.py +0 -0
  39. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/gemini_limits.py +0 -0
  40. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/__init__.py +0 -0
  41. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/classify.py +0 -0
  42. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/extract.py +0 -0
  43. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/locate.py +0 -0
  44. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/ocr.py +0 -0
  45. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/score.py +0 -0
  46. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/llm_tools/translate.py +0 -0
  47. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/__init__.py +0 -0
  48. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/anthropic.py +0 -0
  49. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/bedrock.py +0 -0
  50. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/cerebras.py +0 -0
  51. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/cohere.py +0 -0
  52. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/deepseek.py +0 -0
  53. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/fireworks.py +0 -0
  54. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/google.py +0 -0
  55. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/grok.py +0 -0
  56. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/groq.py +0 -0
  57. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/meta.py +0 -0
  58. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/mistral.py +0 -0
  59. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/openai.py +0 -0
  60. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/openrouter.py +0 -0
  61. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/models/together.py +0 -0
  62. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/presets/cerebras.py +0 -0
  63. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/presets/meta.py +0 -0
  64. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/request_context.py +0 -0
  65. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/rerank.py +0 -0
  66. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/tool.py +0 -0
  67. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/tracker.py +0 -0
  68. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/usage.py +0 -0
  69. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/harmony.py +0 -0
  70. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/json.py +0 -0
  71. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/logprobs.py +0 -0
  72. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/spatial.py +0 -0
  73. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/validation.py +0 -0
  74. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge/util/xml.py +0 -0
  75. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  76. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  77. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge.egg-info/requires.txt +0 -0
  78. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/src/lm_deluge.egg-info/top_level.txt +0 -0
  79. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/tests/test_builtin_tools.py +0 -0
  80. {lm_deluge-0.0.47 → lm_deluge-0.0.49}/tests/test_native_mcp_server.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.47
+ Version: 0.0.49
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10
pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]

  [project]
  name = "lm_deluge"
- version = "0.0.47"
+ version = "0.0.49"
  authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
  description = "Python utility for using LLM API models."
  readme = "README.md"
src/lm_deluge/api_requests/anthropic.py
@@ -28,10 +28,6 @@ def _add_beta(headers: dict, beta: str):
  def _build_anthropic_request(
      model: APIModel,
      context: RequestContext,
-     # prompt: Conversation,
-     # tools: list[Tool | dict | MCPServer] | None,
-     # sampling_params: SamplingParams,
-     # cache_pattern: CachePattern | None = None,
  ):
      prompt = context.prompt
      cache_pattern = context.cache
src/lm_deluge/cache.py
@@ -78,9 +78,18 @@ class LevelDBCache:
          Get an API response from the cache.
          """
          key = f"{self.cache_key}:{prompt.fingerprint}"
+         # print(f"DEBUG: Cache.get() looking for key: {key}")
          data = self.db.get(key.encode())
          if data is not None:
-             return decode_api_response(data)
+             # print(f"DEBUG: Cache.get() FOUND data, calling decode_api_response")
+             try:
+                 result = decode_api_response(data)
+                 # print(f"DEBUG: Cache.get() decode_api_response returned: {type(result)}")
+                 return result
+             except Exception:
+                 # print(f"DEBUG: Cache.get() ERROR in decode_api_response: {e}")
+                 return None
+         # print(f"DEBUG: Cache.get() NO data found, returning None")
          return None

      def put(self, prompt: Conversation, response: APIResponse):
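
Note: the practical effect of the cache.py change above is that an entry which can no longer be decoded now degrades to a cache miss instead of raising out of get(). A minimal sketch of the pattern (illustrative, not the library's code):

    # Decode failures are swallowed and treated as a miss, so a corrupt or
    # stale cache entry falls through to a fresh API request.
    def safe_get(db, key: str, decode):
        data = db.get(key)
        if data is None:
            return None
        try:
            return decode(data)
        except Exception:
            return None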
src/lm_deluge/client.py
@@ -250,12 +250,17 @@ class _LLMClient(BaseModel):
              return response

          if self.cache:
+             # print(f"DEBUG: Checking cache for prompt with {len(context.prompt.messages)} messages")
              cached = self.cache.get(context.prompt)
              if cached:
+                 # print(f"DEBUG: Cache HIT! Returning cached response")
                  cached.local_cache_hit = True
                  if context.status_tracker:
                      context.status_tracker.task_succeeded(context.task_id)
                  return _maybe_postprocess(cached)
+             else:
+                 # print(f"DEBUG: Cache MISS")
+                 pass

          # Execute single request
          assert context.status_tracker
@@ -267,6 +272,7 @@ class _LLMClient(BaseModel):
          context.status_tracker.task_succeeded(context.task_id)
          # Cache successful responses immediately
          if self.cache and response.completion:
+             # print(f"DEBUG: Caching successful response")
              self.cache.put(context.prompt, response)
          # Call callback if provided
          context.maybe_callback(response, context.status_tracker)
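
Note: the client.py hunks above add only commented-out debug prints around the existing cache flow. That flow, simplified and with assumed names (execute_request is a hypothetical stand-in for the real request path), is:

    # Check the local cache, short-circuit on a hit, otherwise issue the
    # request and cache only successful completions.
    async def process(client, context):
        if client.cache:
            cached = client.cache.get(context.prompt)
            if cached:
                cached.local_cache_hit = True
                return cached
        response = await execute_request(context)  # hypothetical helper
        if client.cache and response.completion:
            client.cache.put(context.prompt, response)
        return response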
src/lm_deluge/image.py
@@ -23,6 +23,16 @@ class Image:
      _fingerprint_cache: str | None = field(init=False, default=None)
      _size_cache: tuple[int, int] | None = field(init=False, default=None)

+     # def __post_init__(self):
+     #     # DEBUG: Track image data at creation
+     #     data_type = type(self.data)
+     #     data_preview = (
+     #         str(self.data)[:100]
+     #         if isinstance(self.data, str)
+     #         else f"[{data_type.__name__}]"
+     #     )
+     #     print(f"DEBUG: Image.__post_init__: id={id(self)}, data={data_type}, preview={data_preview}")
+
      def __repr__(self):
          return f"Image(data=[{type(self.data)}], media_type={self.media_type}, detail={self.detail})"
@@ -58,6 +68,15 @@ class Image:

      # helpers -----------------------------------------------------------------
      def _bytes(self) -> bytes:
+         # DEBUG: Track when _bytes is called and what data we have
+         # data_type = type(self.data)
+         # data_preview = (
+         #     str(self.data)[:100]
+         #     if isinstance(self.data, str)
+         #     else f"[{data_type.__name__}]"
+         # )
+         # print(f"DEBUG: Image._bytes called: id={id(self)}, data={data_type}, preview={data_preview}")
+
          if isinstance(self.data, bytes):
              return self.data
          elif isinstance(self.data, io.BytesIO):
@@ -82,6 +101,7 @@ class Image:
              content = "[raw bytes]"
          else:
              content = f"[raw {type(self.data)}]"
+         # print(f"DEBUG: Image._bytes ERROR PATH: type={type(self.data)}, content={content}")
          raise ValueError(
              f"unreadable image format. type: {type(self.data)}. content: {content}"
          )
@@ -164,6 +184,7 @@ class Image:
      @property
      def fingerprint(self) -> str:
          # return base64 of a very small version of the image
+         # print(f"DEBUG: Image.fingerprint called, data type: {type(self.data)}")
          if self._fingerprint_cache is None:
              small_image = self._resize_longer(max_size=48)  # longer side = 48px
              self._fingerprint_cache = base64.b64encode(small_image).decode("utf-8")
src/lm_deluge/prompt.py
@@ -333,6 +333,7 @@ class Message:
          """
          Return a JSON-serialisable dict that fully captures the message.
          """
+
          def _json_safe(value):
              if isinstance(value, (str, int, float, bool)) or value is None:
                  return value
@@ -385,6 +386,8 @@ class Message:
      @classmethod
      def from_log(cls, data: dict) -> "Message":
          """Re-hydrate a Message previously produced by `to_log()`."""
+         # DEBUG: Track when from_log is called
+         # print(f"DEBUG: Message.from_log called for {data['role']} message with {len(data['content'])} content blocks")
          role: Role = data["role"]
          parts: list[Part] = []

@@ -392,11 +395,12 @@ class Message:
              if p["type"] == "text":
                  parts.append(Text(p["text"]))
              elif p["type"] == "image":
-                 # We only stored a placeholder tag, so keep that placeholder.
-                 parts.append(Image(p["tag"], detail="low"))
+                 # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
+                 # print(f"DEBUG: Message.from_log creating Text placeholder for image: {p['tag']}")
+                 parts.append(Text(p["tag"]))
              elif p["type"] == "file":
-                 # We only stored a placeholder tag, so keep that placeholder.
-                 parts.append(File(p["tag"]))
+                 # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
+                 parts.append(Text(p["tag"]))
              elif p["type"] == "tool_call":
                  parts.append(
                      ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
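
Note: this changes the round-trip behavior of Message.from_log(). Image and file placeholders now rehydrate as Text parts, so downstream code cannot accidentally treat a placeholder tag as real image bytes. For example (the exact tag format comes from to_log() and is assumed here):

    logged = {
        "role": "user",
        "content": [
            {"type": "text", "text": "describe this"},
            {"type": "image", "tag": "<image:abc123>"},
        ],
    }
    msg = Message.from_log(logged)
    # msg now holds [Text("describe this"), Text("<image:abc123>")]
    # rather than [Text(...), Image("<image:abc123>", detail="low")]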
@@ -562,6 +566,51 @@ class Message:
      @classmethod
      def from_anthropic(cls, msg: dict):
          pass
+         # role = (
+         #     "system"
+         #     if msg["role"] in ["developer", "system"]
+         #     else ("user" if msg["role"] == "user" else "assistant")
+         # )
+         # parts: list[Part] = []
+         # content = msg["content"]
+         # if isinstance(content, str):
+         #     parts = [Text(content)]
+         # else:
+         #     part_list = []
+         #     for item in content:
+         #         if item["type"] == "text":
+         #             part_list.append(Text(item["text"]))
+         #         elif item["type"] == "image_url":
+         #             part_list.append(Image(data=item["image_url"]["url"]))
+         #         elif item["type"] == "file":
+         #             file_data = item["file"]
+         #             if "file_id" in file_data:
+         #                 # Handle file ID reference (not implemented yet)
+         #                 part_list.append(File(data=file_data["file_id"]))
+         #             elif "file_data" in file_data:
+         #                 # Handle base64 file data
+         #                 part_list.append(
+         #                     File(
+         #                         data=file_data["file_data"],
+         #                         filename=file_data.get("filename"),
+         #                     )
+         #                 )
+         #     parts = part_list
+
+         # # Handle tool calls (assistant messages)
+         # if "tool_calls" in msg:
+         #     part_list = list(parts) if parts else []
+         #     for tool_call in msg["tool_calls"]:
+         #         part_list.append(
+         #             ToolCall(
+         #                 id=tool_call["id"],
+         #                 name=tool_call["function"]["name"],
+         #                 arguments=json.loads(tool_call["function"]["arguments"]),
+         #             )
+         #         )
+         #     parts = part_list
+
+         # return cls(role, parts)

      # ───── provider-specific emission ─────
      def oa_chat(self) -> dict:
@@ -571,11 +620,6 @@ class Message:
              if len(tool_results) == 1:
                  tool_result = tool_results[0]
                  return tool_result.oa_chat()
-                 # {
-                 #     "role": "tool",
-                 #     "tool_call_id": tool_result.tool_call_id,
-                 #     "content": tool_result.result,
-                 # }
              else:
                  raise ValueError(
                      f"Tool role messages must contain exactly one ToolResult part for OpenAI, got {len(tool_results)}"
@@ -661,14 +705,469 @@ class Conversation:
          return cls([msg])

      @classmethod
-     def from_openai(cls, messages: list[dict]):
+     def from_openai_chat(cls, messages: list[dict]):
          """Compatibility with openai-formatted messages"""
-         pass
+
+         def _to_image_from_url(block: dict) -> Image:
+             payload = block.get("image_url") or block.get("input_image") or {}
+             url = payload.get("url") or payload.get("file_id")
+             detail = payload.get("detail", "auto")
+             media_type = payload.get("media_type")
+             if url is None:
+                 raise ValueError("image content missing url")
+             return Image(data=url, media_type=media_type, detail=detail)
+
+         def _to_file(block: dict) -> File:
+             payload = block.get("file") or block.get("input_file") or {}
+             file_id = payload.get("file_id") or block.get("file_id")
+             filename = payload.get("filename")
+             file_data = payload.get("file_data")
+             if file_id is not None:
+                 return File(data=b"", filename=filename, file_id=file_id)
+             if file_data is not None:
+                 return File(data=file_data, filename=filename)
+             raise ValueError("file content missing file data or id")
+
+         def _to_audio_file(block: dict) -> File:
+             payload = block.get("audio") or block.get("input_audio") or {}
+             file_id = payload.get("file_id")
+             audio_format = payload.get("format", "wav")
+             media_type = f"audio/{audio_format}"
+             data = payload.get("data")
+             if file_id is not None:
+                 return File(data=b"", media_type=media_type, file_id=file_id)
+             if data is not None:
+                 data_url = f"data:{media_type};base64,{data}"
+                 return File(data=data_url, media_type=media_type)
+             raise ValueError("audio block missing data or file id")
+
+         text_types = {"text", "input_text", "output_text", "refusal"}
+         image_types = {"image_url", "input_image", "image"}
+         file_types = {"file", "input_file"}
+         audio_types = {"audio", "input_audio"}
+
+         def _convert_content_blocks(content: str | list[dict] | None) -> list[Part]:
+             parts: list[Part] = []
+             if content is None:
+                 return parts
+             if isinstance(content, str):
+                 parts.append(Text(content))
+                 return parts
+
+             for block in content:
+                 block_type = block.get("type")
+                 if block_type in text_types:
+                     text_value = block.get("text") or block.get(block_type) or ""
+                     parts.append(Text(text_value))
+                 elif block_type in image_types:
+                     parts.append(_to_image_from_url(block))
+                 elif block_type in file_types:
+                     parts.append(_to_file(block))
+                 elif block_type in audio_types:
+                     parts.append(_to_audio_file(block))
+                 elif block_type == "tool_result":
+                     # Rare: assistant echoing tool results – convert to text
+                     result = block.get("content")
+                     if isinstance(result, str):
+                         parts.append(Text(result))
+                     else:
+                         parts.append(Text(json.dumps(result)))
+                 elif block_type == "image_file":
+                     payload = block.get("image_file", {})
+                     file_id = payload.get("file_id")
+                     placeholder = {"type": "image_file", "file_id": file_id}
+                     parts.append(Text(json.dumps(placeholder)))
+                 else:
+                     parts.append(Text(json.dumps(block)))
+             return parts
+
+         def _convert_tool_arguments(raw: str | dict | None) -> dict:
+             if isinstance(raw, dict):
+                 return raw
+             if raw is None:
+                 return {}
+             try:
+                 return json.loads(raw)
+             except json.JSONDecodeError:
+                 return {"__raw__": raw}
+
+         def _convert_tool_result_content(
+             content: str | list[dict] | None,
+         ) -> str | list[ToolResultPart]:
+             if content is None:
+                 return ""
+             if isinstance(content, str):
+                 return content
+             result_parts: list[ToolResultPart] = []
+             for block in content:
+                 block_type = block.get("type")
+                 if block_type in {"text", "input_text", "output_text", "refusal"}:
+                     text_value = block.get("text") or block.get(block_type) or ""
+                     result_parts.append(Text(text_value))
+                 elif block_type in image_types:
+                     result_parts.append(_to_image_from_url(block))
+                 else:
+                     result_parts.append(Text(json.dumps(block)))
+             return result_parts
+
+         conversation_messages: list[Message] = []
+
+         for idx, raw_message in enumerate(messages):
+             role = raw_message.get("role")
+             if role is None:
+                 raise ValueError("OpenAI message missing role")
+
+             role_lower = role.lower()
+             if role_lower in {"system", "developer"}:
+                 parts = _convert_content_blocks(raw_message.get("content"))
+                 conversation_messages.append(Message("system", parts))
+                 continue
+
+             if role_lower == "tool" or role_lower == "function":
+                 tool_call_id = (
+                     raw_message.get("tool_call_id")
+                     or raw_message.get("id")
+                     or raw_message.get("name")
+                     or f"tool_call_{idx}"
+                 )
+                 tool_result = ToolResult(
+                     tool_call_id=tool_call_id,
+                     result=_convert_tool_result_content(raw_message.get("content")),
+                 )
+                 conversation_messages.append(Message("tool", [tool_result]))
+                 continue
+
+             mapped_role: Role
+             if role_lower == "user":
+                 mapped_role = "user"
+             elif role_lower == "assistant":
+                 mapped_role = "assistant"
+             else:
+                 raise ValueError(f"Unsupported OpenAI message role: {role}")
+
+             parts = _convert_content_blocks(raw_message.get("content"))
+
+             tool_calls = raw_message.get("tool_calls")
+             if not tool_calls and raw_message.get("function_call") is not None:
+                 tool_calls = [
+                     {
+                         "id": raw_message.get("id"),
+                         "type": "function",
+                         "function": raw_message["function_call"],
+                     }
+                 ]
+
+             if tool_calls:
+                 for call_index, call in enumerate(tool_calls):
+                     call_type = call.get("type", "function")
+                     call_id = (
+                         call.get("id")
+                         or call.get("tool_call_id")
+                         or call.get("call_id")
+                         or f"tool_call_{idx}_{call_index}"
+                     )
+
+                     if call_type == "function":
+                         function_payload = call.get("function", {})
+                         name = (
+                             function_payload.get("name")
+                             or call.get("name")
+                             or "function"
+                         )
+                         arguments = _convert_tool_arguments(
+                             function_payload.get("arguments")
+                         )
+                         parts.append(
+                             ToolCall(
+                                 id=call_id,
+                                 name=name,
+                                 arguments=arguments,
+                             )
+                         )
+                     else:
+                         payload = call.get(call_type, {})
+                         if not isinstance(payload, dict):
+                             payload = {"value": payload}
+                         arguments = payload.get("arguments")
+                         if arguments is None:
+                             arguments = payload
+                         parts.append(
+                             ToolCall(
+                                 id=call_id,
+                                 name=call_type,
+                                 arguments=arguments
+                                 if isinstance(arguments, dict)
+                                 else {"value": arguments},
+                                 built_in=True,
+                                 built_in_type=call_type,
+                                 extra_body=payload,
+                             )
+                         )
+
+             conversation_messages.append(Message(mapped_role, parts))
+
+         return cls(conversation_messages)

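Note: from_openai_chat() replaces the previously stubbed-out from_openai(). A possible usage with standard OpenAI chat-completions payloads (import path assumed from the file layout above):

    from lm_deluge.prompt import Conversation

    conv = Conversation.from_openai_chat([
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ]},
        {"role": "assistant", "tool_calls": [{
            "id": "call_1", "type": "function",
            "function": {"name": "lookup", "arguments": "{\"q\": \"cats\"}"},
        }]},
        {"role": "tool", "tool_call_id": "call_1", "content": "cats are mammals"},
    ])
    # system/developer map to role="system"; tool/function messages become
    # role="tool" messages wrapping a ToolResult; unrecognized block types
    # are preserved as JSON text rather than dropped.
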
      @classmethod
-     def from_anthropic(cls, messages: list[dict], system: str | None = None):
+     def from_anthropic(
+         cls, messages: list[dict], system: str | list[dict] | None = None
+     ):
          """Compatibility with anthropic-formatted messages"""
-         pass
+
+         def _anthropic_text_part(text_value: str | None) -> Text:
+             return Text(text_value or "")
+
+         def _anthropic_image(block: dict) -> Image:
+             source = block.get("source", {})
+             source_type = source.get("type")
+             if source_type == "base64":
+                 media_type = source.get("media_type", "image/png")
+                 data = source.get("data", "")
+                 return Image(
+                     data=f"data:{media_type};base64,{data}",
+                     media_type=media_type,
+                 )
+             if source_type == "url":
+                 media_type = source.get("media_type")
+                 url = source.get("url")
+                 if url is None:
+                     raise ValueError("Anthropic image source missing url")
+                 return Image(data=url, media_type=media_type)
+             if source_type == "file":
+                 file_id = source.get("file_id")
+                 if file_id is None:
+                     raise ValueError("Anthropic image file source missing file_id")
+                 raise ValueError(
+                     "Anthropic image file references require external fetch"
+                 )
+             raise ValueError(f"Unsupported Anthropic image source: {source_type}")
+
+         def _anthropic_file(block: dict) -> File:
+             source = block.get("source", {})
+             source_type = source.get("type")
+             if source_type == "file":
+                 file_id = source.get("file_id")
+                 if file_id is None:
+                     raise ValueError("Anthropic file source missing file_id")
+                 return File(data=b"", file_id=file_id)
+             if source_type == "base64":
+                 media_type = source.get("media_type")
+                 data = source.get("data", "")
+                 return File(
+                     data=f"data:{media_type};base64,{data}",
+                     media_type=media_type,
+                     filename=block.get("name"),
+                 )
+             raise ValueError(f"Unsupported Anthropic file source: {source_type}")
+
+         def _anthropic_tool_result_content(
+             content: str | list[dict] | None,
+         ) -> str | list[ToolResultPart]:
+             if content is None:
+                 return ""
+             if isinstance(content, str):
+                 return content
+             result_parts: list[ToolResultPart] = []
+             for part in content:
+                 part_type = part.get("type")
+                 if part_type == "text":
+                     result_parts.append(_anthropic_text_part(part.get("text")))
+                 elif part_type == "image":
+                     try:
+                         result_parts.append(_anthropic_image(part))
+                     except ValueError:
+                         result_parts.append(Text(json.dumps(part)))
+                 else:
+                     result_parts.append(Text(json.dumps(part)))
+             return result_parts
+
+         def _anthropic_content_to_parts(
+             role: Role, content: str | list[dict] | None
+         ) -> list[Part]:
+             parts: list[Part] = []
+             if content is None:
+                 return parts
+             if isinstance(content, str):
+                 parts.append(_anthropic_text_part(content))
+                 return parts
+
+             for block in content:
+                 block_type = block.get("type")
+                 if block_type == "text":
+                     parts.append(_anthropic_text_part(block.get("text")))
+                 elif block_type == "image":
+                     try:
+                         parts.append(_anthropic_image(block))
+                     except ValueError:
+                         parts.append(Text(json.dumps(block)))
+                 elif block_type == "document":
+                     try:
+                         parts.append(_anthropic_file(block))
+                     except ValueError:
+                         parts.append(Text(json.dumps(block)))
+                 elif block_type == "tool_use":
+                     tool_id = block.get("id")
+                     if tool_id is None:
+                         raise ValueError("Anthropic tool_use block missing id")
+                     name = block.get("name") or "tool"
+                     arguments = block.get("input") or {}
+                     parts.append(
+                         ToolCall(
+                             id=tool_id,
+                             name=name,
+                             arguments=arguments
+                             if isinstance(arguments, dict)
+                             else {"value": arguments},
+                         )
+                     )
+                 elif block_type == "tool_result":
+                     tool_use_id = block.get("tool_use_id")
+                     if tool_use_id is None:
+                         raise ValueError(
+                             "Anthropic tool_result block missing tool_use_id"
+                         )
+                     result = _anthropic_tool_result_content(block.get("content"))
+                     tool_result = ToolResult(tool_call_id=tool_use_id, result=result)
+                     parts.append(tool_result)
+                 elif block_type == "thinking":
+                     thinking_content = block.get("thinking", "")
+                     parts.append(Thinking(content=thinking_content, raw_payload=block))
+                 else:
+                     parts.append(Text(json.dumps(block)))
+             return parts
+
+         conversation_messages: list[Message] = []
+
+         if system is not None:
+             if isinstance(system, str):
+                 conversation_messages.append(Message("system", [Text(system)]))
+             elif isinstance(system, list):
+                 system_parts = _anthropic_content_to_parts("system", system)
+                 conversation_messages.append(Message("system", system_parts))
+             else:
+                 raise ValueError(
+                     "Anthropic system prompt must be string or list of blocks"
+                 )
+
+         for message in messages:
+             role = message.get("role")
+             if role is None:
+                 raise ValueError("Anthropic message missing role")
+
+             if role not in {"user", "assistant"}:
+                 raise ValueError(f"Unsupported Anthropic role: {role}")
+
+             base_role: Role = role  # type: ignore[assignment]
+             content = message.get("content")
+             if isinstance(content, list):
+                 buffer_parts: list[Part] = []
+                 for block in content:
+                     block_type = block.get("type")
+                     if block_type == "tool_result":
+                         if buffer_parts:
+                             conversation_messages.append(
+                                 Message(base_role, buffer_parts)
+                             )
+                             buffer_parts = []
+                         tool_use_id = block.get("tool_use_id")
+                         if tool_use_id is None:
+                             raise ValueError(
+                                 "Anthropic tool_result block missing tool_use_id"
+                             )
+                         result = _anthropic_tool_result_content(block.get("content"))
+                         conversation_messages.append(
+                             Message(
+                                 "tool",
+                                 [ToolResult(tool_call_id=tool_use_id, result=result)],
+                             )
+                         )
+                     else:
+                         block_parts = _anthropic_content_to_parts(base_role, [block])
+                         buffer_parts.extend(block_parts)
+
+                 if buffer_parts:
+                     conversation_messages.append(Message(base_role, buffer_parts))
+             else:
+                 parts = _anthropic_content_to_parts(base_role, content)
+                 conversation_messages.append(Message(base_role, parts))
+
+         return cls(conversation_messages)
+
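Note: from_anthropic() now accepts a system prompt as either a string or a list of content blocks, and tool_result blocks inside user messages are split out into their own role="tool" messages. A possible usage:

    conv = Conversation.from_anthropic(
        [
            {"role": "user", "content": "What's the weather in SF?"},
            {"role": "assistant", "content": [
                {"type": "tool_use", "id": "toolu_1",
                 "name": "get_weather", "input": {"city": "SF"}},
            ]},
            {"role": "user", "content": [
                {"type": "tool_result", "tool_use_id": "toolu_1", "content": "68F"},
            ]},
        ],
        system="Be concise.",
    )
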
+     @classmethod
+     def from_unknown(
+         cls, messages: list[dict], *, system: str | list[dict] | None = None
+     ) -> tuple["Conversation", str]:
+         """Attempt to convert provider-formatted messages without knowing the provider.
+
+         Returns the parsed conversation together with the provider label that succeeded
+         ("openai" or "anthropic").
+         """
+
+         def _detect_provider() -> str:
+             has_openai_markers = False
+             has_anthropic_markers = False
+
+             for msg in messages:
+                 role = msg.get("role")
+                 if role == "tool":
+                     has_openai_markers = True
+
+                 if role == "system":
+                     has_openai_markers = True
+
+                 if (
+                     "tool_calls" in msg
+                     or "function_call" in msg
+                     or "tool_call_id" in msg
+                 ):
+                     has_openai_markers = True
+
+                 content = msg.get("content")
+                 if isinstance(content, list):
+                     for block in content:
+                         if not isinstance(block, dict):
+                             continue
+                         block_type = block.get("type")
+                         if block_type in {
+                             "tool_use",
+                             "tool_result",
+                             "thinking",
+                             "assistant_response",
+                             "redacted",
+                         }:
+                             has_anthropic_markers = True
+                         if block_type == "tool_result" and block.get("tool_use_id"):
+                             has_anthropic_markers = True
+                         if block_type == "tool_use":
+                             has_anthropic_markers = True
+
+             if has_openai_markers and not has_anthropic_markers:
+                 return "openai"
+             if has_anthropic_markers and not has_openai_markers:
+                 return "anthropic"
+             if has_openai_markers:
+                 return "openai"
+             if has_anthropic_markers:
+                 return "anthropic"
+             # As a fallback, default to OpenAI which is the most permissive
+             return "openai"
+
+         provider = _detect_provider()
+         if provider == "openai":
+             try:
+                 return cls.from_openai_chat(messages), "openai"
+             except Exception:
+                 try:
+                     return cls.from_anthropic(messages, system=system), "anthropic"
+                 except Exception as anthropic_error:
+                     raise ValueError(
+                         "Unable to parse messages as OpenAI or Anthropic"
+                     ) from anthropic_error
+         else:
+             try:
+                 return cls.from_anthropic(messages, system=system), "anthropic"
+             except Exception:
+                 return cls.from_openai_chat(messages), "openai"

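Note: from_unknown() guesses the provider from structural markers (tool/system roles and tool_calls or function_call keys suggest OpenAI; tool_use, tool_result, and thinking blocks suggest Anthropic), then falls back to the other parser if the first attempt raises. It returns both the conversation and the label that succeeded:

    conv, provider = Conversation.from_unknown([
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": [{"type": "text", "text": "hello"}]},
    ])
    # With no distinguishing markers the detector defaults to "openai".
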
      # fluent additions
      def with_message(self, msg: Message) -> "Conversation":
@@ -974,12 +1473,11 @@ class Conversation:
              if p["type"] == "text":
                  parts.append(Text(p["text"]))
              elif p["type"] == "image":
-                 # We only stored a placeholder tag, so keep that placeholder.
-                 # You could raise instead if real image bytes are required.
-                 parts.append(Image(p["tag"], detail="low"))
+                 # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
+                 parts.append(Text(p["tag"]))
              elif p["type"] == "file":
-                 # We only stored a placeholder tag, so keep that placeholder.
-                 parts.append(File(p["tag"]))
+                 # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
+                 parts.append(Text(p["tag"]))
              elif p["type"] == "tool_call":
                  parts.append(
                      ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lm_deluge
- Version: 0.0.47
+ Version: 0.0.49
  Summary: Python utility for using LLM API models.
  Author-email: Benjamin Anderson <ben@trytaylor.ai>
  Requires-Python: >=3.10