lm-deluge 0.0.48__py3-none-any.whl → 0.0.49__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lm-deluge might be problematic.
- lm_deluge/api_requests/anthropic.py +0 -4
- lm_deluge/prompt.py +504 -17
- {lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/METADATA +1 -1
- {lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/RECORD +7 -7
- {lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/top_level.txt +0 -0
lm_deluge/api_requests/anthropic.py CHANGED

@@ -28,10 +28,6 @@ def _add_beta(headers: dict, beta: str):
 def _build_anthropic_request(
     model: APIModel,
     context: RequestContext,
-    # prompt: Conversation,
-    # tools: list[Tool | dict | MCPServer] | None,
-    # sampling_params: SamplingParams,
-    # cache_pattern: CachePattern | None = None,
 ):
     prompt = context.prompt
     cache_pattern = context.cache
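
The hunk above completes a refactor: _build_anthropic_request no longer receives the prompt, tools, sampling params, and cache pattern as separate arguments; it reads them from the single RequestContext it is handed. A minimal sketch of the resulting call pattern, with the caveat that the RequestContext constructor arguments are assumptions; the hunk only confirms the .prompt and .cache attribute reads:

# Hypothetical sketch; only `context.prompt` and `context.cache` are
# confirmed by the diff. RequestContext's constructor signature is an assumption.
ctx = RequestContext(prompt=conversation, cache=cache_pattern)
request_body = _build_anthropic_request(model, ctx)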
lm_deluge/prompt.py CHANGED

@@ -334,14 +334,6 @@ class Message:
         Return a JSON-serialisable dict that fully captures the message.
         """
 
-        # DEBUG: Track when to_log is called
-        # print(f"DEBUG: Message.to_log called on {self.role} message with {len(self.parts)} parts")
-        # for i, part in enumerate(self.parts):
-        #     print(f"  Part {i}: {type(part)} - {part.type if hasattr(part, 'type') else 'no type'}")
-        #     if hasattr(part, 'type') and part.type == 'image':
-        #         print(f"    Image data type: {type(part.data)}")
-        #         data_preview = str(part.data)[:50] if isinstance(part.data, str) else f"[{type(part.data).__name__}]"
-        #         print(f"    Image data preview: {data_preview}")
         def _json_safe(value):
             if isinstance(value, (str, int, float, bool)) or value is None:
                 return value
@@ -574,6 +566,51 @@ class Message:
     @classmethod
     def from_anthropic(cls, msg: dict):
         pass
+        # role = (
+        #     "system"
+        #     if msg["role"] in ["developer", "system"]
+        #     else ("user" if msg["role"] == "user" else "assistant")
+        # )
+        # parts: list[Part] = []
+        # content = msg["content"]
+        # if isinstance(content, str):
+        #     parts = [Text(content)]
+        # else:
+        #     part_list = []
+        #     for item in content:
+        #         if item["type"] == "text":
+        #             part_list.append(Text(item["text"]))
+        #         elif item["type"] == "image_url":
+        #             part_list.append(Image(data=item["image_url"]["url"]))
+        #         elif item["type"] == "file":
+        #             file_data = item["file"]
+        #             if "file_id" in file_data:
+        #                 # Handle file ID reference (not implemented yet)
+        #                 part_list.append(File(data=file_data["file_id"]))
+        #             elif "file_data" in file_data:
+        #                 # Handle base64 file data
+        #                 part_list.append(
+        #                     File(
+        #                         data=file_data["file_data"],
+        #                         filename=file_data.get("filename"),
+        #                     )
+        #                 )
+        #     parts = part_list
+
+        # # Handle tool calls (assistant messages)
+        # if "tool_calls" in msg:
+        #     part_list = list(parts) if parts else []
+        #     for tool_call in msg["tool_calls"]:
+        #         part_list.append(
+        #             ToolCall(
+        #                 id=tool_call["id"],
+        #                 name=tool_call["function"]["name"],
+        #                 arguments=json.loads(tool_call["function"]["arguments"]),
+        #             )
+        #         )
+        #     parts = part_list
+
+        # return cls(role, parts)
 
     # ───── provider-specific emission ─────
     def oa_chat(self) -> dict:
@@ -583,11 +620,6 @@ class Message:
         if len(tool_results) == 1:
             tool_result = tool_results[0]
             return tool_result.oa_chat()
-            # {
-            #     "role": "tool",
-            #     "tool_call_id": tool_result.tool_call_id,
-            #     "content": tool_result.result,
-            # }
         else:
             raise ValueError(
                 f"Tool role messages must contain exactly one ToolResult part for OpenAI, got {len(tool_results)}"
@@ -673,14 +705,469 @@ class Conversation:
         return cls([msg])
 
     @classmethod
-    def
+    def from_openai_chat(cls, messages: list[dict]):
         """Compatibility with openai-formatted messages"""
-
+
+        def _to_image_from_url(block: dict) -> Image:
+            payload = block.get("image_url") or block.get("input_image") or {}
+            url = payload.get("url") or payload.get("file_id")
+            detail = payload.get("detail", "auto")
+            media_type = payload.get("media_type")
+            if url is None:
+                raise ValueError("image content missing url")
+            return Image(data=url, media_type=media_type, detail=detail)
+
+        def _to_file(block: dict) -> File:
+            payload = block.get("file") or block.get("input_file") or {}
+            file_id = payload.get("file_id") or block.get("file_id")
+            filename = payload.get("filename")
+            file_data = payload.get("file_data")
+            if file_id is not None:
+                return File(data=b"", filename=filename, file_id=file_id)
+            if file_data is not None:
+                return File(data=file_data, filename=filename)
+            raise ValueError("file content missing file data or id")
+
+        def _to_audio_file(block: dict) -> File:
+            payload = block.get("audio") or block.get("input_audio") or {}
+            file_id = payload.get("file_id")
+            audio_format = payload.get("format", "wav")
+            media_type = f"audio/{audio_format}"
+            data = payload.get("data")
+            if file_id is not None:
+                return File(data=b"", media_type=media_type, file_id=file_id)
+            if data is not None:
+                data_url = f"data:{media_type};base64,{data}"
+                return File(data=data_url, media_type=media_type)
+            raise ValueError("audio block missing data or file id")
+
+        text_types = {"text", "input_text", "output_text", "refusal"}
+        image_types = {"image_url", "input_image", "image"}
+        file_types = {"file", "input_file"}
+        audio_types = {"audio", "input_audio"}
+
+        def _convert_content_blocks(content: str | list[dict] | None) -> list[Part]:
+            parts: list[Part] = []
+            if content is None:
+                return parts
+            if isinstance(content, str):
+                parts.append(Text(content))
+                return parts
+
+            for block in content:
+                block_type = block.get("type")
+                if block_type in text_types:
+                    text_value = block.get("text") or block.get(block_type) or ""
+                    parts.append(Text(text_value))
+                elif block_type in image_types:
+                    parts.append(_to_image_from_url(block))
+                elif block_type in file_types:
+                    parts.append(_to_file(block))
+                elif block_type in audio_types:
+                    parts.append(_to_audio_file(block))
+                elif block_type == "tool_result":
+                    # Rare: assistant echoing tool results – convert to text
+                    result = block.get("content")
+                    if isinstance(result, str):
+                        parts.append(Text(result))
+                    else:
+                        parts.append(Text(json.dumps(result)))
+                elif block_type == "image_file":
+                    payload = block.get("image_file", {})
+                    file_id = payload.get("file_id")
+                    placeholder = {"type": "image_file", "file_id": file_id}
+                    parts.append(Text(json.dumps(placeholder)))
+                else:
+                    parts.append(Text(json.dumps(block)))
+            return parts
+
+        def _convert_tool_arguments(raw: str | dict | None) -> dict:
+            if isinstance(raw, dict):
+                return raw
+            if raw is None:
+                return {}
+            try:
+                return json.loads(raw)
+            except json.JSONDecodeError:
+                return {"__raw__": raw}
+
+        def _convert_tool_result_content(
+            content: str | list[dict] | None,
+        ) -> str | list[ToolResultPart]:
+            if content is None:
+                return ""
+            if isinstance(content, str):
+                return content
+            result_parts: list[ToolResultPart] = []
+            for block in content:
+                block_type = block.get("type")
+                if block_type in {"text", "input_text", "output_text", "refusal"}:
+                    text_value = block.get("text") or block.get(block_type) or ""
+                    result_parts.append(Text(text_value))
+                elif block_type in image_types:
+                    result_parts.append(_to_image_from_url(block))
+                else:
+                    result_parts.append(Text(json.dumps(block)))
+            return result_parts
+
+        conversation_messages: list[Message] = []
+
+        for idx, raw_message in enumerate(messages):
+            role = raw_message.get("role")
+            if role is None:
+                raise ValueError("OpenAI message missing role")
+
+            role_lower = role.lower()
+            if role_lower in {"system", "developer"}:
+                parts = _convert_content_blocks(raw_message.get("content"))
+                conversation_messages.append(Message("system", parts))
+                continue
+
+            if role_lower == "tool" or role_lower == "function":
+                tool_call_id = (
+                    raw_message.get("tool_call_id")
+                    or raw_message.get("id")
+                    or raw_message.get("name")
+                    or f"tool_call_{idx}"
+                )
+                tool_result = ToolResult(
+                    tool_call_id=tool_call_id,
+                    result=_convert_tool_result_content(raw_message.get("content")),
+                )
+                conversation_messages.append(Message("tool", [tool_result]))
+                continue
+
+            mapped_role: Role
+            if role_lower == "user":
+                mapped_role = "user"
+            elif role_lower == "assistant":
+                mapped_role = "assistant"
+            else:
+                raise ValueError(f"Unsupported OpenAI message role: {role}")
+
+            parts = _convert_content_blocks(raw_message.get("content"))
+
+            tool_calls = raw_message.get("tool_calls")
+            if not tool_calls and raw_message.get("function_call") is not None:
+                tool_calls = [
+                    {
+                        "id": raw_message.get("id"),
+                        "type": "function",
+                        "function": raw_message["function_call"],
+                    }
+                ]
+
+            if tool_calls:
+                for call_index, call in enumerate(tool_calls):
+                    call_type = call.get("type", "function")
+                    call_id = (
+                        call.get("id")
+                        or call.get("tool_call_id")
+                        or call.get("call_id")
+                        or f"tool_call_{idx}_{call_index}"
+                    )
+
+                    if call_type == "function":
+                        function_payload = call.get("function", {})
+                        name = (
+                            function_payload.get("name")
+                            or call.get("name")
+                            or "function"
+                        )
+                        arguments = _convert_tool_arguments(
+                            function_payload.get("arguments")
+                        )
+                        parts.append(
+                            ToolCall(
+                                id=call_id,
+                                name=name,
+                                arguments=arguments,
+                            )
+                        )
+                    else:
+                        payload = call.get(call_type, {})
+                        if not isinstance(payload, dict):
+                            payload = {"value": payload}
+                        arguments = payload.get("arguments")
+                        if arguments is None:
+                            arguments = payload
+                        parts.append(
+                            ToolCall(
+                                id=call_id,
+                                name=call_type,
+                                arguments=arguments
+                                if isinstance(arguments, dict)
+                                else {"value": arguments},
+                                built_in=True,
+                                built_in_type=call_type,
+                                extra_body=payload,
+                            )
+                        )
+
+            conversation_messages.append(Message(mapped_role, parts))
+
+        return cls(conversation_messages)
 
     @classmethod
-    def from_anthropic(
+    def from_anthropic(
+        cls, messages: list[dict], system: str | list[dict] | None = None
+    ):
         """Compatibility with anthropic-formatted messages"""
-
+
+        def _anthropic_text_part(text_value: str | None) -> Text:
+            return Text(text_value or "")
+
+        def _anthropic_image(block: dict) -> Image:
+            source = block.get("source", {})
+            source_type = source.get("type")
+            if source_type == "base64":
+                media_type = source.get("media_type", "image/png")
+                data = source.get("data", "")
+                return Image(
+                    data=f"data:{media_type};base64,{data}",
+                    media_type=media_type,
+                )
+            if source_type == "url":
+                media_type = source.get("media_type")
+                url = source.get("url")
+                if url is None:
+                    raise ValueError("Anthropic image source missing url")
+                return Image(data=url, media_type=media_type)
+            if source_type == "file":
+                file_id = source.get("file_id")
+                if file_id is None:
+                    raise ValueError("Anthropic image file source missing file_id")
+                raise ValueError(
+                    "Anthropic image file references require external fetch"
+                )
+            raise ValueError(f"Unsupported Anthropic image source: {source_type}")
+
+        def _anthropic_file(block: dict) -> File:
+            source = block.get("source", {})
+            source_type = source.get("type")
+            if source_type == "file":
+                file_id = source.get("file_id")
+                if file_id is None:
+                    raise ValueError("Anthropic file source missing file_id")
+                return File(data=b"", file_id=file_id)
+            if source_type == "base64":
+                media_type = source.get("media_type")
+                data = source.get("data", "")
+                return File(
+                    data=f"data:{media_type};base64,{data}",
+                    media_type=media_type,
+                    filename=block.get("name"),
+                )
+            raise ValueError(f"Unsupported Anthropic file source: {source_type}")
+
+        def _anthropic_tool_result_content(
+            content: str | list[dict] | None,
+        ) -> str | list[ToolResultPart]:
+            if content is None:
+                return ""
+            if isinstance(content, str):
+                return content
+            result_parts: list[ToolResultPart] = []
+            for part in content:
+                part_type = part.get("type")
+                if part_type == "text":
+                    result_parts.append(_anthropic_text_part(part.get("text")))
+                elif part_type == "image":
+                    try:
+                        result_parts.append(_anthropic_image(part))
+                    except ValueError:
+                        result_parts.append(Text(json.dumps(part)))
+                else:
+                    result_parts.append(Text(json.dumps(part)))
+            return result_parts
+
+        def _anthropic_content_to_parts(
+            role: Role, content: str | list[dict] | None
+        ) -> list[Part]:
+            parts: list[Part] = []
+            if content is None:
+                return parts
+            if isinstance(content, str):
+                parts.append(_anthropic_text_part(content))
+                return parts
+
+            for block in content:
+                block_type = block.get("type")
+                if block_type == "text":
+                    parts.append(_anthropic_text_part(block.get("text")))
+                elif block_type == "image":
+                    try:
+                        parts.append(_anthropic_image(block))
+                    except ValueError:
+                        parts.append(Text(json.dumps(block)))
+                elif block_type == "document":
+                    try:
+                        parts.append(_anthropic_file(block))
+                    except ValueError:
+                        parts.append(Text(json.dumps(block)))
+                elif block_type == "tool_use":
+                    tool_id = block.get("id")
+                    if tool_id is None:
+                        raise ValueError("Anthropic tool_use block missing id")
+                    name = block.get("name") or "tool"
+                    arguments = block.get("input") or {}
+                    parts.append(
+                        ToolCall(
+                            id=tool_id,
+                            name=name,
+                            arguments=arguments
+                            if isinstance(arguments, dict)
+                            else {"value": arguments},
+                        )
+                    )
+                elif block_type == "tool_result":
+                    tool_use_id = block.get("tool_use_id")
+                    if tool_use_id is None:
+                        raise ValueError(
+                            "Anthropic tool_result block missing tool_use_id"
+                        )
+                    result = _anthropic_tool_result_content(block.get("content"))
+                    tool_result = ToolResult(tool_call_id=tool_use_id, result=result)
+                    parts.append(tool_result)
+                elif block_type == "thinking":
+                    thinking_content = block.get("thinking", "")
+                    parts.append(Thinking(content=thinking_content, raw_payload=block))
+                else:
+                    parts.append(Text(json.dumps(block)))
+            return parts
+
+        conversation_messages: list[Message] = []
+
+        if system is not None:
+            if isinstance(system, str):
+                conversation_messages.append(Message("system", [Text(system)]))
+            elif isinstance(system, list):
+                system_parts = _anthropic_content_to_parts("system", system)
+                conversation_messages.append(Message("system", system_parts))
+            else:
+                raise ValueError(
+                    "Anthropic system prompt must be string or list of blocks"
+                )
+
+        for message in messages:
+            role = message.get("role")
+            if role is None:
+                raise ValueError("Anthropic message missing role")
+
+            if role not in {"user", "assistant"}:
+                raise ValueError(f"Unsupported Anthropic role: {role}")
+
+            base_role: Role = role  # type: ignore[assignment]
+            content = message.get("content")
+            if isinstance(content, list):
+                buffer_parts: list[Part] = []
+                for block in content:
+                    block_type = block.get("type")
+                    if block_type == "tool_result":
+                        if buffer_parts:
+                            conversation_messages.append(
+                                Message(base_role, buffer_parts)
+                            )
+                            buffer_parts = []
+                        tool_use_id = block.get("tool_use_id")
+                        if tool_use_id is None:
+                            raise ValueError(
+                                "Anthropic tool_result block missing tool_use_id"
+                            )
+                        result = _anthropic_tool_result_content(block.get("content"))
+                        conversation_messages.append(
+                            Message(
+                                "tool",
+                                [ToolResult(tool_call_id=tool_use_id, result=result)],
+                            )
+                        )
+                    else:
+                        block_parts = _anthropic_content_to_parts(base_role, [block])
+                        buffer_parts.extend(block_parts)
+
+                if buffer_parts:
+                    conversation_messages.append(Message(base_role, buffer_parts))
+            else:
+                parts = _anthropic_content_to_parts(base_role, content)
+                conversation_messages.append(Message(base_role, parts))
+
+        return cls(conversation_messages)
+
+    @classmethod
+    def from_unknown(
+        cls, messages: list[dict], *, system: str | list[dict] | None = None
+    ) -> tuple["Conversation", str]:
+        """Attempt to convert provider-formatted messages without knowing the provider.
+
+        Returns the parsed conversation together with the provider label that succeeded
+        ("openai" or "anthropic").
+        """
+
+        def _detect_provider() -> str:
+            has_openai_markers = False
+            has_anthropic_markers = False
+
+            for msg in messages:
+                role = msg.get("role")
+                if role == "tool":
+                    has_openai_markers = True
+
+                if role == "system":
+                    has_openai_markers = True
+
+                if (
+                    "tool_calls" in msg
+                    or "function_call" in msg
+                    or "tool_call_id" in msg
+                ):
+                    has_openai_markers = True
+
+                content = msg.get("content")
+                if isinstance(content, list):
+                    for block in content:
+                        if not isinstance(block, dict):
+                            continue
+                        block_type = block.get("type")
+                        if block_type in {
+                            "tool_use",
+                            "tool_result",
+                            "thinking",
+                            "assistant_response",
+                            "redacted",
+                        }:
+                            has_anthropic_markers = True
+                        if block_type == "tool_result" and block.get("tool_use_id"):
+                            has_anthropic_markers = True
+                        if block_type == "tool_use":
+                            has_anthropic_markers = True
+
+            if has_openai_markers and not has_anthropic_markers:
+                return "openai"
+            if has_anthropic_markers and not has_openai_markers:
+                return "anthropic"
+            if has_openai_markers:
+                return "openai"
+            if has_anthropic_markers:
+                return "anthropic"
+            # As a fallback, default to OpenAI which is the most permissive
+            return "openai"
+
+        provider = _detect_provider()
+        if provider == "openai":
+            try:
+                return cls.from_openai_chat(messages), "openai"
+            except Exception:
+                try:
+                    return cls.from_anthropic(messages, system=system), "anthropic"
+                except Exception as anthropic_error:
+                    raise ValueError(
+                        "Unable to parse messages as OpenAI or Anthropic"
+                    ) from anthropic_error
+        else:
+            try:
+                return cls.from_anthropic(messages, system=system), "anthropic"
+            except Exception:
+                return cls.from_openai_chat(messages), "openai"
 
     # fluent additions
     def with_message(self, msg: Message) -> "Conversation":
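
Taken together, the prompt.py additions give Conversation three ingestion paths: from_openai_chat for OpenAI-style message lists, from_anthropic for Anthropic-style messages with an optional system prompt, and from_unknown, which scans for provider markers (tool_calls/tool_call_id versus tool_use/tool_result blocks) and falls back to the other parser on failure. A usage sketch, assuming only the signatures visible in the diff and the import path implied by the RECORD entries:

from lm_deluge.prompt import Conversation

openai_messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": [{"type": "text", "text": "hi"}]},
]
conv = Conversation.from_openai_chat(openai_messages)

anthropic_messages = [
    {"role": "user", "content": [{"type": "text", "text": "hi"}]},
]
conv2 = Conversation.from_anthropic(anthropic_messages, system="You are terse.")

# from_unknown returns the conversation plus the provider label that parsed it.
conv3, provider = Conversation.from_unknown(openai_messages)
assert provider in ("openai", "anthropic")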
{lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/RECORD CHANGED

@@ -10,14 +10,14 @@ lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/file.py,sha256=FGomcG8s2go_55Z2CChflHgmU-UqgFftgFY8c7f_G70,5631
 lm_deluge/gemini_limits.py,sha256=V9mpS9JtXYz7AY6OuKyQp5TuIMRH1BVv9YrSNmGmHNA,1569
 lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
-lm_deluge/prompt.py,sha256=
+lm_deluge/prompt.py,sha256=2-6bALg_hOfExh9vHeKPFA6E_O8rHe6p9eIdvCulERs,59654
 lm_deluge/request_context.py,sha256=o33LSEwnK6YPhZeulUoSE_VrdKCXiCQa0tjjixK2K6M,2540
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
 lm_deluge/tool.py,sha256=_coOKB9nPNVZoseMRumRyQ8BMR7_d0IlstzMHNT69JY,15732
 lm_deluge/tracker.py,sha256=EHFPsS94NmsON2u97rSE70q1t6pwCsixUmGV-kIphMs,11531
 lm_deluge/usage.py,sha256=VMEKghePFIID5JFBObqYxFpgYxnbYm_dnHy7V1-_T6M,4866
 lm_deluge/api_requests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-lm_deluge/api_requests/anthropic.py,sha256=
+lm_deluge/api_requests/anthropic.py,sha256=8MledxnN0S-H_fZRq8DGUokcjZPQ154mr8tPWAussJ8,7992
 lm_deluge/api_requests/base.py,sha256=EVHNFtlttKbN7Tt1MnLaO-NjvKHPSV5CqlRv-OnpVAE,5593
 lm_deluge/api_requests/bedrock.py,sha256=GmVxXz3ERAeQ7e52Nlztt81O4H9eJOQeOnS6b65vjm4,15453
 lm_deluge/api_requests/common.py,sha256=BZ3vRO5TB669_UsNKugkkuFSzoLHOYJIKt4nV4sf4vc,422

@@ -66,8 +66,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
+lm_deluge-0.0.49.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.49.dist-info/METADATA,sha256=wxyjPS3Kf3XlXbO_Qc6RcmEWp7Lfe3La3tcDB_OH0_w,13443
+lm_deluge-0.0.49.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.49.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.49.dist-info/RECORD,,
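
For reference, each RECORD line has the form path,sha256=<hash>,<size>: the hash is the unpadded urlsafe-base64 SHA-256 digest of the file and the size is in bytes, while the RECORD file itself carries empty hash and size fields, as in the last added line above. A minimal sketch of how such an entry is computed:

import base64
import hashlib

def record_entry(path: str, data: bytes) -> str:
    # Wheel RECORD hash: urlsafe base64 of the sha256 digest, '=' padding stripped.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# e.g. record_entry("lm_deluge/prompt.py", open("lm_deluge/prompt.py", "rb").read())
# should reproduce the +lm_deluge/prompt.py line in the hunk above.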
{lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/WHEEL: File without changes
{lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/licenses/LICENSE: File without changes
{lm_deluge-0.0.48.dist-info → lm_deluge-0.0.49.dist-info}/top_level.txt: File without changes