lm-deluge 0.0.8__py3-none-any.whl → 0.0.10__py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- lm_deluge/api_requests/anthropic.py +23 -7
- lm_deluge/api_requests/base.py +38 -11
- lm_deluge/api_requests/bedrock.py +283 -0
- lm_deluge/api_requests/common.py +2 -0
- lm_deluge/api_requests/mistral.py +2 -2
- lm_deluge/api_requests/openai.py +37 -6
- lm_deluge/client.py +18 -3
- lm_deluge/models.py +89 -24
- lm_deluge/prompt.py +352 -16
- lm_deluge/tool.py +212 -38
- {lm_deluge-0.0.8.dist-info → lm_deluge-0.0.10.dist-info}/METADATA +4 -1
- {lm_deluge-0.0.8.dist-info → lm_deluge-0.0.10.dist-info}/RECORD +15 -14
- {lm_deluge-0.0.8.dist-info → lm_deluge-0.0.10.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.8.dist-info → lm_deluge-0.0.10.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.8.dist-info → lm_deluge-0.0.10.dist-info}/top_level.txt +0 -0
lm_deluge/prompt.py
CHANGED
@@ -4,14 +4,14 @@ import tiktoken
 import xxhash
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import Literal
+from typing import Literal
 from lm_deluge.models import APIModel
 from lm_deluge.image import Image
 
 ###############################################################################
 # 1. Low-level content blocks – either text or an image #
 ###############################################################################
-Role = Literal["system", "user", "assistant"]
+Role = Literal["system", "user", "assistant", "tool"]
 
 
 @dataclass(slots=True)
@@ -40,18 +40,235 @@ class Text:
         return {"type": "text", "text": self.text}
 
 
+@dataclass(slots=True)
+class ToolCall:
+    id: str  # unique identifier
+    name: str  # function name
+    arguments: dict  # parsed arguments
+    type: str = field(init=False, default="tool_call")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(
+            f"{self.id}:{self.name}:{json.dumps(self.arguments, sort_keys=True)}".encode()
+        ).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────────
+    def oa_chat(self) -> dict:  # OpenAI Chat Completions
+        return {
+            "id": self.id,
+            "type": "function",
+            "function": {"name": self.name, "arguments": json.dumps(self.arguments)},
+        }
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {
+            "type": "function_call",
+            "id": self.id,
+            "name": self.name,
+            "arguments": self.arguments,
+        }
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {
+            "type": "tool_use",
+            "id": self.id,
+            "name": self.name,
+            "input": self.arguments,
+        }
+
+    def gemini(self) -> dict:
+        return {"functionCall": {"name": self.name, "args": self.arguments}}
+
+    def mistral(self) -> dict:
+        return {
+            "type": "tool_call",
+            "id": self.id,
+            "function": {"name": self.name, "arguments": json.dumps(self.arguments)},
+        }
+
+
+@dataclass(slots=True)
+class ToolResult:
+    tool_call_id: str  # references the ToolCall.id
+    result: str  # tool execution result
+    type: str = field(init=False, default="tool_result")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(f"{self.tool_call_id}:{self.result}".encode()).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────────
+    def oa_chat(
+        self,
+    ) -> dict:  # OpenAI Chat Completions - tool results are separate messages
+        return {"tool_call_id": self.tool_call_id, "content": self.result}
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {
+            "type": "function_result",
+            "call_id": self.tool_call_id,
+            "result": self.result,
+        }
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {
+            "type": "tool_result",
+            "tool_use_id": self.tool_call_id,
+            "content": self.result,
+        }
+
+    def gemini(self) -> dict:
+        return {
+            "functionResponse": {
+                "name": self.tool_call_id,  # Gemini uses name field for ID
+                "response": {"result": self.result},
+            }
+        }
+
+    def mistral(self) -> dict:
+        return {
+            "type": "tool_result",
+            "tool_call_id": self.tool_call_id,
+            "content": self.result,
+        }
+
+
+@dataclass(slots=True)
+class Thinking:
+    content: str  # reasoning content (o1, Claude thinking, etc.)
+    type: str = field(init=False, default="thinking")
+
+    @property
+    def fingerprint(self) -> str:
+        return xxhash.xxh64(self.content.encode()).hexdigest()
+
+    # ── provider-specific emission ────────────────────────────────────────────
+    def oa_chat(self) -> dict:  # OpenAI Chat Completions
+        # Thinking is typically not emitted back, but if needed:
+        return {"type": "text", "text": f"[Thinking: {self.content}]"}
+
+    def oa_resp(self) -> dict:  # OpenAI Responses
+        return {"type": "reasoning", "content": self.content}
+
+    def anthropic(self) -> dict:  # Anthropic Messages
+        return {"type": "thinking", "thinking": self.content}
+
+    def gemini(self) -> dict:
+        return {"text": f"[Thinking: {self.content}]"}
+
+    def mistral(self) -> dict:
+        return {"type": "text", "text": f"[Thinking: {self.content}]"}
+
+
+Part = Text | Image | ToolCall | ToolResult | Thinking
+
+
 ###############################################################################
 # 2. One conversational turn (role + parts) #
 ###############################################################################
 @dataclass(slots=True)
 class Message:
     role: Role
-    parts: list[
+    parts: list[Part]
 
     @property
     def fingerprint(self) -> str:
         return self.role + "," + ",".join(part.fingerprint for part in self.parts)
 
+    @property
+    def completion(self) -> str | None:
+        """Extract text content from the first Text part, for backward compatibility."""
+        for part in self.parts:
+            if isinstance(part, Text):
+                return part.text
+        return None
+
+    @property
+    def tool_calls(self) -> list["ToolCall"]:
+        """Get all tool call parts with proper typing."""
+        return [part for part in self.parts if part.type == "tool_call"]  # type: ignore
+
+    @property
+    def tool_results(self) -> list["ToolResult"]:
+        """Get all tool result parts with proper typing."""
+        return [part for part in self.parts if part.type == "tool_result"]  # type: ignore
+
+    @property
+    def text_parts(self) -> list["Text"]:
+        """Get all text parts with proper typing."""
+        return [part for part in self.parts if part.type == "text"]  # type: ignore
+
+    @property
+    def images(self) -> list[Image]:
+        """Get all image parts with proper typing."""
+        return [part for part in self.parts if part.type == "image"]  # type: ignore
+
+    @property
+    def thinking_parts(self) -> list["Thinking"]:
+        """Get all thinking parts with proper typing."""
+        return [part for part in self.parts if part.type == "thinking"]  # type: ignore
+
+    def to_log(self) -> dict:
+        """
+        Return a JSON-serialisable dict that fully captures the message.
+        """
+        content_blocks: list[dict] = []
+        for p in self.parts:
+            if isinstance(p, Text):
+                content_blocks.append({"type": "text", "text": p.text})
+            elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
+                w, h = p.size
+                content_blocks.append({"type": "image", "tag": f"<Image ({w}×{h})>"})
+            elif isinstance(p, ToolCall):
+                content_blocks.append(
+                    {
+                        "type": "tool_call",
+                        "id": p.id,
+                        "name": p.name,
+                        "arguments": p.arguments,
+                    }
+                )
+            elif isinstance(p, ToolResult):
+                content_blocks.append(
+                    {
+                        "type": "tool_result",
+                        "tool_call_id": p.tool_call_id,
+                        "result": p.result,
+                    }
+                )
+            elif isinstance(p, Thinking):
+                content_blocks.append({"type": "thinking", "content": p.content})
+
+        return {"role": self.role, "content": content_blocks}
+
+    @classmethod
+    def from_log(cls, data: dict) -> "Message":
+        """Re-hydrate a Message previously produced by `to_log()`."""
+        role: Role = data["role"]
+        parts: list[Part] = []
+
+        for p in data["content"]:
+            if p["type"] == "text":
+                parts.append(Text(p["text"]))
+            elif p["type"] == "image":
+                # We only stored a placeholder tag, so keep that placeholder.
+                parts.append(Image(p["tag"], detail="low"))
+            elif p["type"] == "tool_call":
+                parts.append(
+                    ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
+                )
+            elif p["type"] == "tool_result":
+                parts.append(
+                    ToolResult(tool_call_id=p["tool_call_id"], result=p["result"])
+                )
+            elif p["type"] == "thinking":
+                parts.append(Thinking(content=p["content"]))
+            else:
+                raise ValueError(f"Unknown part type {p['type']!r}")
+
+        return cls(role, parts)
+
     def add_text(self, content: str) -> "Message":
         """Append a text block and return self for chaining."""
         self.parts.append(Text(content))
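
For orientation, a minimal sketch of how the new part types serialize per provider, assuming `ToolCall` is importable from `lm_deluge.prompt` as this diff suggests; the `call_1`/`get_weather` values are hypothetical:

    from lm_deluge.prompt import ToolCall

    call = ToolCall(id="call_1", name="get_weather", arguments={"city": "Paris"})

    call.oa_chat()    # {"id": "call_1", "type": "function",
                      #  "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
    call.anthropic()  # {"type": "tool_use", "id": "call_1", "name": "get_weather",
                      #  "input": {"city": "Paris"}}
    call.gemini()     # {"functionCall": {"name": "get_weather", "args": {"city": "Paris"}}}

Note that the Chat Completions form stringifies `arguments` with `json.dumps`, while the Responses, Anthropic, and Gemini forms pass the dict through.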
@@ -81,6 +298,21 @@ class Message:
         self.parts.append(img)
         return self
 
+    def add_tool_call(self, id: str, name: str, arguments: dict) -> "Message":
+        """Append a tool call block and return self for chaining."""
+        self.parts.append(ToolCall(id=id, name=name, arguments=arguments))
+        return self
+
+    def add_tool_result(self, tool_call_id: str, result: str) -> "Message":
+        """Append a tool result block and return self for chaining."""
+        self.parts.append(ToolResult(tool_call_id=tool_call_id, result=result))
+        return self
+
+    def add_thinking(self, content: str) -> "Message":
+        """Append a thinking block and return self for chaining."""
+        self.parts.append(Thinking(content=content))
+        return self
+
     # -------- convenient constructors --------
     @classmethod
     def user(
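
The new builders chain like the existing `add_text`; a sketch with hypothetical values:

    msg = (
        Message("assistant", [])
        .add_thinking("The user wants current weather.")
        .add_text("Let me look that up.")
        .add_tool_call(id="call_1", name="get_weather", arguments={"city": "Paris"})
    )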
@@ -118,16 +350,32 @@ class Message:
             if msg["role"] in ["developer", "system"]
             else ("user" if msg["role"] == "user" else "assistant")
         )
-        parts:
+        parts: list[Part] = []
         content = msg["content"]
         if isinstance(content, str):
-            parts
+            parts = [Text(content)]
         else:
+            part_list = []
             for item in content:
                 if item["type"] == "text":
-
+                    part_list.append(Text(item["text"]))
                 elif item["type"] == "image_url":
-
+                    part_list.append(Image(data=item["image_url"]["url"]))
+            parts = part_list
+
+        # Handle tool calls (assistant messages)
+        if "tool_calls" in msg:
+            part_list = list(parts) if parts else []
+            for tool_call in msg["tool_calls"]:
+                part_list.append(
+                    ToolCall(
+                        id=tool_call["id"],
+                        name=tool_call["function"]["name"],
+                        arguments=json.loads(tool_call["function"]["arguments"]),
+                    )
+                )
+            parts = part_list
+
         return cls(role, parts)
 
     @classmethod
@@ -140,10 +388,35 @@ class Message:
 
     # ───── provider-specific emission ─────
     def oa_chat(self) -> dict:
-
-
-
-
+        if self.role == "tool":
+            # For tool messages, we expect a single ToolResult part (after splitting in to_openai)
+            tool_results = [p for p in self.parts if isinstance(p, ToolResult)]
+            if len(tool_results) == 1:
+                tool_result = tool_results[0]
+                return {
+                    "role": "tool",
+                    "tool_call_id": tool_result.tool_call_id,
+                    "content": tool_result.result,
+                }
+            else:
+                raise ValueError(
+                    f"Tool role messages must contain exactly one ToolResult part for OpenAI, got {len(tool_results)}"
+                )
+        else:
+            content = []
+            tool_calls = []
+
+            for p in self.parts:
+                if isinstance(p, ToolCall):
+                    tool_calls.append(p.oa_chat())
+                else:
+                    content.append(p.oa_chat())
+
+            result = {"role": self.role, "content": content}
+            if tool_calls:
+                result["tool_calls"] = tool_calls
+
+            return result
 
     def oa_resp(self) -> dict:
         content = [p.oa_resp() for p in self.parts]
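
Under this scheme an assistant turn keeps text in `content` and lifts `ToolCall` parts into a sibling `tool_calls` array, matching the Chat Completions shape; a sketch with hypothetical values:

    msg = Message("assistant", []).add_text("Checking.").add_tool_call(
        id="call_1", name="get_weather", arguments={"city": "Paris"}
    )
    msg.oa_chat()
    # {"role": "assistant",
    #  "content": [{"type": "text", "text": "Checking."}],
    #  "tool_calls": [{"id": "call_1", "type": "function",
    #                  "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}]}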
@@ -155,7 +428,7 @@ class Message:
             raise ValueError("Anthropic keeps system outside message list")
         content = [p.anthropic() for p in self.parts]
         # Shortcut: single text becomes a bare string
-        if len(content) == 1 and content[0]
+        if len(content) == 1 and content[0].get("type") == "text":
             content = content[0]["text"]
         return {"role": self.role, "content": content}
 
@@ -210,9 +483,34 @@ class Conversation:
         self.messages.append(msg)
         return self
 
+    def add_tool_result(self, tool_call_id: str, result: str) -> "Conversation":
+        """Add a tool result to the conversation.
+
+        If the conversation ends with a tool message, append to it (for parallel tool calls).
+        Otherwise, create a new tool message.
+        """
+        if self.messages and self.messages[-1].role == "tool":
+            # Append to existing tool message (parallel tool calls)
+            self.messages[-1].add_tool_result(tool_call_id, result)
+        else:
+            # Create new tool message
+            tool_msg = Message("tool", [])
+            tool_msg.add_tool_result(tool_call_id, result)
+            self.messages.append(tool_msg)
+        return self
+
     # ── conversions -----------------------------------------------------------
     def to_openai(self) -> list[dict]:
-
+        result = []
+        for m in self.messages:
+            if m.role == "tool" and len(m.tool_results) > 1:
+                # Split tool messages with multiple results into separate messages for OpenAI
+                for tool_result in m.tool_results:
+                    tool_msg = Message("tool", [tool_result])
+                    result.append(tool_msg.oa_chat())
+            else:
+                result.append(m.oa_chat())
+        return result
 
     def to_openai_responses(self) -> dict:
         # OpenAI Responses = single “input” array, role must be user/assistant
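
So parallel tool results that `add_tool_result` collects into a single tool message come back apart at emission time. A sketch that emulates the splitting rule `to_openai` applies internally (Conversation construction is not shown in this hunk, so the message is built directly; call IDs are hypothetical):

    msg = (
        Message("tool", [])
        .add_tool_result("call_1", "22°C")
        .add_tool_result("call_2", "rainy")
    )
    [Message("tool", [r]).oa_chat() for r in msg.tool_results]
    # [{"role": "tool", "tool_call_id": "call_1", "content": "22°C"},
    #  {"role": "tool", "tool_call_id": "call_2", "content": "rainy"}]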
@@ -227,7 +525,16 @@ class Conversation:
             ),
             None,
         )
-        other = [
+        other = []
+        for m in self.messages:
+            if m.role == "system":
+                continue
+            elif m.role == "tool":
+                # Convert tool messages to user messages for Anthropic
+                user_msg = Message("user", m.parts)
+                other.append(user_msg.anthropic())
+            else:
+                other.append(m.anthropic())
         return system_msg, other
 
     def to_gemini(self) -> tuple[str | None, list[dict]]:
@@ -295,11 +602,30 @@ class Conversation:
             for p in msg.parts:
                 if isinstance(p, Text):
                     content_blocks.append({"type": "text", "text": p.text})
-
+                elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
                     w, h = p.size
                     content_blocks.append(
                         {"type": "image", "tag": f"<Image ({w}×{h})>"}
                     )
+                elif isinstance(p, ToolCall):
+                    content_blocks.append(
+                        {
+                            "type": "tool_call",
+                            "id": p.id,
+                            "name": p.name,
+                            "arguments": p.arguments,
+                        }
+                    )
+                elif isinstance(p, ToolResult):
+                    content_blocks.append(
+                        {
+                            "type": "tool_result",
+                            "tool_call_id": p.tool_call_id,
+                            "result": p.result,
+                        }
+                    )
+                elif isinstance(p, Thinking):
+                    content_blocks.append({"type": "thinking", "content": p.content})
             serialized.append({"role": msg.role, "content": content_blocks})
 
         return {"messages": serialized}
@@ -311,7 +637,7 @@ class Conversation:
 
         for m in payload.get("messages", []):
             role: Role = m["role"]  # 'system' | 'user' | 'assistant'
-            parts: list[Text | Image] = []
+            parts: list[Text | Image | ToolCall | ToolResult | Thinking] = []
 
             for p in m["content"]:
                 if p["type"] == "text":
@@ -320,6 +646,16 @@ class Conversation:
                     # We only stored a placeholder tag, so keep that placeholder.
                     # You could raise instead if real image bytes are required.
                     parts.append(Image(p["tag"], detail="low"))
+                elif p["type"] == "tool_call":
+                    parts.append(
+                        ToolCall(id=p["id"], name=p["name"], arguments=p["arguments"])
+                    )
+                elif p["type"] == "tool_result":
+                    parts.append(
+                        ToolResult(tool_call_id=p["tool_call_id"], result=p["result"])
+                    )
+                elif p["type"] == "thinking":
+                    parts.append(Thinking(content=p["content"]))
                 else:
                     raise ValueError(f"Unknown part type {p['type']!r}")
 
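
Taken together with the `Message.to_log`/`from_log` pair above, tool traffic now survives a log round trip; a sketch with hypothetical values (image parts are redacted to a placeholder tag, so only non-image parts round-trip losslessly):

    msg = (
        Message("assistant", [])
        .add_thinking("Need the forecast first.")
        .add_tool_call(id="call_1", name="get_weather", arguments={"city": "Paris"})
    )
    restored = Message.from_log(msg.to_log())
    assert restored.fingerprint == msg.fingerprint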