lm-deluge 0.0.88__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (41)
  1. lm_deluge/__init__.py +0 -24
  2. lm_deluge/api_requests/anthropic.py +25 -5
  3. lm_deluge/api_requests/base.py +37 -0
  4. lm_deluge/api_requests/bedrock.py +23 -2
  5. lm_deluge/api_requests/gemini.py +36 -10
  6. lm_deluge/api_requests/openai.py +31 -4
  7. lm_deluge/batches.py +15 -45
  8. lm_deluge/client.py +27 -1
  9. lm_deluge/models/__init__.py +2 -0
  10. lm_deluge/models/anthropic.py +12 -12
  11. lm_deluge/models/google.py +13 -0
  12. lm_deluge/models/minimax.py +9 -1
  13. lm_deluge/models/openrouter.py +48 -0
  14. lm_deluge/models/zai.py +50 -1
  15. lm_deluge/pipelines/gepa/docs/samples.py +19 -10
  16. lm_deluge/prompt.py +333 -68
  17. lm_deluge/server/__init__.py +24 -0
  18. lm_deluge/server/__main__.py +144 -0
  19. lm_deluge/server/adapters.py +369 -0
  20. lm_deluge/server/app.py +388 -0
  21. lm_deluge/server/auth.py +71 -0
  22. lm_deluge/server/model_policy.py +215 -0
  23. lm_deluge/server/models_anthropic.py +172 -0
  24. lm_deluge/server/models_openai.py +175 -0
  25. lm_deluge/skills/anthropic.py +0 -0
  26. lm_deluge/skills/compat.py +0 -0
  27. lm_deluge/tool/__init__.py +13 -1
  28. lm_deluge/tool/prefab/sandbox/__init__.py +19 -0
  29. lm_deluge/tool/prefab/sandbox/daytona_sandbox.py +483 -0
  30. lm_deluge/tool/prefab/sandbox/docker_sandbox.py +609 -0
  31. lm_deluge/tool/prefab/sandbox/fargate_sandbox.py +546 -0
  32. lm_deluge/tool/prefab/sandbox/modal_sandbox.py +469 -0
  33. lm_deluge/tool/prefab/sandbox/seatbelt_sandbox.py +827 -0
  34. lm_deluge/tool/prefab/skills.py +0 -0
  35. {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/METADATA +4 -3
  36. {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/RECORD +39 -24
  37. lm_deluge/mock_openai.py +0 -643
  38. lm_deluge/tool/prefab/sandbox.py +0 -1621
  39. {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/WHEEL +0 -0
  40. {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/licenses/LICENSE +0 -0
  41. {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/top_level.txt +0 -0
lm_deluge/prompt.py CHANGED
@@ -23,16 +23,95 @@ CachePattern = Literal[
 # 1. Low-level content blocks – either text or an image #
 ###############################################################################
 Role = Literal["system", "user", "assistant", "tool"]
+SignatureProvider = Literal["anthropic", "gemini"]
+
+
+@dataclass(slots=True)
+class ThoughtSignature:
+    value: str
+    provider: SignatureProvider | None = None
+
+    def for_provider(self, provider: SignatureProvider) -> str | None:
+        if self.provider is None or self.provider == provider:
+            return self.value
+        return None
+
+
+ThoughtSignatureLike: TypeAlias = ThoughtSignature | str
+
+
+def _normalize_signature(
+    signature: ThoughtSignatureLike | None,
+    *,
+    provider: SignatureProvider | None = None,
+) -> ThoughtSignature | None:
+    if signature is None:
+        return None
+    if isinstance(signature, ThoughtSignature):
+        if provider is not None and signature.provider is None:
+            return ThoughtSignature(signature.value, provider)
+        return signature
+    return ThoughtSignature(signature, provider)
+
+
+def _signature_for_provider(
+    signature: ThoughtSignatureLike | None, provider: SignatureProvider
+) -> str | None:
+    if signature is None:
+        return None
+    if isinstance(signature, ThoughtSignature):
+        return signature.for_provider(provider)
+    return signature
+
+
+def _signature_value(signature: ThoughtSignatureLike | None) -> str | None:
+    if signature is None:
+        return None
+    if isinstance(signature, ThoughtSignature):
+        return signature.value
+    return signature
+
+
+def _serialize_signature(signature: ThoughtSignatureLike | None) -> str | dict | None:
+    if signature is None:
+        return None
+    if isinstance(signature, ThoughtSignature):
+        if signature.provider is None:
+            return signature.value
+        return {"value": signature.value, "provider": signature.provider}
+    return signature
+
+
+def _deserialize_signature(payload: str | dict | None) -> ThoughtSignature | None:
+    if payload is None:
+        return None
+    if isinstance(payload, dict):
+        value = payload.get("value")
+        provider = payload.get("provider")
+        if isinstance(value, str):
+            if provider in ("anthropic", "gemini"):
+                return ThoughtSignature(value, provider)
+            return ThoughtSignature(value)
+        return None
+    if isinstance(payload, str):
+        return ThoughtSignature(payload)
+    return None
 
 
 @dataclass(slots=True)
 class Text:
     text: str
     type: str = field(init=False, default="text")
+    # for gemini 3 - thought signatures to maintain reasoning context
+    thought_signature: ThoughtSignatureLike | None = None
+
+    def __post_init__(self) -> None:
+        self.thought_signature = _normalize_signature(self.thought_signature)
 
     @property
     def fingerprint(self) -> str:
-        return xxhash.xxh64(self.text.encode()).hexdigest()
+        signature = _signature_value(self.thought_signature) or ""
+        return xxhash.xxh64(f"{self.text}:{signature}".encode()).hexdigest()
 
     # ── provider-specific emission ────────────────────────────────────────────
     def oa_chat(self) -> dict | str:  # OpenAI Chat Completions
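The helpers above are small pure functions, so their intended semantics can be pinned down with a short sketch. This is an illustration only, assuming Text and ThoughtSignature are importable from lm_deluge.prompt as the diff suggests:

    from lm_deluge.prompt import Text, ThoughtSignature

    # A bare string normalizes to an unscoped signature, emitted to any provider.
    t = Text("hello", thought_signature="sig-abc")
    assert t.gemini() == {"text": "hello", "thoughtSignature": "sig-abc"}

    # A provider-scoped signature is withheld from every other provider.
    t = Text("hello", thought_signature=ThoughtSignature("sig-abc", "anthropic"))
    assert t.gemini() == {"text": "hello"}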
@@ -45,7 +124,11 @@ class Text:
         return {"type": "text", "text": self.text}
 
     def gemini(self) -> dict:
-        return {"text": self.text}
+        result = {"text": self.text}
+        signature = _signature_for_provider(self.thought_signature, "gemini")
+        if signature is not None:
+            result["thoughtSignature"] = signature
+        return result
 
     def mistral(self) -> dict:
         return {"type": "text", "text": self.text}
@@ -62,7 +145,10 @@ class ToolCall:
     built_in_type: str | None = None
     extra_body: dict | None = None
     # for gemini 3 - thought signatures to maintain reasoning context
-    thought_signature: str | None = None
+    thought_signature: ThoughtSignatureLike | None = None
+
+    def __post_init__(self) -> None:
+        self.thought_signature = _normalize_signature(self.thought_signature)
 
     @property
     def fingerprint(self) -> str:
@@ -96,8 +182,9 @@ class ToolCall:
 
     def gemini(self) -> dict:
         result = {"functionCall": {"name": self.name, "args": self.arguments}}
-        if self.thought_signature is not None:
-            result["thoughtSignature"] = self.thought_signature  # type: ignore
+        signature = _signature_for_provider(self.thought_signature, "gemini")
+        if signature is not None:
+            result["thoughtSignature"] = signature  # type: ignore
         return result
 
     def mistral(self) -> dict:
@@ -287,7 +374,11 @@ class Thinking:
     # for openai - to keep conversation chain
     raw_payload: dict | None = None
     # for gemini 3 - thought signatures to maintain reasoning context
-    thought_signature: str | None = None
+    thought_signature: ThoughtSignatureLike | None = None
+    summary: str | None = None  # to differentiate summary text from actual content
+
+    def __post_init__(self) -> None:
+        self.thought_signature = _normalize_signature(self.thought_signature)
 
     @property
     def fingerprint(self) -> str:
@@ -302,12 +393,19 @@ class Thinking:
         return {"type": "reasoning", "content": self.content}
 
     def anthropic(self) -> dict:  # Anthropic Messages
-        return {"type": "thinking", "thinking": self.content}
+        if self.raw_payload:
+            return dict(self.raw_payload)
+        result = {"type": "thinking", "thinking": self.content}
+        signature = _signature_for_provider(self.thought_signature, "anthropic")
+        if signature is not None:
+            result["signature"] = signature
+        return result
 
     def gemini(self) -> dict:
         result = {"text": f"[Thinking: {self.content}]"}
-        if self.thought_signature is not None:
-            result["thoughtSignature"] = self.thought_signature
+        signature = _signature_for_provider(self.thought_signature, "gemini")
+        if signature is not None:
+            result["thoughtSignature"] = signature
         return result
 
     def mistral(self) -> dict:
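The net effect of this hunk: a Thinking part round-trips the provider's original block when one was captured in raw_payload, and otherwise only attaches a signature that is valid for the target provider. A sketch under the same import assumptions as above:

    from lm_deluge.prompt import Thinking, ThoughtSignature

    th = Thinking(
        content="plan the tool call",
        thought_signature=ThoughtSignature("sig-123", "anthropic"),
    )
    # Anthropic receives a verifiable thinking block...
    assert th.anthropic() == {
        "type": "thinking",
        "thinking": "plan the tool call",
        "signature": "sig-123",
    }
    # ...while Gemini gets the text only; the foreign signature is dropped.
    assert th.gemini() == {"text": "[Thinking: plan the tool call]"}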
@@ -379,10 +477,15 @@ class Message:
     #         return {"type": "file", "tag": f"<File ({size} bytes)>"}
     #     return repr(value)
 
-    def to_log(self) -> dict:
+    def to_log(self, *, preserve_media: bool = False) -> dict:
         """
         Return a JSON-serialisable dict that fully captures the message.
+
+        Args:
+            preserve_media: If True, store full base64-encoded bytes for images and files.
+                If False (default), replace with placeholder tags.
         """
+        import base64
 
         def _json_safe(value):
             if isinstance(value, (str, int, float, bool)) or value is None:
@@ -404,13 +507,41 @@ class Message:
         content_blocks: list[dict] = []
         for p in self.parts:
             if isinstance(p, Text):
-                content_blocks.append({"type": "text", "text": p.text})
-            elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
-                w, h = p.size
-                content_blocks.append({"type": "image", "tag": f"<Image ({w}×{h})>"})
-            elif isinstance(p, File):  # File – redact the bytes, keep a hint
-                size = p.size
-                content_blocks.append({"type": "file", "tag": f"<File ({size} bytes)>"})
+                text_block: dict = {"type": "text", "text": p.text}
+                signature = _serialize_signature(p.thought_signature)
+                if signature is not None:
+                    text_block["thought_signature"] = signature
+                content_blocks.append(text_block)
+            elif isinstance(p, Image):
+                if preserve_media:
+                    content_blocks.append(
+                        {
+                            "type": "image",
+                            "data": base64.b64encode(p._bytes()).decode("ascii"),
+                            "media_type": p.media_type,
+                            "detail": p.detail,
+                        }
+                    )
+                else:
+                    w, h = p.size
+                    content_blocks.append(
+                        {"type": "image", "tag": f"<Image ({w}×{h})>"}
+                    )
+            elif isinstance(p, File):
+                if preserve_media:
+                    content_blocks.append(
+                        {
+                            "type": "file",
+                            "data": base64.b64encode(p._bytes()).decode("ascii"),
+                            "media_type": p.media_type,
+                            "filename": p.filename,
+                        }
+                    )
+                else:
+                    size = p.size
+                    content_blocks.append(
+                        {"type": "file", "tag": f"<File ({size} bytes)>"}
+                    )
             elif isinstance(p, ToolCall):
                 tool_call_block = {
                     "type": "tool_call",
@@ -418,8 +549,9 @@ class Message:
                     "name": p.name,
                     "arguments": _json_safe(p.arguments),
                 }
-                if p.thought_signature is not None:
-                    tool_call_block["thought_signature"] = p.thought_signature
+                signature = _serialize_signature(p.thought_signature)
+                if signature is not None:
+                    tool_call_block["thought_signature"] = signature
                 content_blocks.append(tool_call_block)
             elif isinstance(p, ToolResult):
                 content_blocks.append(
@@ -430,9 +562,10 @@ class Message:
                     }
                 )
             elif isinstance(p, Thinking):
-                thinking_block = {"type": "thinking", "content": p.content}
-                if p.thought_signature is not None:
-                    thinking_block["thought_signature"] = p.thought_signature
+                thinking_block: dict = {"type": "thinking", "content": p.content}
+                signature = _serialize_signature(p.thought_signature)
+                if signature is not None:
+                    thinking_block["thought_signature"] = signature
                 content_blocks.append(thinking_block)
 
         return {"role": self.role, "content": content_blocks}
@@ -440,28 +573,56 @@ class Message:
     @classmethod
     def from_log(cls, data: dict) -> "Message":
         """Re-hydrate a Message previously produced by `to_log()`."""
-        # DEBUG: Track when from_log is called
-        # print(f"DEBUG: Message.from_log called for {data['role']} message with {len(data['content'])} content blocks")
+        import base64
+
         role: Role = data["role"]
         parts: list[Part] = []
 
         for p in data["content"]:
             if p["type"] == "text":
-                parts.append(Text(p["text"]))
+                parts.append(
+                    Text(
+                        p["text"],
+                        thought_signature=_deserialize_signature(
+                            p.get("thought_signature")
+                        ),
+                    )
+                )
             elif p["type"] == "image":
-                # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
-                # print(f"DEBUG: Message.from_log creating Text placeholder for image: {p['tag']}")
-                parts.append(Text(p["tag"]))
+                if "data" in p:
+                    # Full image data was preserved
+                    parts.append(
+                        Image(
+                            data=base64.b64decode(p["data"]),
+                            media_type=p.get("media_type"),
+                            detail=p.get("detail", "auto"),
+                        )
+                    )
+                else:
+                    # Placeholder tag only
+                    parts.append(Text(p["tag"]))
             elif p["type"] == "file":
-                # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
-                parts.append(Text(p["tag"]))
+                if "data" in p:
+                    # Full file data was preserved
+                    parts.append(
+                        File(
+                            data=base64.b64decode(p["data"]),
+                            media_type=p.get("media_type"),
+                            filename=p.get("filename"),
+                        )
+                    )
+                else:
+                    # Placeholder tag only
+                    parts.append(Text(p["tag"]))
             elif p["type"] == "tool_call":
                 parts.append(
                     ToolCall(
                         id=p["id"],
                         name=p["name"],
                         arguments=p["arguments"],
-                        thought_signature=p.get("thought_signature"),
+                        thought_signature=_deserialize_signature(
+                            p.get("thought_signature")
+                        ),
                     )
                 )
             elif p["type"] == "tool_result":
@@ -472,7 +633,9 @@ class Message:
                 parts.append(
                     Thinking(
                         content=p["content"],
-                        thought_signature=p.get("thought_signature"),
+                        thought_signature=_deserialize_signature(
+                            p.get("thought_signature")
+                        ),
                     )
                 )
             else:
@@ -805,7 +968,15 @@ class Message:
         # Anthropic: system message is *not* in the list
         if self.role == "system":
             raise ValueError("Anthropic keeps system outside message list")
-        content = [p.anthropic() for p in self.parts]
+        content: list[dict] = []
+        for part in self.parts:
+            if isinstance(part, Thinking) and part.raw_payload is None:
+                signature = _signature_for_provider(part.thought_signature, "anthropic")
+                if signature is None:
+                    continue
+            content.append(part.anthropic())
+        if not content:
+            content = [{"type": "text", "text": ""}]
         # Shortcut: single text becomes a bare string
         if len(content) == 1 and content[0].get("type") == "text":
            content = content[0]["text"]
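This hunk changes what is emitted to Anthropic rather than what is stored: a Thinking part with neither a captured raw_payload nor an Anthropic-valid signature is skipped, presumably because Anthropic rejects unsigned thinking blocks on replay, and a message emptied this way falls back to a single empty text block. Assuming the enclosing method is Message.anthropic() and with Message construction shown schematically, the effect is roughly:

    # Reasoning imported from another provider (no raw_payload, no Anthropic
    # signature) is dropped on re-send instead of triggering an API error.
    msg = Message("assistant", [Thinking(content="gemini-side reasoning")])
    payload = msg.anthropic()
    # payload carries only the empty-text fallback, which the single-text
    # shortcut above then collapses to a bare "" string.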
@@ -1134,7 +1305,9 @@ class Conversation:
     return result_parts
 
 def _anthropic_content_to_parts(
-    role: Role, content: str | list[dict] | None
+    role: Role,
+    content: str | list[dict] | None,
+    signature_state: dict[str, ThoughtSignature | None] | None = None,
 ) -> list[Part]:
     parts: list[Part] = []
     if content is None:
@@ -1163,15 +1336,38 @@
                 raise ValueError("Anthropic tool_use block missing id")
             name = block.get("name") or "tool"
             arguments = block.get("input") or {}
+            tool_call = ToolCall(
+                id=tool_id,
+                name=name,
+                arguments=arguments
+                if isinstance(arguments, dict)
+                else {"value": arguments},
+            )
+            if signature_state is not None:
+                pending_signature = signature_state.get("pending")
+                if pending_signature:
+                    tool_call.thought_signature = pending_signature
+                    signature_state["pending"] = None
+            parts.append(tool_call)
+        elif block_type == "redacted_thinking":
             parts.append(
-                ToolCall(
-                    id=tool_id,
-                    name=name,
-                    arguments=arguments
-                    if isinstance(arguments, dict)
-                    else {"value": arguments},
+                Thinking(content=block.get("data", ""), raw_payload=block)
+            )
+        elif block_type == "thinking":
+            thinking_content = block.get("thinking", "")
+            signature = _normalize_signature(
+                block.get("signature"),
+                provider="anthropic",
+            )
+            parts.append(
+                Thinking(
+                    content=thinking_content,
+                    raw_payload=block,
+                    thought_signature=signature,
                 )
             )
+            if signature_state is not None and signature is not None:
+                signature_state["pending"] = signature
         elif block_type == "tool_result":
             tool_use_id = block.get("tool_use_id")
             if tool_use_id is None:
@@ -1181,9 +1377,6 @@
             result = _anthropic_tool_result_content(block.get("content"))
             tool_result = ToolResult(tool_call_id=tool_use_id, result=result)
             parts.append(tool_result)
-        elif block_type == "thinking":
-            thinking_content = block.get("thinking", "")
-            parts.append(Thinking(content=thinking_content, raw_payload=block))
 
         else:
             parts.append(Text(json.dumps(block)))
@@ -1213,6 +1406,9 @@
         content = message.get("content")
         if isinstance(content, list):
             buffer_parts: list[Part] = []
+            signature_state: None | dict[str, ThoughtSignature | None] = (
+                {"pending": None} if base_role == "assistant" else None
+            )
             for block in content:
                 block_type = block.get("type")
                 if block_type == "tool_result":
@@ -1234,7 +1430,11 @@
                         )
                     )
                 else:
-                    block_parts = _anthropic_content_to_parts(base_role, [block])
+                    block_parts = _anthropic_content_to_parts(
+                        base_role,
+                        [block],
+                        signature_state=signature_state,
+                    )
                     buffer_parts.extend(block_parts)
 
             if buffer_parts:
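signature_state threads a pending thinking signature across the blocks of one assistant message, so a tool_use that follows a signed thinking block inherits its signature. A sketch of the handoff, assuming _anthropic_content_to_parts is reachable at module level in lm_deluge.prompt:

    blocks = [
        {"type": "thinking", "thinking": "plan the call", "signature": "sig-1"},
        {"type": "tool_use", "id": "tu_1", "name": "search", "input": {"q": "x"}},
    ]
    state = {"pending": None}
    parts = []
    for block in blocks:  # mirrors the one-block-at-a-time caller above
        parts.extend(
            _anthropic_content_to_parts("assistant", [block], signature_state=state)
        )
    # parts[0] is a Thinking carrying ThoughtSignature("sig-1", "anthropic");
    # parts[1] is the ToolCall, which inherited that signature so it can later
    # be replayed to Anthropic (and withheld from Gemini).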
@@ -1576,27 +1776,57 @@ class Conversation:
         hasher.update(json.dumps([m.fingerprint for m in self.messages]).encode())
         return hasher.hexdigest()
 
-    def to_log(self) -> dict:
+    def to_log(self, *, preserve_media: bool = False) -> dict:
         """
         Return a JSON-serialisable dict that fully captures the conversation.
+
+        Args:
+            preserve_media: If True, store full base64-encoded bytes for images and files.
+                If False (default), replace with placeholder tags.
         """
+        import base64
+
         serialized: list[dict] = []
 
         for msg in self.messages:
             content_blocks: list[dict] = []
             for p in msg.parts:
                 if isinstance(p, Text):
-                    content_blocks.append({"type": "text", "text": p.text})
-                elif isinstance(p, Image):  # Image – redact the bytes, keep a hint
-                    w, h = p.size
-                    content_blocks.append(
-                        {"type": "image", "tag": f"<Image ({w}×{h})>"}
-                    )
-                elif isinstance(p, File):  # File – redact the bytes, keep a hint
-                    size = p.size
-                    content_blocks.append(
-                        {"type": "file", "tag": f"<File ({size} bytes)>"}
-                    )
+                    text_block: dict = {"type": "text", "text": p.text}
+                    signature = _serialize_signature(p.thought_signature)
+                    if signature is not None:
+                        text_block["thought_signature"] = signature
+                    content_blocks.append(text_block)
+                elif isinstance(p, Image):
+                    if preserve_media:
+                        content_blocks.append(
+                            {
+                                "type": "image",
+                                "data": base64.b64encode(p._bytes()).decode("ascii"),
+                                "media_type": p.media_type,
+                                "detail": p.detail,
+                            }
+                        )
+                    else:
+                        w, h = p.size
+                        content_blocks.append(
+                            {"type": "image", "tag": f"<Image ({w}×{h})>"}
+                        )
+                elif isinstance(p, File):
+                    if preserve_media:
+                        content_blocks.append(
+                            {
+                                "type": "file",
+                                "data": base64.b64encode(p._bytes()).decode("ascii"),
+                                "media_type": p.media_type,
+                                "filename": p.filename,
+                            }
+                        )
+                    else:
+                        size = p.size
+                        content_blocks.append(
+                            {"type": "file", "tag": f"<File ({size} bytes)>"}
+                        )
                 elif isinstance(p, ToolCall):
                     tool_call_block = {
                         "type": "tool_call",
@@ -1604,8 +1834,9 @@
                         "name": p.name,
                         "arguments": p.arguments,
                     }
-                    if p.thought_signature is not None:
-                        tool_call_block["thought_signature"] = p.thought_signature
+                    signature = _serialize_signature(p.thought_signature)
+                    if signature is not None:
+                        tool_call_block["thought_signature"] = signature
                     content_blocks.append(tool_call_block)
                 elif isinstance(p, ToolResult):
                     content_blocks.append(
@@ -1618,9 +1849,10 @@
                         }
                     )
                 elif isinstance(p, Thinking):
-                    thinking_block = {"type": "thinking", "content": p.content}
-                    if p.thought_signature is not None:
-                        thinking_block["thought_signature"] = p.thought_signature
+                    thinking_block: dict = {"type": "thinking", "content": p.content}
+                    signature = _serialize_signature(p.thought_signature)
+                    if signature is not None:
+                        thinking_block["thought_signature"] = signature
                     content_blocks.append(thinking_block)
             serialized.append({"role": msg.role, "content": content_blocks})
 
@@ -1734,6 +1966,8 @@
     @classmethod
     def from_log(cls, payload: dict) -> "Conversation":
         """Re-hydrate a Conversation previously produced by `to_log()`."""
+        import base64
+
         msgs: list[Message] = []
 
         for m in payload.get("messages", []):
@@ -1742,20 +1976,49 @@
 
             for p in m["content"]:
                 if p["type"] == "text":
-                    parts.append(Text(p["text"]))
+                    parts.append(
+                        Text(
+                            p["text"],
+                            thought_signature=_deserialize_signature(
+                                p.get("thought_signature")
+                            ),
+                        )
+                    )
                 elif p["type"] == "image":
-                    # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
-                    parts.append(Text(p["tag"]))
+                    if "data" in p:
+                        # Full image data was preserved
+                        parts.append(
+                            Image(
+                                data=base64.b64decode(p["data"]),
+                                media_type=p.get("media_type"),
+                                detail=p.get("detail", "auto"),
+                            )
+                        )
+                    else:
+                        # Placeholder tag only
+                        parts.append(Text(p["tag"]))
                 elif p["type"] == "file":
-                    # We only stored a placeholder tag; rehydrate as inert text to avoid byte access.
-                    parts.append(Text(p["tag"]))
+                    if "data" in p:
+                        # Full file data was preserved
+                        parts.append(
+                            File(
+                                data=base64.b64decode(p["data"]),
+                                media_type=p.get("media_type"),
+                                filename=p.get("filename"),
+                            )
+                        )
+                    else:
+                        # Placeholder tag only
+                        parts.append(Text(p["tag"]))
                 elif p["type"] == "tool_call":
                     parts.append(
                         ToolCall(
                             id=p["id"],
                             name=p["name"],
                             arguments=p["arguments"],
-                            thought_signature=p.get("thought_signature"),
+                            thought_signature=_deserialize_signature(
+                                p.get("thought_signature")
+                            ),
                         )
                     )
                 elif p["type"] == "tool_result":
@@ -1766,7 +2029,9 @@
                     parts.append(
                         Thinking(
                             content=p["content"],
-                            thought_signature=p.get("thought_signature"),
+                            thought_signature=_deserialize_signature(
+                                p.get("thought_signature")
+                            ),
                         )
                     )
                 else:
lm_deluge/server/__init__.py ADDED
@@ -0,0 +1,24 @@
+"""
+LM-Deluge Proxy Server
+
+A FastAPI-based proxy server that exposes OpenAI-compatible and
+Anthropic-compatible API endpoints, routing requests through lm-deluge
+to any supported provider.
+
+Usage:
+    python -m lm_deluge.server
+
+Environment Variables:
+    DELUGE_PROXY_API_KEY: Optional API key that clients must provide
+    DELUGE_PROXY_PORT: Port to run on (default: 8000)
+    DELUGE_PROXY_HOST: Host to bind (default: 0.0.0.0)
+    DELUGE_PROXY_LOG_REQUESTS: Log full incoming proxy requests when set
+    DELUGE_PROXY_LOG_PROVIDER_REQUESTS: Log outbound provider requests when set
+
+Provider keys (same as LLMClient):
+    OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY, etc.
+"""
+
+from .app import create_app
+
+__all__ = ["create_app"]
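Since the docstring advertises OpenAI-compatible endpoints, a stock OpenAI client should work against the proxy. The route prefix and model id below are assumptions, not taken from this diff; lm_deluge/server/app.py holds the authoritative routes:

    # Start the proxy first:  python -m lm_deluge.server
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:8000/v1",  # assumed prefix for the OpenAI-compatible routes
        api_key="local-key",                  # must match DELUGE_PROXY_API_KEY, if set
    )
    resp = client.chat.completions.create(
        model="gpt-4.1-mini",  # hypothetical id; use any model lm-deluge supports
        messages=[{"role": "user", "content": "hello"}],
    )
    print(resp.choices[0].message.content)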