lm-deluge 0.0.70__tar.gz → 0.0.71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. {lm_deluge-0.0.70/src/lm_deluge.egg-info → lm_deluge-0.0.71}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/mock_openai.py +66 -64
  4. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/prompt.py +6 -3
  5. {lm_deluge-0.0.70 → lm_deluge-0.0.71/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  6. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_mock_openai.py +103 -0
  7. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/LICENSE +0 -0
  8. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/README.md +0 -0
  9. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/setup.cfg +0 -0
  10. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/__init__.py +0 -0
  11. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
  16. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/common.py +0 -0
  17. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  18. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  19. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  20. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  21. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  22. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/gemini.py +0 -0
  23. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/mistral.py +0 -0
  24. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/openai.py +0 -0
  25. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/response.py +0 -0
  26. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/batches.py +0 -0
  27. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  28. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  29. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  30. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  31. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/base.py +0 -0
  32. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/openai.py +0 -0
  33. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/cache.py +0 -0
  34. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/cli.py +0 -0
  35. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/client.py +0 -0
  36. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/config.py +0 -0
  37. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/embed.py +0 -0
  38. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/errors.py +0 -0
  39. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/file.py +0 -0
  40. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/image.py +0 -0
  41. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/__init__.py +0 -0
  42. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/classify.py +0 -0
  43. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/extract.py +0 -0
  44. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/locate.py +0 -0
  45. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/ocr.py +0 -0
  46. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/score.py +0 -0
  47. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/translate.py +0 -0
  48. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/__init__.py +0 -0
  49. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/anthropic.py +0 -0
  50. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/bedrock.py +0 -0
  51. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/cerebras.py +0 -0
  52. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/cohere.py +0 -0
  53. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/deepseek.py +0 -0
  54. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/fireworks.py +0 -0
  55. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/google.py +0 -0
  56. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/grok.py +0 -0
  57. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/groq.py +0 -0
  58. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/kimi.py +0 -0
  59. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/meta.py +0 -0
  60. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/minimax.py +0 -0
  61. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/mistral.py +0 -0
  62. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/openai.py +0 -0
  63. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/openrouter.py +0 -0
  64. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/models/together.py +0 -0
  65. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/presets/cerebras.py +0 -0
  66. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/presets/meta.py +0 -0
  67. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/request_context.py +0 -0
  68. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/rerank.py +0 -0
  69. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/tool.py +0 -0
  70. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/tracker.py +0 -0
  71. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/usage.py +0 -0
  72. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/harmony.py +0 -0
  73. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/json.py +0 -0
  74. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/logprobs.py +0 -0
  75. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/spatial.py +0 -0
  76. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/validation.py +0 -0
  77. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/util/xml.py +0 -0
  78. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/warnings.py +0 -0
  79. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  80. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  81. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/requires.txt +0 -0
  82. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/top_level.txt +0 -0
  83. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_builtin_tools.py +0 -0
  84. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_file_upload.py +0 -0
  85. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_native_mcp_server.py +0 -0
  86. {lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_openrouter_generic.py +0 -0
{lm_deluge-0.0.70/src/lm_deluge.egg-info → lm_deluge-0.0.71}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.70
+Version: 0.0.71
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

{lm_deluge-0.0.70 → lm_deluge-0.0.71}/pyproject.toml

@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]

 [project]
 name = "lm_deluge"
-version = "0.0.70"
+version = "0.0.71"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"

{lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/mock_openai.py

@@ -41,6 +41,8 @@ try:
     from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
     from openai.types.chat.chat_completion_chunk import (
         Choice as ChunkChoice,
+    )
+    from openai.types.chat.chat_completion_chunk import (
         ChoiceDelta,
         ChoiceDeltaToolCall,
         ChoiceDeltaToolCallFunction,

@@ -63,56 +65,61 @@ __all__ = [
     "RateLimitError",
 ]

-from lm_deluge.client import LLMClient
-from lm_deluge.prompt import Conversation, Message, Part, Text, ToolCall, ToolResult
+from lm_deluge.client import LLMClient, _LLMClient
+from lm_deluge.prompt import CachePattern, Conversation, Message, Text, ToolCall
+from lm_deluge.tool import Tool


-def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
-    """Convert OpenAI messages format to lm-deluge Conversation."""
-    conv_messages = []
-
-    for msg in messages:
-        role = msg["role"]
-        content = msg.get("content")
-        tool_calls = msg.get("tool_calls")
-        tool_call_id = msg.get("tool_call_id")
-
-        parts: list[Part] = []
-
-        # Handle regular content
-        if content:
-            if isinstance(content, str):
-                parts.append(Text(content))
-            elif isinstance(content, list):
-                # Multi-part content (text, images, etc.)
-                for item in content:
-                    if item.get("type") == "text":
-                        parts.append(Text(item["text"]))
-                    # Could add image support here later
-
-        # Handle tool calls (from assistant)
-        if tool_calls:
-            for tc in tool_calls:
-                # Parse arguments from JSON string to dict
-                args_str = tc["function"]["arguments"]
-                args_dict = (
-                    json.loads(args_str) if isinstance(args_str, str) else args_str
-                )
-                parts.append(
-                    ToolCall(
-                        id=tc["id"],
-                        name=tc["function"]["name"],
-                        arguments=args_dict,
-                    )
-                )
+def _openai_tools_to_lm_deluge(tools: list[dict[str, Any]]) -> list[Tool]:
+    """
+    Convert OpenAI tool format to lm-deluge Tool objects.
+
+    OpenAI format:
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get weather",
+                "parameters": {
+                    "type": "object",
+                    "properties": {...},
+                    "required": [...]
+                }
+            }
+        }
+
+    lm-deluge format:
+        Tool(
+            name="get_weather",
+            description="Get weather",
+            parameters={...properties...},
+            required=[...]
+        )
+    """
+    lm_tools = []
+    for tool in tools:
+        if tool.get("type") == "function":
+            func = tool["function"]
+            params_schema = func.get("parameters", {})
+
+            # Extract properties and required from the parameters schema
+            properties = params_schema.get("properties", {})
+            required = params_schema.get("required", [])
+
+            lm_tool = Tool(
+                name=func["name"],
+                description=func.get("description"),
+                parameters=properties if properties else None,
+                required=required,
+            )
+            lm_tools.append(lm_tool)

-        # Handle tool results (from tool role)
-        if role == "tool" and tool_call_id:
-            parts.append(ToolResult(tool_call_id=tool_call_id, result=content or ""))
+    return lm_tools

-        conv_messages.append(Message(role=role, parts=parts))

-    return Conversation(messages=conv_messages)
+def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
+    """Convert OpenAI messages format to lm-deluge Conversation."""
+    return Conversation.from_openai_chat(messages)


 def _response_to_chat_completion(
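
The new converter can be exercised directly. The sketch below mirrors the test added in this release; every name in it appears in this diff, so it should run as written against 0.0.71:

    from lm_deluge.mock_openai import _openai_tools_to_lm_deluge

    # One OpenAI-style tool definition, as accepted by chat.completions.create(tools=...)
    openai_tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get weather",
                "parameters": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                    "required": ["location"],
                },
            },
        }
    ]

    tools = _openai_tools_to_lm_deluge(openai_tools)
    assert tools[0].name == "get_weather"
    # Only the schema's "properties" dict lands in .parameters; "required" is stored separately.
    assert tools[0].required == ["location"]
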
@@ -346,7 +353,7 @@ class MockCompletions:
             ChatCompletion (non-streaming) or AsyncIterator[ChatCompletionChunk] (streaming)
         """
         # Get or create client for this model
-        client = self._parent._get_or_create_client(model)
+        client: _LLMClient = self._parent._get_or_create_client(model)

         # Convert messages to Conversation
         conversation = _messages_to_conversation(messages)

@@ -377,26 +384,19 @@ class MockCompletions:
         # Convert tools if provided
         lm_tools = None
         if tools:
-            # For now, just pass through - lm-deluge will handle the format
-            lm_tools = tools
+            # Convert from OpenAI format to lm-deluge Tool objects
+            lm_tools = _openai_tools_to_lm_deluge(tools)

         # Execute request
         if stream:
-            # Streaming mode
-            request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
-            # Note: client.stream() is an async generator, not a coroutine
-            # We can directly wrap it
-            stream_iter = client.stream(conversation, tools=lm_tools)
-            # Verify it's a generator, not a coroutine
-            if hasattr(stream_iter, "__anext__"):
-                return _AsyncStreamWrapper(stream_iter, model, request_id)
-            else:
-                # If it's a coroutine, we need to await it first
-                # But this shouldn't happen with the current implementation
-                raise TypeError(f"Expected async generator, got {type(stream_iter)}")
+            raise RuntimeError("streaming not supported")
         else:
             # Non-streaming mode
-            response = await client.start(conversation, tools=lm_tools)
+            response = await client.start(
+                conversation,
+                tools=lm_tools,  # type: ignore
+                cache=self._parent.cache_pattern,  # type: ignore
+            )
             return _response_to_chat_completion(response, model)
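
Note the behavioral change in this hunk: stream=True no longer returns a chunk iterator, it raises immediately. A minimal sketch of what a caller sees in 0.0.71, assuming the mock exposes the usual chat.completions.create(...) surface that its class names suggest (that attribute path is not shown in this diff):

    import asyncio

    from lm_deluge.mock_openai import MockAsyncOpenAI

    async def main():
        client = MockAsyncOpenAI(model="gpt-4o-mini")
        try:
            await client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "hi"}],
                stream=True,  # raises in 0.0.71; only stream=False is supported
            )
        except RuntimeError as exc:
            print(exc)  # "streaming not supported"

    asyncio.run(main())
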
@@ -437,7 +437,7 @@ class MockTextCompletions:
             Completion object
         """
         # Get or create client for this model
-        client = self._parent._get_or_create_client(model)
+        client: _LLMClient = self._parent._get_or_create_client(model)

         # Handle single prompt
         if isinstance(prompt, list):

@@ -464,7 +464,7 @@
             client = self._parent._create_client_with_params(model, merged_params)

         # Execute request
-        response = await client.start(conversation)
+        response = await client.start(conversation, cache=self._parent.cache_pattern)  # type: ignore

         # Convert to Completion format
         completion_text = None

@@ -477,7 +477,7 @@
         choice = TextCompletionChoice(
             index=0,
             text=completion_text or "",
-            finish_reason=response.finish_reason or "stop",
+            finish_reason=response.finish_reason or "stop",  # type: ignore
         )

         # Create usage

@@ -560,6 +560,7 @@ class MockAsyncOpenAI:
         max_completion_tokens: int | None = None,
         top_p: float | None = None,
         seed: int | None = None,
+        cache_pattern: CachePattern | None = None,
         **kwargs: Any,
     ):
         # OpenAI-compatible attributes
@@ -571,6 +572,7 @@
         self.max_retries = max_retries or 2
         self.default_headers = default_headers
         self.http_client = http_client
+        self.cache_pattern = cache_pattern

         # Internal attributes
         self._default_model = model or "gpt-4o-mini"
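
Taken together, these two hunks thread the new cache_pattern option from the mock client's constructor through to every client.start(..., cache=...) call shown above. A sketch of the intended wiring; "tools_only" is an assumed CachePattern literal used purely for illustration (the concrete values live in lm_deluge.prompt and are not shown in this diff):

    from lm_deluge.mock_openai import MockAsyncOpenAI

    # cache_pattern is stored on the client and forwarded as cache=... on each request
    client = MockAsyncOpenAI(
        model="gpt-4o-mini",
        cache_pattern="tools_only",  # assumed value; see CachePattern in lm_deluge.prompt
    )
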
{lm_deluge-0.0.70 → lm_deluge-0.0.71}/src/lm_deluge/prompt.py

@@ -848,14 +848,16 @@ class Conversation:
         if content is None:
             return parts
         if isinstance(content, str):
-            parts.append(Text(content))
+            if content.strip():
+                parts.append(Text(content))
             return parts

         for block in content:
             block_type = block.get("type")
             if block_type in text_types:
                 text_value = block.get("text") or block.get(block_type) or ""
-                parts.append(Text(text_value))
+                if text_value.strip():
+                    parts.append(Text(text_value))
             elif block_type in image_types:
                 parts.append(_to_image_from_url(block))
             elif block_type in file_types:

@@ -1001,7 +1003,8 @@
                 )
             )

-            conversation_messages.append(Message(mapped_role, parts))
+            if parts:
+                conversation_messages.append(Message(mapped_role, parts))

         return cls(conversation_messages)
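
Net effect of the prompt.py changes: whitespace-only content no longer produces empty Text parts, and a message whose parts list ends up empty is dropped rather than appended. A small check via Conversation.from_openai_chat, which mock_openai now delegates to:

    from lm_deluge.prompt import Conversation

    conv = Conversation.from_openai_chat([
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "   "},  # whitespace-only: no Text part, message dropped
    ])
    assert len(conv.messages) == 1 and conv.messages[0].role == "user"
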
{lm_deluge-0.0.70 → lm_deluge-0.0.71/src/lm_deluge.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.70
+Version: 0.0.71
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

{lm_deluge-0.0.70 → lm_deluge-0.0.71}/tests/test_mock_openai.py

@@ -427,6 +427,107 @@ async def test_verifiers_like_initialization():
     print("  Verifiers-like initialization works")


+async def test_tool_conversion():
+    """Test that OpenAI tools are converted to lm-deluge format."""
+    print("\n✓ Testing tool conversion...")
+
+    from lm_deluge.mock_openai import _openai_tools_to_lm_deluge
+
+    # OpenAI tool format
+    openai_tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get the weather for a location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city name",
+                        },
+                        "units": {
+                            "type": "string",
+                            "enum": ["celsius", "fahrenheit"],
+                        },
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+
+    # Convert to lm-deluge format
+    lm_tools = _openai_tools_to_lm_deluge(openai_tools)
+
+    # Verify conversion
+    assert len(lm_tools) == 1
+    tool = lm_tools[0]
+    assert tool.name == "get_weather"
+    assert tool.description == "Get the weather for a location"
+    assert "location" in tool.parameters
+    assert "units" in tool.parameters
+    assert tool.required == ["location"]
+
+    print("  Tool conversion works correctly")
+    print(f"  Tool name: {tool.name}")
+    print(f"  Parameters: {list(tool.parameters.keys())}")
+
+
+async def test_tool_result_role_conversion():
+    """Test that tool result messages are converted from role='tool' to role='user'."""
+    print("\n✓ Testing tool result role conversion...")
+
+    from lm_deluge.mock_openai import _messages_to_conversation
+
+    # OpenAI format with tool call and result
+    openai_messages = [
+        {"role": "user", "content": "What's the weather?"},
+        {
+            "role": "assistant",
+            "content": None,
+            "tool_calls": [
+                {
+                    "id": "call_123",
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "arguments": '{"location": "NYC"}',
+                    },
+                }
+            ],
+        },
+        {
+            "role": "tool",
+            "tool_call_id": "call_123",
+            "content": "The weather is sunny, 72°F",
+        },
+        {"role": "assistant", "content": "The weather in NYC is sunny and 72°F."},
+    ]
+
+    # Convert to lm-deluge format
+    conversation = _messages_to_conversation(openai_messages)
+
+    # Verify conversion
+    assert len(conversation.messages) == 4
+    assert conversation.messages[0].role == "user"
+    assert conversation.messages[1].role == "assistant"
+    # The tool result message should be converted to role="user" (not "tool")
+    assert (
+        conversation.messages[2].role == "user"
+    ), "Tool result should be in user message"
+    # Message should have ONLY the ToolResult part (not Text)
+    assert len(conversation.messages[2].parts) == 1
+    assert hasattr(conversation.messages[2].parts[0], "tool_call_id")
+    assert conversation.messages[2].parts[0].tool_call_id == "call_123"
+    assert conversation.messages[2].parts[0].result == "The weather is sunny, 72°F"
+    assert conversation.messages[3].role == "assistant"
+
+    print("  Tool result role conversion works correctly")
+    print("  OpenAI role='tool' → lm-deluge role='user' ✓")
+
+
 async def run_all_tests():
     """Run all tests."""
     print("=" * 60)
@@ -441,6 +542,8 @@ async def run_all_tests():
         test_http_client_parameter,
         test_exception_imports,
         test_verifiers_like_initialization,
+        test_tool_conversion,
+        test_tool_result_role_conversion,
         # API key needed
         test_basic_completion,
         test_streaming,