lm-deluge 0.0.69__tar.gz → 0.0.71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. {lm_deluge-0.0.69/src/lm_deluge.egg-info → lm_deluge-0.0.71}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/__init__.py +16 -2
  4. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/mock_openai.py +228 -67
  5. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/prompt.py +6 -3
  6. {lm_deluge-0.0.69 → lm_deluge-0.0.71/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  7. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/tests/test_mock_openai.py +231 -1
  8. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/LICENSE +0 -0
  9. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/README.md +0 -0
  10. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/setup.cfg +0 -0
  11. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/__init__.py +0 -0
  12. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/anthropic.py +0 -0
  13. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/base.py +0 -0
  14. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/bedrock.py +0 -0
  15. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
  16. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/common.py +0 -0
  17. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  18. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  19. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  20. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  21. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  22. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/gemini.py +0 -0
  23. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/mistral.py +0 -0
  24. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/openai.py +0 -0
  25. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/api_requests/response.py +0 -0
  26. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/batches.py +0 -0
  27. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  28. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  29. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  30. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  31. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/base.py +0 -0
  32. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/built_in_tools/openai.py +0 -0
  33. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/cache.py +0 -0
  34. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/cli.py +0 -0
  35. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/client.py +0 -0
  36. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/config.py +0 -0
  37. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/embed.py +0 -0
  38. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/errors.py +0 -0
  39. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/file.py +0 -0
  40. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/image.py +0 -0
  41. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/__init__.py +0 -0
  42. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/classify.py +0 -0
  43. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/extract.py +0 -0
  44. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/locate.py +0 -0
  45. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/ocr.py +0 -0
  46. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/score.py +0 -0
  47. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/llm_tools/translate.py +0 -0
  48. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/__init__.py +0 -0
  49. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/anthropic.py +0 -0
  50. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/bedrock.py +0 -0
  51. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/cerebras.py +0 -0
  52. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/cohere.py +0 -0
  53. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/deepseek.py +0 -0
  54. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/fireworks.py +0 -0
  55. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/google.py +0 -0
  56. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/grok.py +0 -0
  57. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/groq.py +0 -0
  58. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/kimi.py +0 -0
  59. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/meta.py +0 -0
  60. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/minimax.py +0 -0
  61. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/mistral.py +0 -0
  62. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/openai.py +0 -0
  63. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/openrouter.py +0 -0
  64. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/models/together.py +0 -0
  65. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/presets/cerebras.py +0 -0
  66. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/presets/meta.py +0 -0
  67. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/request_context.py +0 -0
  68. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/rerank.py +0 -0
  69. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/tool.py +0 -0
  70. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/tracker.py +0 -0
  71. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/usage.py +0 -0
  72. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/harmony.py +0 -0
  73. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/json.py +0 -0
  74. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/logprobs.py +0 -0
  75. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/spatial.py +0 -0
  76. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/validation.py +0 -0
  77. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/util/xml.py +0 -0
  78. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge/warnings.py +0 -0
  79. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
  80. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  81. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/requires.txt +0 -0
  82. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/src/lm_deluge.egg-info/top_level.txt +0 -0
  83. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/tests/test_builtin_tools.py +0 -0
  84. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/tests/test_file_upload.py +0 -0
  85. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/tests/test_native_mcp_server.py +0 -0
  86. {lm_deluge-0.0.69 → lm_deluge-0.0.71}/tests/test_openrouter_generic.py +0 -0
--- lm_deluge-0.0.69/src/lm_deluge.egg-info/PKG-INFO
+++ lm_deluge-0.0.71/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.69
+Version: 0.0.71
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
--- lm_deluge-0.0.69/pyproject.toml
+++ lm_deluge-0.0.71/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]

 [project]
 name = "lm_deluge"
-version = "0.0.69"
+version = "0.0.71"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
--- lm_deluge-0.0.69/src/lm_deluge/__init__.py
+++ lm_deluge-0.0.71/src/lm_deluge/__init__.py
@@ -4,7 +4,13 @@ from .prompt import Conversation, Message
 from .tool import Tool, ToolParams

 try:
-    from .mock_openai import MockAsyncOpenAI  # noqa
+    from .mock_openai import (  # noqa
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        MockAsyncOpenAI,
+        RateLimitError,
+    )

     _has_openai = True
 except ImportError:
@@ -24,4 +30,12 @@ __all__ = [
 ]

 if _has_openai:
-    __all__.append("MockAsyncOpenAI")
+    __all__.extend(
+        [
+            "MockAsyncOpenAI",
+            "APIError",
+            "APITimeoutError",
+            "BadRequestError",
+            "RateLimitError",
+        ]
+    )
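For context, a minimal usage sketch of the widened import surface (assuming the openai extra is installed; the ask() helper and its error handling are illustrative, not part of the package):

    from lm_deluge import APITimeoutError, MockAsyncOpenAI, RateLimitError

    async def ask(client: MockAsyncOpenAI, text: str) -> str | None:
        # The re-exported exceptions are the real openai classes, so callers
        # can catch them without importing openai directly.
        try:
            resp = await client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": text}],
            )
            return resp.choices[0].message.content
        except (RateLimitError, APITimeoutError):
            return None  # caller decides whether to retry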
--- lm_deluge-0.0.69/src/lm_deluge/mock_openai.py
+++ lm_deluge-0.0.71/src/lm_deluge/mock_openai.py
@@ -25,20 +25,30 @@ import uuid
 from typing import Any, AsyncIterator, Literal, Union, overload

 try:
+    from openai import (
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        RateLimitError,
+    )
+    from openai.types import Completion
     from openai.types.chat import (
         ChatCompletion,
         ChatCompletionChunk,
         ChatCompletionMessage,
         ChatCompletionMessageToolCall,
     )
-    from openai.types.chat.chat_completion import Choice as CompletionChoice
+    from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
     from openai.types.chat.chat_completion_chunk import (
         Choice as ChunkChoice,
+    )
+    from openai.types.chat.chat_completion_chunk import (
         ChoiceDelta,
         ChoiceDeltaToolCall,
         ChoiceDeltaToolCallFunction,
     )
     from openai.types.chat.chat_completion_message_tool_call import Function
+    from openai.types.completion_choice import CompletionChoice as TextCompletionChoice
     from openai.types.completion_usage import CompletionUsage
 except ImportError:
     raise ImportError(
@@ -46,56 +56,70 @@ except ImportError:
         "Install it with: pip install lm-deluge[openai]"
     )

-from lm_deluge.client import LLMClient
-from lm_deluge.prompt import Conversation, Message, Part, Text, ToolCall, ToolResult
+# Re-export exceptions for compatibility
+__all__ = [
+    "MockAsyncOpenAI",
+    "APIError",
+    "APITimeoutError",
+    "BadRequestError",
+    "RateLimitError",
+]

+from lm_deluge.client import LLMClient, _LLMClient
+from lm_deluge.prompt import CachePattern, Conversation, Message, Text, ToolCall
+from lm_deluge.tool import Tool

-def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
-    """Convert OpenAI messages format to lm-deluge Conversation."""
-    conv_messages = []
-
-    for msg in messages:
-        role = msg["role"]
-        content = msg.get("content")
-        tool_calls = msg.get("tool_calls")
-        tool_call_id = msg.get("tool_call_id")
-
-        parts: list[Part] = []
-
-        # Handle regular content
-        if content:
-            if isinstance(content, str):
-                parts.append(Text(content))
-            elif isinstance(content, list):
-                # Multi-part content (text, images, etc.)
-                for item in content:
-                    if item.get("type") == "text":
-                        parts.append(Text(item["text"]))
-                    # Could add image support here later
-
-        # Handle tool calls (from assistant)
-        if tool_calls:
-            for tc in tool_calls:
-                # Parse arguments from JSON string to dict
-                args_str = tc["function"]["arguments"]
-                args_dict = (
-                    json.loads(args_str) if isinstance(args_str, str) else args_str
-                )
-                parts.append(
-                    ToolCall(
-                        id=tc["id"],
-                        name=tc["function"]["name"],
-                        arguments=args_dict,
-                    )
-                )

-        # Handle tool results (from tool role)
-        if role == "tool" and tool_call_id:
-            parts.append(ToolResult(tool_call_id=tool_call_id, result=content or ""))
+def _openai_tools_to_lm_deluge(tools: list[dict[str, Any]]) -> list[Tool]:
+    """
+    Convert OpenAI tool format to lm-deluge Tool objects.
+
+    OpenAI format:
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get weather",
+                "parameters": {
+                    "type": "object",
+                    "properties": {...},
+                    "required": [...]
+                }
+            }
+        }
+
+    lm-deluge format:
+        Tool(
+            name="get_weather",
+            description="Get weather",
+            parameters={...properties...},
+            required=[...]
+        )
+    """
+    lm_tools = []
+    for tool in tools:
+        if tool.get("type") == "function":
+            func = tool["function"]
+            params_schema = func.get("parameters", {})

-        conv_messages.append(Message(role=role, parts=parts))
+            # Extract properties and required from the parameters schema
+            properties = params_schema.get("properties", {})
+            required = params_schema.get("required", [])

-    return Conversation(messages=conv_messages)
+            lm_tool = Tool(
+                name=func["name"],
+                description=func.get("description"),
+                parameters=properties if properties else None,
+                required=required,
+            )
+            lm_tools.append(lm_tool)
+
+    return lm_tools
+
+
+def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
+    """Convert OpenAI messages format to lm-deluge Conversation."""
+    return Conversation.from_openai_chat(messages)


 def _response_to_chat_completion(
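As a sketch of what the new helper does (the weather tool below restates the docstring's own example as runnable code; the assertions follow from the conversion logic above):

    openai_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        },
    }
    [tool] = _openai_tools_to_lm_deluge([openai_tool])
    assert tool.name == "get_weather"
    assert tool.required == ["location"]  # properties/required are un-nested

Note also that _messages_to_conversation is now a thin wrapper over Conversation.from_openai_chat, so message parsing lives in prompt.py instead of being duplicated here.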
@@ -114,7 +138,7 @@ def _response_to_chat_completion(
            role="assistant",
            content=response.error_message or "Error occurred",
        )
-        choice = CompletionChoice(
+        choice = ChatCompletionChoice(
            index=0,
            message=message,
            finish_reason="stop",  # or could use "error" but that's not standard
@@ -164,7 +188,7 @@ def _response_to_chat_completion(
    )

    # Create choice
-    choice = CompletionChoice(
+    choice = ChatCompletionChoice(
        index=0,
        message=message,
        finish_reason=response.finish_reason or "stop",
@@ -329,7 +353,7 @@ class MockCompletions:
            ChatCompletion (non-streaming) or AsyncIterator[ChatCompletionChunk] (streaming)
        """
        # Get or create client for this model
-        client = self._parent._get_or_create_client(model)
+        client: _LLMClient = self._parent._get_or_create_client(model)

        # Convert messages to Conversation
        conversation = _messages_to_conversation(messages)
@@ -360,29 +384,121 @@ class MockCompletions:
         # Convert tools if provided
         lm_tools = None
         if tools:
-            # For now, just pass through - lm-deluge will handle the format
-            lm_tools = tools
+            # Convert from OpenAI format to lm-deluge Tool objects
+            lm_tools = _openai_tools_to_lm_deluge(tools)

         # Execute request
         if stream:
-            # Streaming mode
-            request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
-            # Note: client.stream() is an async generator, not a coroutine
-            # We can directly wrap it
-            stream_iter = client.stream(conversation, tools=lm_tools)
-            # Verify it's a generator, not a coroutine
-            if hasattr(stream_iter, "__anext__"):
-                return _AsyncStreamWrapper(stream_iter, model, request_id)
-            else:
-                # If it's a coroutine, we need to await it first
-                # But this shouldn't happen with the current implementation
-                raise TypeError(f"Expected async generator, got {type(stream_iter)}")
+            raise RuntimeError("streaming not supported")
         else:
             # Non-streaming mode
-            response = await client.start(conversation, tools=lm_tools)
+            response = await client.start(
+                conversation,
+                tools=lm_tools,  # type: ignore
+                cache=self._parent.cache_pattern,  # type: ignore
+            )
             return _response_to_chat_completion(response, model)


+class MockTextCompletions:
+    """Mock text completions resource for legacy completions API."""
+
+    def __init__(self, parent: "MockAsyncOpenAI"):
+        self._parent = parent
+
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: str | list[str],
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        top_p: float | None = None,
+        seed: int | None = None,
+        n: int | None = None,
+        stop: str | list[str] | None = None,
+        **kwargs: Any,
+    ) -> Completion:
+        """
+        Create a text completion using lm-deluge's LLMClient.
+
+        Args:
+            model: Model identifier
+            prompt: Text prompt or list of prompts
+            temperature: Sampling temperature
+            max_tokens: Max tokens to generate
+            top_p: Nucleus sampling parameter
+            seed: Random seed
+            n: Number of completions (currently ignored, always returns 1)
+            stop: Stop sequences
+            **kwargs: Other parameters
+
+        Returns:
+            Completion object
+        """
+        # Get or create client for this model
+        client: _LLMClient = self._parent._get_or_create_client(model)
+
+        # Handle single prompt
+        if isinstance(prompt, list):
+            # For now, just use the first prompt
+            prompt = prompt[0] if prompt else ""
+
+        # Convert prompt to Conversation
+        conversation = Conversation([Message(role="user", parts=[Text(prompt)])])
+
+        # Build sampling params
+        sampling_kwargs = {}
+        if temperature is not None:
+            sampling_kwargs["temperature"] = temperature
+        if max_tokens is not None:
+            sampling_kwargs["max_new_tokens"] = max_tokens
+        if top_p is not None:
+            sampling_kwargs["top_p"] = top_p
+        if seed is not None:
+            sampling_kwargs["seed"] = seed
+
+        # Create client with merged params if needed
+        if sampling_kwargs:
+            merged_params = {**self._parent._default_sampling_params, **sampling_kwargs}
+            client = self._parent._create_client_with_params(model, merged_params)
+
+        # Execute request
+        response = await client.start(conversation, cache=self._parent.cache_pattern)  # type: ignore
+
+        # Convert to Completion format
+        completion_text = None
+        if response.content:
+            text_parts = [p.text for p in response.content.parts if isinstance(p, Text)]
+            if text_parts:
+                completion_text = "".join(text_parts)
+
+        # Create choice
+        choice = TextCompletionChoice(
+            index=0,
+            text=completion_text or "",
+            finish_reason=response.finish_reason or "stop",  # type: ignore
+        )
+
+        # Create usage
+        usage = None
+        if response.usage:
+            usage = CompletionUsage(
+                prompt_tokens=response.usage.input_tokens,
+                completion_tokens=response.usage.output_tokens,
+                total_tokens=response.usage.input_tokens + response.usage.output_tokens,
+            )
+
+        return Completion(
+            id=f"cmpl-{uuid.uuid4().hex[:24]}",
+            choices=[choice],
+            created=int(time.time()),
+            model=model,
+            object="text_completion",
+            usage=usage,
+        )
+
+
 class MockChat:
     """Mock chat resource that provides access to completions."""

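A usage sketch of the new legacy-completions resource, mirroring the test_text_completions test added below (the model name and prompt are illustrative):

    client = MockAsyncOpenAI(model="gpt-4o-mini")
    completion = await client.completions.create(
        model="gpt-4o-mini",
        prompt="Say hello in one word:",
        max_tokens=10,
    )
    # completion.object == "text_completion"; list prompts use only the first entry
    print(completion.choices[0].text)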
@@ -414,23 +530,52 @@ class MockAsyncOpenAI:

     Args:
         model: Default model to use (can be overridden in create())
+        api_key: API key (optional, for compatibility)
+        organization: Organization ID (optional, for compatibility)
+        project: Project ID (optional, for compatibility)
+        base_url: Base URL (defaults to OpenAI's URL for compatibility)
+        timeout: Request timeout (optional, for compatibility)
+        max_retries: Max retries (defaults to 2 for compatibility)
+        default_headers: Default headers (optional, for compatibility)
         temperature: Default temperature
         max_completion_tokens: Default max completion tokens
         top_p: Default top_p
+        seed: Default seed for deterministic sampling
         **kwargs: Additional parameters passed to LLMClient
     """

     def __init__(
         self,
         *,
-        model: str,
+        model: str | None = None,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | None = None,
+        timeout: float | None = None,
+        max_retries: int | None = None,
+        default_headers: dict[str, str] | None = None,
+        http_client: Any | None = None,
         temperature: float | None = None,
         max_completion_tokens: int | None = None,
         top_p: float | None = None,
         seed: int | None = None,
+        cache_pattern: CachePattern | None = None,
         **kwargs: Any,
     ):
-        self._default_model = model
+        # OpenAI-compatible attributes
+        self.api_key = api_key
+        self.organization = organization
+        self.project = project
+        self.base_url = base_url or "https://api.openai.com/v1"
+        self.timeout = timeout
+        self.max_retries = max_retries or 2
+        self.default_headers = default_headers
+        self.http_client = http_client
+        self.cache_pattern = cache_pattern
+
+        # Internal attributes
+        self._default_model = model or "gpt-4o-mini"
         self._default_sampling_params = {}

         if temperature is not None:
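The widened constructor is what lets callers expecting AsyncOpenAI's signature instantiate this class unchanged; a sketch (values illustrative, drawn from the tests below):

    client = MockAsyncOpenAI(
        base_url="https://api.openai.com/v1",
        api_key="test-key",
        max_retries=3,
    )
    assert client.max_retries == 3
    # model may now be omitted; the default falls back to "gpt-4o-mini"
    assert client._default_model == "gpt-4o-mini"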
@@ -449,10 +594,11 @@ class MockAsyncOpenAI:
         self._clients: dict[str, Any] = {}

         # Create the default client
-        self._clients[model] = self._create_client(model)
+        self._clients[self._default_model] = self._create_client(self._default_model)

         # Create nested resources
         self._chat = MockChat(self)
+        self._completions = MockTextCompletions(self)

     def _create_client(self, model: str) -> Any:
         """Create a new LLMClient for the given model."""
@@ -480,3 +626,18 @@ class MockAsyncOpenAI:
     def chat(self) -> MockChat:
         """Access the chat resource."""
         return self._chat
+
+    @property
+    def completions(self) -> MockTextCompletions:
+        """Access the text completions resource."""
+        return self._completions
+
+    async def close(self) -> None:
+        """
+        Close the client and clean up resources.
+
+        This is provided for compatibility with AsyncOpenAI's close() method.
+        Currently a no-op as LLMClient instances don't need explicit cleanup.
+        """
+        # No cleanup needed for LLMClient instances
+        pass
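Because close() is a no-op, it is safe to call unconditionally; a sketch of the parity it provides with AsyncOpenAI (the try/finally shape is illustrative):

    client = MockAsyncOpenAI(model="gpt-4o-mini")
    try:
        ...  # issue requests
    finally:
        await client.close()  # no-op, kept for AsyncOpenAI compatibility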
--- lm_deluge-0.0.69/src/lm_deluge/prompt.py
+++ lm_deluge-0.0.71/src/lm_deluge/prompt.py
@@ -848,14 +848,16 @@ class Conversation:
         if content is None:
             return parts
         if isinstance(content, str):
-            parts.append(Text(content))
+            if content.strip():
+                parts.append(Text(content))
             return parts

         for block in content:
             block_type = block.get("type")
             if block_type in text_types:
                 text_value = block.get("text") or block.get(block_type) or ""
-                parts.append(Text(text_value))
+                if text_value.strip():
+                    parts.append(Text(text_value))
             elif block_type in image_types:
                 parts.append(_to_image_from_url(block))
             elif block_type in file_types:
@@ -1001,7 +1003,8 @@ class Conversation:
                 )
             )

-        conversation_messages.append(Message(mapped_role, parts))
+        if parts:
+            conversation_messages.append(Message(mapped_role, parts))

         return cls(conversation_messages)

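Taken together, the two prompt.py changes filter whitespace-only text blocks and then drop any message left with no parts; a sketch of the observable behavior (assuming these guards sit on the from_openai_chat path, as the mock_openai.py change above suggests):

    conv = Conversation.from_openai_chat(
        [
            {"role": "user", "content": "hello"},
            {"role": "assistant", "content": "   "},  # whitespace-only: no Text part
        ]
    )
    assert len(conv.messages) == 1  # the now-empty assistant message is dropped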
--- lm_deluge-0.0.69/PKG-INFO
+++ lm_deluge-0.0.71/src/lm_deluge.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.69
+Version: 0.0.71
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
--- lm_deluge-0.0.69/tests/test_mock_openai.py
+++ lm_deluge-0.0.71/tests/test_mock_openai.py
@@ -34,7 +34,21 @@ async def test_client_structure():
     assert hasattr(client.chat.completions, "create")
     assert callable(client.chat.completions.create)

+    # Verify OpenAI-compatible attributes exist
+    assert hasattr(client, "base_url")
+    assert hasattr(client, "api_key")
+    assert hasattr(client, "organization")
+    assert hasattr(client, "project")
+    assert hasattr(client, "timeout")
+    assert hasattr(client, "max_retries")
+    assert hasattr(client, "default_headers")
+
+    # Verify default values
+    assert client.base_url == "https://api.openai.com/v1"
+    assert client.max_retries == 2
+
     print("  Client structure verified - has chat.completions.create")
+    print("  OpenAI-compatible attributes verified (base_url, api_key, etc.)")


 async def test_basic_completion():
@@ -308,6 +322,212 @@ async def test_json_mode():
         print(f"  ⚠️ Response was not valid JSON: {content}")


+async def test_text_completions():
+    """Test text completions API (legacy API)."""
+    print("\n✓ Testing text completions...")
+
+    # Skip if no API key
+    if not os.getenv("OPENAI_API_KEY"):
+        print("  ⚠️ Skipping: OPENAI_API_KEY not set")
+        return
+
+    client = MockAsyncOpenAI(model="gpt-4o-mini")
+    response = await client.completions.create(
+        model="gpt-4o-mini",
+        prompt="Say hello in one word:",
+        temperature=0.0,
+        max_tokens=10,
+    )
+
+    assert isinstance(response, __import__("openai").types.Completion)
+    assert response.object == "text_completion"
+    assert len(response.choices) == 1
+    assert response.choices[0].text is not None
+    print(f"  Text completion: {response.choices[0].text}")
+
+
+async def test_close_method():
+    """Test that close() method exists and works."""
+    print("\n✓ Testing close() method...")
+
+    client = MockAsyncOpenAI(model="gpt-4o-mini")
+    # Should not raise an error
+    await client.close()
+    print("  close() method works")
+
+
+async def test_subclassing():
+    """Test that MockAsyncOpenAI can be subclassed."""
+    print("\n✓ Testing subclassing...")
+
+    class CustomClient(MockAsyncOpenAI):
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+            self.custom_attr = "custom"
+
+    client = CustomClient(model="gpt-4o-mini", api_key="test-key")
+    assert client.custom_attr == "custom"
+    assert client.api_key == "test-key"
+    assert hasattr(client, "chat")
+    print("  Subclassing works correctly")
+
+
+async def test_http_client_parameter():
+    """Test that http_client parameter is accepted."""
+    print("\n✓ Testing http_client parameter...")
+
+    # Mock http client
+    class MockHttpClient:
+        pass
+
+    http_client = MockHttpClient()
+    client = MockAsyncOpenAI(model="gpt-4o-mini", http_client=http_client)
+    assert client.http_client is http_client
+    print("  http_client parameter works")
+
+
+async def test_exception_imports():
+    """Test that OpenAI exceptions are available."""
+    print("\n✓ Testing exception imports...")
+
+    from lm_deluge.mock_openai import (
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        RateLimitError,
+    )
+
+    # Verify they are the right types
+    assert APIError is not None
+    assert APITimeoutError is not None
+    assert BadRequestError is not None
+    assert RateLimitError is not None
+    print("  All exception types imported successfully")
+
+
+async def test_verifiers_like_initialization():
+    """Test initialization pattern used by Verifiers library."""
+    print("\n✓ Testing Verifiers-like initialization...")
+
+    # Simulate Verifiers initialization pattern
+    client = MockAsyncOpenAI(
+        base_url="https://api.openai.com/v1",
+        api_key="test-key",
+        max_retries=3,
+        http_client=None,
+    )
+
+    # Verify attributes
+    assert str(client.base_url) == "https://api.openai.com/v1"
+    assert client.api_key == "test-key"
+    assert client.max_retries == 3
+
+    print(f"  base_url: {client.base_url}")
+    print(f"  api_key: {client.api_key}")
+    print("  Verifiers-like initialization works")
+
+
+async def test_tool_conversion():
+    """Test that OpenAI tools are converted to lm-deluge format."""
+    print("\n✓ Testing tool conversion...")
+
+    from lm_deluge.mock_openai import _openai_tools_to_lm_deluge
+
+    # OpenAI tool format
+    openai_tools = [
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get the weather for a location",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "location": {
+                            "type": "string",
+                            "description": "The city name",
+                        },
+                        "units": {
+                            "type": "string",
+                            "enum": ["celsius", "fahrenheit"],
+                        },
+                    },
+                    "required": ["location"],
+                },
+            },
+        }
+    ]
+
+    # Convert to lm-deluge format
+    lm_tools = _openai_tools_to_lm_deluge(openai_tools)
+
+    # Verify conversion
+    assert len(lm_tools) == 1
+    tool = lm_tools[0]
+    assert tool.name == "get_weather"
+    assert tool.description == "Get the weather for a location"
+    assert "location" in tool.parameters
+    assert "units" in tool.parameters
+    assert tool.required == ["location"]
+
+    print("  Tool conversion works correctly")
+    print(f"  Tool name: {tool.name}")
+    print(f"  Parameters: {list(tool.parameters.keys())}")
+
+
+async def test_tool_result_role_conversion():
+    """Test that tool result messages are converted from role='tool' to role='user'."""
+    print("\n✓ Testing tool result role conversion...")
+
+    from lm_deluge.mock_openai import _messages_to_conversation
+
+    # OpenAI format with tool call and result
+    openai_messages = [
+        {"role": "user", "content": "What's the weather?"},
+        {
+            "role": "assistant",
+            "content": None,
+            "tool_calls": [
+                {
+                    "id": "call_123",
+                    "type": "function",
+                    "function": {
+                        "name": "get_weather",
+                        "arguments": '{"location": "NYC"}',
+                    },
+                }
+            ],
+        },
+        {
+            "role": "tool",
+            "tool_call_id": "call_123",
+            "content": "The weather is sunny, 72°F",
+        },
+        {"role": "assistant", "content": "The weather in NYC is sunny and 72°F."},
+    ]
+
+    # Convert to lm-deluge format
+    conversation = _messages_to_conversation(openai_messages)
+
+    # Verify conversion
+    assert len(conversation.messages) == 4
+    assert conversation.messages[0].role == "user"
+    assert conversation.messages[1].role == "assistant"
+    # The tool result message should be converted to role="user" (not "tool")
+    assert (
+        conversation.messages[2].role == "user"
+    ), "Tool result should be in user message"
+    # Message should have ONLY the ToolResult part (not Text)
+    assert len(conversation.messages[2].parts) == 1
+    assert hasattr(conversation.messages[2].parts[0], "tool_call_id")
+    assert conversation.messages[2].parts[0].tool_call_id == "call_123"
+    assert conversation.messages[2].parts[0].result == "The weather is sunny, 72°F"
+    assert conversation.messages[3].role == "assistant"
+
+    print("  Tool result role conversion works correctly")
+    print("  OpenAI role='tool' → lm-deluge role='user' ✓")
+
+
 async def run_all_tests():
     """Run all tests."""
     print("=" * 60)
@@ -315,7 +535,16 @@ async def run_all_tests():
     print("=" * 60)

     tests = [
-        test_client_structure,  # No API key needed
+        # No API key needed
+        test_client_structure,
+        test_close_method,
+        test_subclassing,
+        test_http_client_parameter,
+        test_exception_imports,
+        test_verifiers_like_initialization,
+        test_tool_conversion,
+        test_tool_result_role_conversion,
+        # API key needed
         test_basic_completion,
         test_streaming,
         test_tool_calling,
@@ -324,6 +553,7 @@ async def run_all_tests():
         test_multi_turn_conversation,
         test_with_different_provider,
         test_json_mode,
+        test_text_completions,
     ]

     failed = []