lm-deluge 0.0.69__py3-none-any.whl → 0.0.70__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
lm_deluge/__init__.py CHANGED
@@ -4,7 +4,13 @@ from .prompt import Conversation, Message
 from .tool import Tool, ToolParams
 
 try:
-    from .mock_openai import MockAsyncOpenAI  # noqa
+    from .mock_openai import (  # noqa
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        MockAsyncOpenAI,
+        RateLimitError,
+    )
 
     _has_openai = True
 except ImportError:
@@ -24,4 +30,12 @@ __all__ = [
 ]
 
 if _has_openai:
-    __all__.append("MockAsyncOpenAI")
+    __all__.extend(
+        [
+            "MockAsyncOpenAI",
+            "APIError",
+            "APITimeoutError",
+            "BadRequestError",
+            "RateLimitError",
+        ]
+    )
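
With this change, code written against the OpenAI SDK can import the SDK's exception types directly from lm_deluge when the optional openai extra is installed. A minimal sketch of the intended usage (the model name and prompt are illustrative, and it assumes the mock mirrors AsyncOpenAI's chat.completions.create signature):

    import asyncio

    from lm_deluge import MockAsyncOpenAI, RateLimitError

    async def main() -> None:
        client = MockAsyncOpenAI(model="gpt-4o-mini")
        try:
            response = await client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "Hello!"}],
            )
            print(response.choices[0].message.content)
        except RateLimitError:
            # Same exception class the real AsyncOpenAI client raises
            print("rate limited, back off and retry")

    asyncio.run(main())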
lm_deluge/mock_openai.py CHANGED
@@ -25,13 +25,20 @@ import uuid
 from typing import Any, AsyncIterator, Literal, Union, overload
 
 try:
+    from openai import (
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        RateLimitError,
+    )
+    from openai.types import Completion
     from openai.types.chat import (
         ChatCompletion,
         ChatCompletionChunk,
         ChatCompletionMessage,
         ChatCompletionMessageToolCall,
     )
-    from openai.types.chat.chat_completion import Choice as CompletionChoice
+    from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
     from openai.types.chat.chat_completion_chunk import (
         Choice as ChunkChoice,
         ChoiceDelta,
@@ -39,6 +46,7 @@ try:
         ChoiceDeltaToolCallFunction,
     )
     from openai.types.chat.chat_completion_message_tool_call import Function
+    from openai.types.completion_choice import CompletionChoice as TextCompletionChoice
     from openai.types.completion_usage import CompletionUsage
 except ImportError:
     raise ImportError(
@@ -46,6 +54,15 @@ except ImportError:
         "Install it with: pip install lm-deluge[openai]"
     )
 
+# Re-export exceptions for compatibility
+__all__ = [
+    "MockAsyncOpenAI",
+    "APIError",
+    "APITimeoutError",
+    "BadRequestError",
+    "RateLimitError",
+]
+
 from lm_deluge.client import LLMClient
 from lm_deluge.prompt import Conversation, Message, Part, Text, ToolCall, ToolResult
@@ -114,7 +131,7 @@ def _response_to_chat_completion(
             role="assistant",
             content=response.error_message or "Error occurred",
         )
-        choice = CompletionChoice(
+        choice = ChatCompletionChoice(
             index=0,
             message=message,
             finish_reason="stop",  # or could use "error" but that's not standard
@@ -164,7 +181,7 @@ def _response_to_chat_completion(
     )
 
     # Create choice
-    choice = CompletionChoice(
+    choice = ChatCompletionChoice(
         index=0,
         message=message,
         finish_reason=response.finish_reason or "stop",
@@ -383,6 +400,105 @@ class MockCompletions:
         return _response_to_chat_completion(response, model)
 
 
+class MockTextCompletions:
+    """Mock text completions resource for legacy completions API."""
+
+    def __init__(self, parent: "MockAsyncOpenAI"):
+        self._parent = parent
+
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: str | list[str],
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        top_p: float | None = None,
+        seed: int | None = None,
+        n: int | None = None,
+        stop: str | list[str] | None = None,
+        **kwargs: Any,
+    ) -> Completion:
+        """
+        Create a text completion using lm-deluge's LLMClient.
+
+        Args:
+            model: Model identifier
+            prompt: Text prompt or list of prompts
+            temperature: Sampling temperature
+            max_tokens: Max tokens to generate
+            top_p: Nucleus sampling parameter
+            seed: Random seed
+            n: Number of completions (currently ignored, always returns 1)
+            stop: Stop sequences
+            **kwargs: Other parameters
+
+        Returns:
+            Completion object
+        """
+        # Get or create client for this model
+        client = self._parent._get_or_create_client(model)
+
+        # Handle single prompt
+        if isinstance(prompt, list):
+            # For now, just use the first prompt
+            prompt = prompt[0] if prompt else ""
+
+        # Convert prompt to Conversation
+        conversation = Conversation([Message(role="user", parts=[Text(prompt)])])
+
+        # Build sampling params
+        sampling_kwargs = {}
+        if temperature is not None:
+            sampling_kwargs["temperature"] = temperature
+        if max_tokens is not None:
+            sampling_kwargs["max_new_tokens"] = max_tokens
+        if top_p is not None:
+            sampling_kwargs["top_p"] = top_p
+        if seed is not None:
+            sampling_kwargs["seed"] = seed
+
+        # Create client with merged params if needed
+        if sampling_kwargs:
+            merged_params = {**self._parent._default_sampling_params, **sampling_kwargs}
+            client = self._parent._create_client_with_params(model, merged_params)
+
+        # Execute request
+        response = await client.start(conversation)
+
+        # Convert to Completion format
+        completion_text = None
+        if response.content:
+            text_parts = [p.text for p in response.content.parts if isinstance(p, Text)]
+            if text_parts:
+                completion_text = "".join(text_parts)
+
+        # Create choice
+        choice = TextCompletionChoice(
+            index=0,
+            text=completion_text or "",
+            finish_reason=response.finish_reason or "stop",
+        )
+
+        # Create usage
+        usage = None
+        if response.usage:
+            usage = CompletionUsage(
+                prompt_tokens=response.usage.input_tokens,
+                completion_tokens=response.usage.output_tokens,
+                total_tokens=response.usage.input_tokens + response.usage.output_tokens,
+            )
+
+        return Completion(
+            id=f"cmpl-{uuid.uuid4().hex[:24]}",
+            choices=[choice],
+            created=int(time.time()),
+            model=model,
+            object="text_completion",
+            usage=usage,
+        )
+
+
 class MockChat:
     """Mock chat resource that provides access to completions."""
 
@@ -414,23 +530,50 @@ class MockAsyncOpenAI:
 
     Args:
         model: Default model to use (can be overridden in create())
+        api_key: API key (optional, for compatibility)
+        organization: Organization ID (optional, for compatibility)
+        project: Project ID (optional, for compatibility)
+        base_url: Base URL (defaults to OpenAI's URL for compatibility)
+        timeout: Request timeout (optional, for compatibility)
+        max_retries: Max retries (defaults to 2 for compatibility)
+        default_headers: Default headers (optional, for compatibility)
         temperature: Default temperature
         max_completion_tokens: Default max completion tokens
         top_p: Default top_p
+        seed: Default seed for deterministic sampling
         **kwargs: Additional parameters passed to LLMClient
     """
 
     def __init__(
         self,
         *,
-        model: str,
+        model: str | None = None,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | None = None,
+        timeout: float | None = None,
+        max_retries: int | None = None,
+        default_headers: dict[str, str] | None = None,
+        http_client: Any | None = None,
         temperature: float | None = None,
         max_completion_tokens: int | None = None,
         top_p: float | None = None,
         seed: int | None = None,
         **kwargs: Any,
     ):
-        self._default_model = model
+        # OpenAI-compatible attributes
+        self.api_key = api_key
+        self.organization = organization
+        self.project = project
+        self.base_url = base_url or "https://api.openai.com/v1"
+        self.timeout = timeout
+        self.max_retries = max_retries or 2
+        self.default_headers = default_headers
+        self.http_client = http_client
+
+        # Internal attributes
+        self._default_model = model or "gpt-4o-mini"
         self._default_sampling_params = {}
 
         if temperature is not None:
@@ -449,10 +592,11 @@ class MockAsyncOpenAI:
         self._clients: dict[str, Any] = {}
 
         # Create the default client
-        self._clients[model] = self._create_client(model)
+        self._clients[self._default_model] = self._create_client(self._default_model)
 
         # Create nested resources
         self._chat = MockChat(self)
+        self._completions = MockTextCompletions(self)
 
     def _create_client(self, model: str) -> Any:
         """Create a new LLMClient for the given model."""
@@ -480,3 +624,18 @@ class MockAsyncOpenAI:
     def chat(self) -> MockChat:
         """Access the chat resource."""
         return self._chat
+
+    @property
+    def completions(self) -> MockTextCompletions:
+        """Access the text completions resource."""
+        return self._completions
+
+    async def close(self) -> None:
+        """
+        Close the client and clean up resources.
+
+        This is provided for compatibility with AsyncOpenAI's close() method.
+        Currently a no-op as LLMClient instances don't need explicit cleanup.
+        """
+        # No cleanup needed for LLMClient instances
+        pass
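
Taken together, these changes add a legacy text-completions resource plus AsyncOpenAI-compatible constructor arguments and close() semantics. A minimal sketch of the new surface (argument values are illustrative; api_key and the other compatibility kwargs are stored but not used by the mock):

    import asyncio

    from lm_deluge.mock_openai import MockAsyncOpenAI

    async def main() -> None:
        # The constructor now tolerates the standard AsyncOpenAI kwargs
        client = MockAsyncOpenAI(model="gpt-4o-mini", api_key="unused", max_retries=2)

        # Legacy completions API, routed through lm-deluge's LLMClient
        completion = await client.completions.create(
            model="gpt-4o-mini",
            prompt="Say hello in one word:",
            max_tokens=16,
            temperature=0.0,
        )
        print(completion.choices[0].text)

        # No-op, provided so drop-in callers can call close() as usual
        await client.close()

    asyncio.run(main())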
lm_deluge-0.0.70.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.69
+Version: 0.0.70
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10
lm_deluge-0.0.70.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-lm_deluge/__init__.py,sha256=bGF2eVo60StVEGjt5lgVhSoJmYBlvQTUa8DR96sNh0w,558
+lm_deluge/__init__.py,sha256=zF5lAitfgJ8A28IXJ5BE9OUCqGOqSnGOWn3ZIlizNyY,822
 lm_deluge/batches.py,sha256=Km6QM5_7BlF2qEyo4WPlhkaZkpzrLqf50AaveHXQOoY,25127
 lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
 lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
@@ -8,7 +8,7 @@ lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/file.py,sha256=PTmlJQ-IaYcYUFun9V0bJ1NPVP84edJrR0hvCMWFylY,19697
 lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
-lm_deluge/mock_openai.py,sha256=OelIYWGBf5vBZXJOLaz54s5gE-HPIg1kPXARnv4NoKg,16592
+lm_deluge/mock_openai.py,sha256=dYZDBKgTepQ-yd5zPRYBgMRXO6TeLqiM1fDQe622Ono,22110
 lm_deluge/prompt.py,sha256=Bgszws8-3GPefiVRa-Mht4tfyfoqD_hV5MX1nrbkJn0,63465
 lm_deluge/request_context.py,sha256=cBayMFWupWhde2OjRugW3JH-Gin-WFGc6DK2Mb4Prdc,2576
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
@@ -69,8 +69,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.69.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
-lm_deluge-0.0.69.dist-info/METADATA,sha256=BMFkIulQwTPBEtqViIDyY6RjaMH4hZBzC-4qTCXpGQY,13514
-lm_deluge-0.0.69.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lm_deluge-0.0.69.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
-lm_deluge-0.0.69.dist-info/RECORD,,
+lm_deluge-0.0.70.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.70.dist-info/METADATA,sha256=URQWK2LB1itY_viE7mv0ijJOfUolZMDRzvK-Pdzmn_o,13514
+lm_deluge-0.0.70.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.70.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.70.dist-info/RECORD,,