lm-deluge 0.0.69__py3-none-any.whl → 0.0.71__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lm_deluge/__init__.py +16 -2
- lm_deluge/mock_openai.py +228 -67
- lm_deluge/prompt.py +6 -3
- {lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/METADATA +1 -1
- {lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/RECORD +8 -8
- {lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/top_level.txt +0 -0
lm_deluge/__init__.py
CHANGED
@@ -4,7 +4,13 @@ from .prompt import Conversation, Message
 from .tool import Tool, ToolParams

 try:
-    from .mock_openai import
+    from .mock_openai import (  # noqa
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        MockAsyncOpenAI,
+        RateLimitError,
+    )

     _has_openai = True
 except ImportError:
@@ -24,4 +30,12 @@ __all__ = [
 ]

 if _has_openai:
-    __all__.
+    __all__.extend(
+        [
+            "MockAsyncOpenAI",
+            "APIError",
+            "APITimeoutError",
+            "BadRequestError",
+            "RateLimitError",
+        ]
+    )
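
For context, the expanded export means downstream code can pull the mock client and the OpenAI exception types straight from the package root whenever the optional openai dependency is installed. A minimal sketch of feature-detecting that import (the have_mock flag is illustrative, not part of the package):

# Sketch: import the mock client from the package root, degrading gracefully
# when the optional `openai` extra is missing (mirrors the try/except guard above).
try:
    from lm_deluge import MockAsyncOpenAI, APIError, RateLimitError
    have_mock = True
except ImportError:  # names are absent from __all__ without the openai extra
    have_mock = False

if have_mock:
    client = MockAsyncOpenAI(model="gpt-4o-mini")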
lm_deluge/mock_openai.py
CHANGED
@@ -25,20 +25,30 @@ import uuid
 from typing import Any, AsyncIterator, Literal, Union, overload

 try:
+    from openai import (
+        APIError,
+        APITimeoutError,
+        BadRequestError,
+        RateLimitError,
+    )
+    from openai.types import Completion
     from openai.types.chat import (
         ChatCompletion,
         ChatCompletionChunk,
         ChatCompletionMessage,
         ChatCompletionMessageToolCall,
     )
-    from openai.types.chat.chat_completion import Choice as
+    from openai.types.chat.chat_completion import Choice as ChatCompletionChoice
     from openai.types.chat.chat_completion_chunk import (
         Choice as ChunkChoice,
+    )
+    from openai.types.chat.chat_completion_chunk import (
         ChoiceDelta,
         ChoiceDeltaToolCall,
         ChoiceDeltaToolCallFunction,
     )
     from openai.types.chat.chat_completion_message_tool_call import Function
+    from openai.types.completion_choice import CompletionChoice as TextCompletionChoice
     from openai.types.completion_usage import CompletionUsage
 except ImportError:
     raise ImportError(
@@ -46,56 +56,70 @@ except ImportError:
         "Install it with: pip install lm-deluge[openai]"
     )

-
-
+# Re-export exceptions for compatibility
+__all__ = [
+    "MockAsyncOpenAI",
+    "APIError",
+    "APITimeoutError",
+    "BadRequestError",
+    "RateLimitError",
+]

+from lm_deluge.client import LLMClient, _LLMClient
+from lm_deluge.prompt import CachePattern, Conversation, Message, Text, ToolCall
+from lm_deluge.tool import Tool

-def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
-    """Convert OpenAI messages format to lm-deluge Conversation."""
-    conv_messages = []
-
-    for msg in messages:
-        role = msg["role"]
-        content = msg.get("content")
-        tool_calls = msg.get("tool_calls")
-        tool_call_id = msg.get("tool_call_id")
-
-        parts: list[Part] = []
-
-        # Handle regular content
-        if content:
-            if isinstance(content, str):
-                parts.append(Text(content))
-            elif isinstance(content, list):
-                # Multi-part content (text, images, etc.)
-                for item in content:
-                    if item.get("type") == "text":
-                        parts.append(Text(item["text"]))
-                    # Could add image support here later
-
-        # Handle tool calls (from assistant)
-        if tool_calls:
-            for tc in tool_calls:
-                # Parse arguments from JSON string to dict
-                args_str = tc["function"]["arguments"]
-                args_dict = (
-                    json.loads(args_str) if isinstance(args_str, str) else args_str
-                )
-                parts.append(
-                    ToolCall(
-                        id=tc["id"],
-                        name=tc["function"]["name"],
-                        arguments=args_dict,
-                    )
-                )

-
-
-
+def _openai_tools_to_lm_deluge(tools: list[dict[str, Any]]) -> list[Tool]:
+    """
+    Convert OpenAI tool format to lm-deluge Tool objects.
+
+    OpenAI format:
+        {
+            "type": "function",
+            "function": {
+                "name": "get_weather",
+                "description": "Get weather",
+                "parameters": {
+                    "type": "object",
+                    "properties": {...},
+                    "required": [...]
+                }
+            }
+        }
+
+    lm-deluge format:
+        Tool(
+            name="get_weather",
+            description="Get weather",
+            parameters={...properties...},
+            required=[...]
+        )
+    """
+    lm_tools = []
+    for tool in tools:
+        if tool.get("type") == "function":
+            func = tool["function"]
+            params_schema = func.get("parameters", {})

-
+            # Extract properties and required from the parameters schema
+            properties = params_schema.get("properties", {})
+            required = params_schema.get("required", [])

-
+            lm_tool = Tool(
+                name=func["name"],
+                description=func.get("description"),
+                parameters=properties if properties else None,
+                required=required,
+            )
+            lm_tools.append(lm_tool)
+
+    return lm_tools
+
+
+def _messages_to_conversation(messages: list[dict[str, Any]]) -> Conversation:
+    """Convert OpenAI messages format to lm-deluge Conversation."""
+    return Conversation.from_openai_chat(messages)


 def _response_to_chat_completion(
@@ -114,7 +138,7 @@ def _response_to_chat_completion(
             role="assistant",
             content=response.error_message or "Error occurred",
         )
-        choice =
+        choice = ChatCompletionChoice(
             index=0,
             message=message,
             finish_reason="stop",  # or could use "error" but that's not standard
@@ -164,7 +188,7 @@ def _response_to_chat_completion(
     )

     # Create choice
-    choice =
+    choice = ChatCompletionChoice(
         index=0,
         message=message,
         finish_reason=response.finish_reason or "stop",
@@ -329,7 +353,7 @@ class MockCompletions:
             ChatCompletion (non-streaming) or AsyncIterator[ChatCompletionChunk] (streaming)
         """
         # Get or create client for this model
-        client = self._parent._get_or_create_client(model)
+        client: _LLMClient = self._parent._get_or_create_client(model)

         # Convert messages to Conversation
         conversation = _messages_to_conversation(messages)
@@ -360,29 +384,121 @@ class MockCompletions:
         # Convert tools if provided
         lm_tools = None
         if tools:
-            #
-            lm_tools = tools
+            # Convert from OpenAI format to lm-deluge Tool objects
+            lm_tools = _openai_tools_to_lm_deluge(tools)

         # Execute request
         if stream:
-
-            request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
-            # Note: client.stream() is an async generator, not a coroutine
-            # We can directly wrap it
-            stream_iter = client.stream(conversation, tools=lm_tools)
-            # Verify it's a generator, not a coroutine
-            if hasattr(stream_iter, "__anext__"):
-                return _AsyncStreamWrapper(stream_iter, model, request_id)
-            else:
-                # If it's a coroutine, we need to await it first
-                # But this shouldn't happen with the current implementation
-                raise TypeError(f"Expected async generator, got {type(stream_iter)}")
+            raise RuntimeError("streaming not supported")
         else:
             # Non-streaming mode
-            response = await client.start(
+            response = await client.start(
+                conversation,
+                tools=lm_tools,  # type: ignore
+                cache=self._parent.cache_pattern,  # type: ignore
+            )
             return _response_to_chat_completion(response, model)


+class MockTextCompletions:
+    """Mock text completions resource for legacy completions API."""
+
+    def __init__(self, parent: "MockAsyncOpenAI"):
+        self._parent = parent
+
+    async def create(
+        self,
+        *,
+        model: str,
+        prompt: str | list[str],
+        temperature: float | None = None,
+        max_tokens: int | None = None,
+        top_p: float | None = None,
+        seed: int | None = None,
+        n: int | None = None,
+        stop: str | list[str] | None = None,
+        **kwargs: Any,
+    ) -> Completion:
+        """
+        Create a text completion using lm-deluge's LLMClient.
+
+        Args:
+            model: Model identifier
+            prompt: Text prompt or list of prompts
+            temperature: Sampling temperature
+            max_tokens: Max tokens to generate
+            top_p: Nucleus sampling parameter
+            seed: Random seed
+            n: Number of completions (currently ignored, always returns 1)
+            stop: Stop sequences
+            **kwargs: Other parameters
+
+        Returns:
+            Completion object
+        """
+        # Get or create client for this model
+        client: _LLMClient = self._parent._get_or_create_client(model)
+
+        # Handle single prompt
+        if isinstance(prompt, list):
+            # For now, just use the first prompt
+            prompt = prompt[0] if prompt else ""
+
+        # Convert prompt to Conversation
+        conversation = Conversation([Message(role="user", parts=[Text(prompt)])])
+
+        # Build sampling params
+        sampling_kwargs = {}
+        if temperature is not None:
+            sampling_kwargs["temperature"] = temperature
+        if max_tokens is not None:
+            sampling_kwargs["max_new_tokens"] = max_tokens
+        if top_p is not None:
+            sampling_kwargs["top_p"] = top_p
+        if seed is not None:
+            sampling_kwargs["seed"] = seed
+
+        # Create client with merged params if needed
+        if sampling_kwargs:
+            merged_params = {**self._parent._default_sampling_params, **sampling_kwargs}
+            client = self._parent._create_client_with_params(model, merged_params)
+
+        # Execute request
+        response = await client.start(conversation, cache=self._parent.cache_pattern)  # type: ignore
+
+        # Convert to Completion format
+        completion_text = None
+        if response.content:
+            text_parts = [p.text for p in response.content.parts if isinstance(p, Text)]
+            if text_parts:
+                completion_text = "".join(text_parts)
+
+        # Create choice
+        choice = TextCompletionChoice(
+            index=0,
+            text=completion_text or "",
+            finish_reason=response.finish_reason or "stop",  # type: ignore
+        )
+
+        # Create usage
+        usage = None
+        if response.usage:
+            usage = CompletionUsage(
+                prompt_tokens=response.usage.input_tokens,
+                completion_tokens=response.usage.output_tokens,
+                total_tokens=response.usage.input_tokens + response.usage.output_tokens,
+            )
+
+        return Completion(
+            id=f"cmpl-{uuid.uuid4().hex[:24]}",
+            choices=[choice],
+            created=int(time.time()),
+            model=model,
+            object="text_completion",
+            usage=usage,
+        )
+
+
 class MockChat:
     """Mock chat resource that provides access to completions."""

@@ -414,23 +530,52 @@ class MockAsyncOpenAI:

     Args:
         model: Default model to use (can be overridden in create())
+        api_key: API key (optional, for compatibility)
+        organization: Organization ID (optional, for compatibility)
+        project: Project ID (optional, for compatibility)
+        base_url: Base URL (defaults to OpenAI's URL for compatibility)
+        timeout: Request timeout (optional, for compatibility)
+        max_retries: Max retries (defaults to 2 for compatibility)
+        default_headers: Default headers (optional, for compatibility)
         temperature: Default temperature
         max_completion_tokens: Default max completion tokens
        top_p: Default top_p
+        seed: Default seed for deterministic sampling
         **kwargs: Additional parameters passed to LLMClient
     """

     def __init__(
         self,
         *,
-        model: str,
+        model: str | None = None,
+        api_key: str | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+        base_url: str | None = None,
+        timeout: float | None = None,
+        max_retries: int | None = None,
+        default_headers: dict[str, str] | None = None,
+        http_client: Any | None = None,
         temperature: float | None = None,
         max_completion_tokens: int | None = None,
         top_p: float | None = None,
         seed: int | None = None,
+        cache_pattern: CachePattern | None = None,
         **kwargs: Any,
     ):
-
+        # OpenAI-compatible attributes
+        self.api_key = api_key
+        self.organization = organization
+        self.project = project
+        self.base_url = base_url or "https://api.openai.com/v1"
+        self.timeout = timeout
+        self.max_retries = max_retries or 2
+        self.default_headers = default_headers
+        self.http_client = http_client
+        self.cache_pattern = cache_pattern
+
+        # Internal attributes
+        self._default_model = model or "gpt-4o-mini"
         self._default_sampling_params = {}

         if temperature is not None:
@@ -449,10 +594,11 @@ class MockAsyncOpenAI:
         self._clients: dict[str, Any] = {}

         # Create the default client
-        self._clients[
+        self._clients[self._default_model] = self._create_client(self._default_model)

         # Create nested resources
         self._chat = MockChat(self)
+        self._completions = MockTextCompletions(self)

     def _create_client(self, model: str) -> Any:
         """Create a new LLMClient for the given model."""
@@ -480,3 +626,18 @@ class MockAsyncOpenAI:
     def chat(self) -> MockChat:
         """Access the chat resource."""
         return self._chat
+
+    @property
+    def completions(self) -> MockTextCompletions:
+        """Access the text completions resource."""
+        return self._completions
+
+    async def close(self) -> None:
+        """
+        Close the client and clean up resources.
+
+        This is provided for compatibility with AsyncOpenAI's close() method.
+        Currently a no-op as LLMClient instances don't need explicit cleanup.
+        """
+        # No cleanup needed for LLMClient instances
+        pass
lm_deluge/prompt.py
CHANGED
@@ -848,14 +848,16 @@ class Conversation:
         if content is None:
             return parts
         if isinstance(content, str):
-
+            if content.strip():
+                parts.append(Text(content))
             return parts

         for block in content:
             block_type = block.get("type")
             if block_type in text_types:
                 text_value = block.get("text") or block.get(block_type) or ""
-
+                if text_value.strip():
+                    parts.append(Text(text_value))
             elif block_type in image_types:
                 parts.append(_to_image_from_url(block))
             elif block_type in file_types:
@@ -1001,7 +1003,8 @@ class Conversation:
                 )
             )

-
+            if parts:
+                conversation_messages.append(Message(mapped_role, parts))

         return cls(conversation_messages)

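
The practical effect of the prompt.py change: whitespace-only content no longer produces Text parts, and messages that end up with no parts are dropped when converting OpenAI-style chat messages. A small illustrative sketch (Conversation.from_openai_chat is the entry point used by mock_openai.py above; the messages attribute read at the end is an assumption for illustration only):

from lm_deluge.prompt import Conversation

openai_messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "   "},  # whitespace-only: now skipped
]

conv = Conversation.from_openai_chat(openai_messages)
# The whitespace-only assistant turn contributes no parts, so no Message is
# appended for it; only the user message should remain.
print(len(conv.messages))  # attribute name assumed; expected output: 1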
{lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-lm_deluge/__init__.py,sha256=
+lm_deluge/__init__.py,sha256=zF5lAitfgJ8A28IXJ5BE9OUCqGOqSnGOWn3ZIlizNyY,822
 lm_deluge/batches.py,sha256=Km6QM5_7BlF2qEyo4WPlhkaZkpzrLqf50AaveHXQOoY,25127
 lm_deluge/cache.py,sha256=xO2AIYvP3tUpTMKQjwQQYfGRJSRi6e7sMlRhLjsS-u4,4873
 lm_deluge/cli.py,sha256=Ilww5gOw3J5v0NReq_Ra4hhxU4BCIJBl1oTGxJZKedc,12065
@@ -8,8 +8,8 @@ lm_deluge/embed.py,sha256=CO-TOlC5kOTAM8lcnicoG4u4K664vCBwHF1vHa-nAGg,13382
 lm_deluge/errors.py,sha256=oHjt7YnxWbh-eXMScIzov4NvpJMo0-2r5J6Wh5DQ1tk,209
 lm_deluge/file.py,sha256=PTmlJQ-IaYcYUFun9V0bJ1NPVP84edJrR0hvCMWFylY,19697
 lm_deluge/image.py,sha256=5AMXmn2x47yXeYNfMSMAOWcnlrOxxOel-4L8QCJwU70,8928
-lm_deluge/mock_openai.py,sha256
-lm_deluge/prompt.py,sha256=
+lm_deluge/mock_openai.py,sha256=-u4kxSzwoxDt_2fLh5LaiqETnu0Jg_VDL7TWAAYHGNw,21762
+lm_deluge/prompt.py,sha256=b93ZZHlK9luujgilcnSkwoPCD-U6r1wLWXxWJ4D4ZIE,63578
 lm_deluge/request_context.py,sha256=cBayMFWupWhde2OjRugW3JH-Gin-WFGc6DK2Mb4Prdc,2576
 lm_deluge/rerank.py,sha256=-NBAJdHz9OB-SWWJnHzkFmeVO4wR6lFV7Vw-SxG7aVo,11457
 lm_deluge/tool.py,sha256=Kp2O5lDq_WVo_ASxjLQSHzVRbaxZkS6J0JIIskBjux0,28909
@@ -69,8 +69,8 @@ lm_deluge/util/logprobs.py,sha256=UkBZakOxWluaLqHrjARu7xnJ0uCHVfLGHJdnYlEcutk,11
 lm_deluge/util/spatial.py,sha256=BsF_UKhE-x0xBirc-bV1xSKZRTUhsOBdGqsMKme20C8,4099
 lm_deluge/util/validation.py,sha256=hz5dDb3ebvZrZhnaWxOxbNSVMI6nmaOODBkk0htAUhs,1575
 lm_deluge/util/xml.py,sha256=Ft4zajoYBJR3HHCt2oHwGfymGLdvp_gegVmJ-Wqk4Ck,10547
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
-lm_deluge-0.0.
+lm_deluge-0.0.71.dist-info/licenses/LICENSE,sha256=uNNXGXPCw2TC7CUs7SEBkA-Mz6QBQFWUUEWDMgEs1dU,1058
+lm_deluge-0.0.71.dist-info/METADATA,sha256=kgq3xiS7tMIbXpx5UkhCEA_yJAJvgGOPaie_ZlScTxQ,13514
+lm_deluge-0.0.71.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lm_deluge-0.0.71.dist-info/top_level.txt,sha256=hqU-TJX93yBwpgkDtYcXyLr3t7TLSCCZ_reytJjwBaE,10
+lm_deluge-0.0.71.dist-info/RECORD,,
{lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/WHEEL
File without changes
{lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/licenses/LICENSE
File without changes
{lm_deluge-0.0.69.dist-info → lm_deluge-0.0.71.dist-info}/top_level.txt
File without changes