lm-deluge 0.0.88__py3-none-any.whl → 0.0.90__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- lm_deluge/__init__.py +0 -24
- lm_deluge/api_requests/anthropic.py +25 -5
- lm_deluge/api_requests/base.py +37 -0
- lm_deluge/api_requests/bedrock.py +23 -2
- lm_deluge/api_requests/gemini.py +36 -10
- lm_deluge/api_requests/openai.py +31 -4
- lm_deluge/batches.py +15 -45
- lm_deluge/client.py +27 -1
- lm_deluge/models/__init__.py +2 -0
- lm_deluge/models/anthropic.py +12 -12
- lm_deluge/models/google.py +13 -0
- lm_deluge/models/minimax.py +9 -1
- lm_deluge/models/openrouter.py +48 -0
- lm_deluge/models/zai.py +50 -1
- lm_deluge/pipelines/gepa/docs/samples.py +19 -10
- lm_deluge/prompt.py +333 -68
- lm_deluge/server/__init__.py +24 -0
- lm_deluge/server/__main__.py +144 -0
- lm_deluge/server/adapters.py +369 -0
- lm_deluge/server/app.py +388 -0
- lm_deluge/server/auth.py +71 -0
- lm_deluge/server/model_policy.py +215 -0
- lm_deluge/server/models_anthropic.py +172 -0
- lm_deluge/server/models_openai.py +175 -0
- lm_deluge/skills/anthropic.py +0 -0
- lm_deluge/skills/compat.py +0 -0
- lm_deluge/tool/__init__.py +13 -1
- lm_deluge/tool/prefab/sandbox/__init__.py +19 -0
- lm_deluge/tool/prefab/sandbox/daytona_sandbox.py +483 -0
- lm_deluge/tool/prefab/sandbox/docker_sandbox.py +609 -0
- lm_deluge/tool/prefab/sandbox/fargate_sandbox.py +546 -0
- lm_deluge/tool/prefab/sandbox/modal_sandbox.py +469 -0
- lm_deluge/tool/prefab/sandbox/seatbelt_sandbox.py +827 -0
- lm_deluge/tool/prefab/skills.py +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/METADATA +4 -3
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/RECORD +39 -24
- lm_deluge/mock_openai.py +0 -643
- lm_deluge/tool/prefab/sandbox.py +0 -1621
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/WHEEL +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/licenses/LICENSE +0 -0
- {lm_deluge-0.0.88.dist-info → lm_deluge-0.0.90.dist-info}/top_level.txt +0 -0
lm_deluge/server/models_anthropic.py ADDED
@@ -0,0 +1,172 @@
+"""
+Pydantic models for Anthropic-compatible API request/response formats.
+"""
+
+from __future__ import annotations
+
+import uuid
+from typing import Any, Literal
+
+from pydantic import BaseModel, Field
+
+
+# ============================================================================
+# Request Models
+# ============================================================================
+
+
+class AnthropicContentBlock(BaseModel):
+    """Content block in Anthropic message."""
+
+    type: Literal[
+        "text",
+        "image",
+        "tool_use",
+        "tool_result",
+        "document",
+        "container_upload",
+        "thinking",
+        "redacted_thinking",
+    ] = "text"
+
+    # Text content
+    text: str | None = None
+    citations: list[dict[str, Any]] | None = None
+    cache_control: dict[str, Any] | None = None
+
+    # Image/document content
+    source: dict[str, Any] | None = None
+    title: str | None = None
+    context: str | None = None
+
+    # Container upload content
+    file_id: str | None = None
+
+    # Tool use (assistant response)
+    id: str | None = None
+    name: str | None = None
+    input: dict[str, Any] | None = None
+    caller: dict[str, Any] | None = None
+
+    # Tool result (user message)
+    tool_use_id: str | None = None
+    content: str | list[dict[str, Any]] | None = None
+    is_error: bool | None = None
+
+    # Thinking content
+    thinking: str | None = None
+    signature: str | None = None
+
+    # Redacted thinking content
+    data: str | None = None
+
+
+class AnthropicMessage(BaseModel):
+    """Anthropic message format."""
+
+    role: Literal["user", "assistant"]
+    content: str | list[AnthropicContentBlock]
+
+
+class AnthropicTool(BaseModel):
+    """Tool definition for Anthropic."""
+
+    name: str
+    description: str | None = None
+    input_schema: dict[str, Any] | None = None
+
+
+class AnthropicMessagesRequest(BaseModel):
+    """Anthropic Messages API request format."""
+
+    model: str
+    max_tokens: int
+    messages: list[AnthropicMessage]
+    stream: bool = False
+
+    # System prompt (can be string or content blocks)
+    system: str | list[AnthropicContentBlock] | None = None
+
+    # Thinking configuration (Anthropic reasoning)
+    thinking: dict[str, Any] | None = None
+
+    # Sampling parameters
+    temperature: float | None = None
+    top_p: float | None = None
+    top_k: int | None = None
+
+    # Tool calling
+    tools: list[AnthropicTool] | None = None
+    tool_choice: dict[str, Any] | None = None
+
+    # Metadata
+    metadata: dict[str, Any] | None = None
+    stop_sequences: list[str] | None = None
+
+
+# ============================================================================
+# Response Models
+# ============================================================================
+
+
+class AnthropicResponseContentBlock(BaseModel):
+    """Content block in Anthropic response."""
+
+    type: Literal["text", "tool_use", "thinking", "redacted_thinking"]
+
+    # Text content
+    text: str | None = None
+    citations: list[dict[str, Any]] | None = None
+
+    # Tool use
+    id: str | None = None
+    name: str | None = None
+    input: dict[str, Any] | None = None
+
+    # Thinking content
+    thinking: str | None = None
+    signature: str | None = None
+
+    # Redacted thinking content
+    data: str | None = None
+
+
+class AnthropicUsage(BaseModel):
+    """Token usage for Anthropic."""
+
+    input_tokens: int = 0
+    output_tokens: int = 0
+    cache_creation_input_tokens: int | None = None
+    cache_read_input_tokens: int | None = None
+
+
+class AnthropicMessagesResponse(BaseModel):
+    """Anthropic Messages API response format."""
+
+    id: str = Field(default_factory=lambda: f"msg_{uuid.uuid4().hex[:24]}")
+    type: Literal["message"] = "message"
+    role: Literal["assistant"] = "assistant"
+    model: str
+    content: list[AnthropicResponseContentBlock]
+    stop_reason: str | None = None
+    stop_sequence: str | None = None
+    usage: AnthropicUsage
+
+
+# ============================================================================
+# Error Response
+# ============================================================================
+
+
+class AnthropicErrorDetail(BaseModel):
+    """Error detail for Anthropic."""
+
+    type: str
+    message: str
+
+
+class AnthropicErrorResponse(BaseModel):
+    """Anthropic error response format."""
+
+    type: Literal["error"] = "error"
+    error: AnthropicErrorDetail
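For orientation, here is one way these request models can be exercised: an incoming Messages API payload is validated into typed objects. This sketch is not from the package; the payload values are placeholders and Pydantic v2 is assumed.

# Hypothetical usage; "example-model" is a placeholder, not a real model id.
payload = {
    "model": "example-model",
    "max_tokens": 256,
    "messages": [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": [{"type": "text", "text": "Hi there."}]},
    ],
}
req = AnthropicMessagesRequest.model_validate(payload)
# Content-block dicts are parsed into AnthropicContentBlock instances.
assert req.messages[1].content[0].type == "text"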
lm_deluge/server/models_openai.py ADDED
@@ -0,0 +1,175 @@
+"""
+Pydantic models for OpenAI-compatible API request/response formats.
+"""
+
+from __future__ import annotations
+
+import time
+import uuid
+from typing import Any, Literal
+
+from pydantic import BaseModel, Field
+
+
+# ============================================================================
+# Request Models
+# ============================================================================
+
+
+class OpenAIMessage(BaseModel):
+    """OpenAI chat message format."""
+
+    role: Literal["system", "user", "assistant", "tool", "function", "developer"]
+    content: str | list[dict[str, Any]] | None = None
+    name: str | None = None
+    tool_call_id: str | None = None
+    tool_calls: list[dict[str, Any]] | None = None
+
+
+class OpenAIToolFunction(BaseModel):
+    """Function definition within a tool."""
+
+    name: str
+    description: str | None = None
+    parameters: dict[str, Any] | None = None
+
+
+class OpenAITool(BaseModel):
+    """Tool definition for function calling."""
+
+    type: Literal["function"] = "function"
+    function: OpenAIToolFunction
+
+
+class OpenAIChatCompletionsRequest(BaseModel):
+    """OpenAI Chat Completions API request format."""
+
+    model: str
+    messages: list[OpenAIMessage]
+    stream: bool = False
+
+    # Sampling parameters
+    max_tokens: int | None = None
+    max_completion_tokens: int | None = None
+    temperature: float | None = None
+    top_p: float | None = None
+    seed: int | None = None
+
+    # Tool calling
+    tools: list[OpenAITool] | None = None
+    tool_choice: str | dict[str, Any] | None = None
+
+    # Response formatting
+    response_format: dict[str, Any] | None = None
+
+    # Reasoning models
+    reasoning_effort: Literal["low", "medium", "high"] | None = None
+
+    # Other options (accepted but may be ignored)
+    n: int | None = None
+    stop: str | list[str] | None = None
+    presence_penalty: float | None = None
+    frequency_penalty: float | None = None
+    logit_bias: dict[str, float] | None = None
+    logprobs: bool | None = None
+    top_logprobs: int | None = None
+    user: str | None = None
+
+
+# ============================================================================
+# Response Models
+# ============================================================================
+
+
+class OpenAIFunctionCall(BaseModel):
+    """Function call within a tool call."""
+
+    name: str
+    arguments: str  # JSON string
+
+
+class OpenAIToolCall(BaseModel):
+    """Tool call in assistant message."""
+
+    id: str
+    type: Literal["function"] = "function"
+    function: OpenAIFunctionCall
+
+
+class OpenAIResponseMessage(BaseModel):
+    """Message in completion response."""
+
+    role: Literal["assistant"] = "assistant"
+    content: str | None = None
+    tool_calls: list[OpenAIToolCall] | None = None
+    refusal: str | None = None
+
+
+class OpenAIChoice(BaseModel):
+    """Choice in completion response."""
+
+    index: int = 0
+    message: OpenAIResponseMessage
+    finish_reason: str | None = None
+    logprobs: dict[str, Any] | None = None
+
+
+class OpenAIUsage(BaseModel):
+    """Token usage statistics."""
+
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    total_tokens: int = 0
+
+
+class OpenAIChatCompletionsResponse(BaseModel):
+    """OpenAI Chat Completions API response format."""
+
+    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex[:24]}")
+    object: Literal["chat.completion"] = "chat.completion"
+    created: int = Field(default_factory=lambda: int(time.time()))
+    model: str
+    choices: list[OpenAIChoice]
+    usage: OpenAIUsage | None = None
+    system_fingerprint: str | None = None
+
+
+# ============================================================================
+# Models List Response
+# ============================================================================
+
+
+class OpenAIModelInfo(BaseModel):
+    """Model information for /v1/models endpoint."""
+
+    id: str
+    object: Literal["model"] = "model"
+    created: int = Field(default_factory=lambda: int(time.time()))
+    owned_by: str = "lm-deluge"
+
+
+class OpenAIModelsResponse(BaseModel):
+    """Response for /v1/models endpoint."""
+
+    object: Literal["list"] = "list"
+    data: list[OpenAIModelInfo]
+
+
+# ============================================================================
+# Error Response
+# ============================================================================
+
+
+class OpenAIErrorDetail(BaseModel):
+    """Error detail object."""
+
+    message: str
+    type: str
+    param: str | None = None
+    code: str | None = None
+
+
+class OpenAIErrorResponse(BaseModel):
+    """Error response format."""
+
+    error: OpenAIErrorDetail
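The OpenAI-side models follow the same pattern; id, object, and created are filled in by Field(default_factory=...), so a response can be built from just a model name, choices, and usage. A minimal sketch, again with placeholder values rather than anything from the package:

# Hypothetical usage; the defaults generate the chatcmpl id and timestamp.
resp = OpenAIChatCompletionsResponse(
    model="example-model",
    choices=[
        OpenAIChoice(
            message=OpenAIResponseMessage(content="Hello!"),
            finish_reason="stop",
        )
    ],
    usage=OpenAIUsage(prompt_tokens=5, completion_tokens=2, total_tokens=7),
)
print(resp.model_dump_json(exclude_none=True))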
lm_deluge/tool/__init__.py CHANGED
@@ -1007,10 +1007,22 @@ class Tool(BaseModel):
         """
         Shape used by google.genai docs.
         """
+
+        def _strip_additional_properties(schema: Any) -> Any:
+            if isinstance(schema, dict):
+                return {
+                    key: _strip_additional_properties(value)
+                    for key, value in schema.items()
+                    if key != "additionalProperties"
+                }
+            if isinstance(schema, list):
+                return [_strip_additional_properties(item) for item in schema]
+            return schema
+
         return {
             "name": self.name,
             "description": self.description,
-            "parameters": self._json_schema(),
+            "parameters": _strip_additional_properties(self._json_schema()),
         }
 
     def for_mistral(self) -> dict[str, Any]:
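The new helper walks the JSON schema recursively and drops every additionalProperties key before the schema is returned in the google.genai tool shape. A standalone sketch of the same recursion, lifted out of the method so it can be run directly (the input schema here is made up):

from typing import Any

def strip_additional_properties(schema: Any) -> Any:
    # Same logic as the nested helper in the hunk above.
    if isinstance(schema, dict):
        return {
            key: strip_additional_properties(value)
            for key, value in schema.items()
            if key != "additionalProperties"
        }
    if isinstance(schema, list):
        return [strip_additional_properties(item) for item in schema]
    return schema

schema = {
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "tags": {
            "type": "array",
            "items": {"type": "object", "additionalProperties": False},
        }
    },
}
assert strip_additional_properties(schema) == {
    "type": "object",
    "properties": {"tags": {"type": "array", "items": {"type": "object"}}},
}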
lm_deluge/tool/prefab/sandbox/__init__.py ADDED
@@ -0,0 +1,19 @@
+import sys
+
+from .daytona_sandbox import DaytonaSandbox
+from .docker_sandbox import DockerSandbox
+from .fargate_sandbox import FargateSandbox
+from .modal_sandbox import ModalSandbox
+
+__all__ = [
+    "DaytonaSandbox",
+    "DockerSandbox",
+    "FargateSandbox",
+    "ModalSandbox",
+]
+
+# SeatbeltSandbox is macOS only
+if sys.platform == "darwin":
+    from .seatbelt_sandbox import SandboxMode, SeatbeltSandbox  # noqa
+
+    __all__.extend(["SandboxMode", "SeatbeltSandbox"])
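Because the seatbelt backend is only imported and exported on macOS, portable callers have to branch on the platform the same way the module itself does. A minimal consumer-side sketch (choosing DockerSandbox as the fallback is this example's assumption, not the package's):

import sys

if sys.platform == "darwin":
    from lm_deluge.tool.prefab.sandbox import SeatbeltSandbox as Sandbox
else:
    # Hypothetical fallback choice; any of the other backends would do.
    from lm_deluge.tool.prefab.sandbox import DockerSandbox as Sandbox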