lm-deluge 0.0.67__py3-none-any.whl → 0.0.90__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (108)
  1. lm_deluge/__init__.py +1 -2
  2. lm_deluge/api_requests/anthropic.py +117 -22
  3. lm_deluge/api_requests/base.py +84 -11
  4. lm_deluge/api_requests/bedrock.py +30 -6
  5. lm_deluge/api_requests/chat_reasoning.py +4 -0
  6. lm_deluge/api_requests/gemini.py +166 -20
  7. lm_deluge/api_requests/openai.py +145 -25
  8. lm_deluge/batches.py +15 -45
  9. lm_deluge/client.py +309 -50
  10. lm_deluge/config.py +15 -3
  11. lm_deluge/models/__init__.py +14 -1
  12. lm_deluge/models/anthropic.py +29 -14
  13. lm_deluge/models/arcee.py +16 -0
  14. lm_deluge/models/deepseek.py +36 -4
  15. lm_deluge/models/google.py +42 -0
  16. lm_deluge/models/grok.py +24 -0
  17. lm_deluge/models/kimi.py +36 -0
  18. lm_deluge/models/minimax.py +18 -0
  19. lm_deluge/models/openai.py +100 -0
  20. lm_deluge/models/openrouter.py +133 -7
  21. lm_deluge/models/together.py +11 -0
  22. lm_deluge/models/zai.py +50 -0
  23. lm_deluge/pipelines/gepa/__init__.py +95 -0
  24. lm_deluge/pipelines/gepa/core.py +354 -0
  25. lm_deluge/pipelines/gepa/docs/samples.py +705 -0
  26. lm_deluge/pipelines/gepa/examples/01_synthetic_keywords.py +140 -0
  27. lm_deluge/pipelines/gepa/examples/02_gsm8k_math.py +261 -0
  28. lm_deluge/pipelines/gepa/examples/03_hotpotqa_multihop.py +300 -0
  29. lm_deluge/pipelines/gepa/examples/04_batch_classification.py +271 -0
  30. lm_deluge/pipelines/gepa/examples/simple_qa.py +129 -0
  31. lm_deluge/pipelines/gepa/optimizer.py +435 -0
  32. lm_deluge/pipelines/gepa/proposer.py +235 -0
  33. lm_deluge/pipelines/gepa/util.py +165 -0
  34. lm_deluge/{llm_tools → pipelines}/score.py +2 -2
  35. lm_deluge/{llm_tools → pipelines}/translate.py +5 -3
  36. lm_deluge/prompt.py +537 -88
  37. lm_deluge/request_context.py +7 -2
  38. lm_deluge/server/__init__.py +24 -0
  39. lm_deluge/server/__main__.py +144 -0
  40. lm_deluge/server/adapters.py +369 -0
  41. lm_deluge/server/app.py +388 -0
  42. lm_deluge/server/auth.py +71 -0
  43. lm_deluge/server/model_policy.py +215 -0
  44. lm_deluge/server/models_anthropic.py +172 -0
  45. lm_deluge/server/models_openai.py +175 -0
  46. lm_deluge/tool/__init__.py +1130 -0
  47. lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
  48. lm_deluge/tool/builtin/anthropic/bash.py +0 -0
  49. lm_deluge/tool/builtin/anthropic/computer_use.py +0 -0
  50. lm_deluge/tool/builtin/gemini.py +59 -0
  51. lm_deluge/tool/builtin/openai.py +74 -0
  52. lm_deluge/tool/cua/__init__.py +173 -0
  53. lm_deluge/tool/cua/actions.py +148 -0
  54. lm_deluge/tool/cua/base.py +27 -0
  55. lm_deluge/tool/cua/batch.py +215 -0
  56. lm_deluge/tool/cua/converters.py +466 -0
  57. lm_deluge/tool/cua/kernel.py +702 -0
  58. lm_deluge/tool/cua/trycua.py +989 -0
  59. lm_deluge/tool/prefab/__init__.py +45 -0
  60. lm_deluge/tool/prefab/batch_tool.py +156 -0
  61. lm_deluge/tool/prefab/docs.py +1119 -0
  62. lm_deluge/tool/prefab/email.py +294 -0
  63. lm_deluge/tool/prefab/filesystem.py +1711 -0
  64. lm_deluge/tool/prefab/full_text_search/__init__.py +285 -0
  65. lm_deluge/tool/prefab/full_text_search/tantivy_index.py +396 -0
  66. lm_deluge/tool/prefab/memory.py +458 -0
  67. lm_deluge/tool/prefab/otc/__init__.py +165 -0
  68. lm_deluge/tool/prefab/otc/executor.py +281 -0
  69. lm_deluge/tool/prefab/otc/parse.py +188 -0
  70. lm_deluge/tool/prefab/random.py +212 -0
  71. lm_deluge/tool/prefab/rlm/__init__.py +296 -0
  72. lm_deluge/tool/prefab/rlm/executor.py +349 -0
  73. lm_deluge/tool/prefab/rlm/parse.py +144 -0
  74. lm_deluge/tool/prefab/sandbox/__init__.py +19 -0
  75. lm_deluge/tool/prefab/sandbox/daytona_sandbox.py +483 -0
  76. lm_deluge/tool/prefab/sandbox/docker_sandbox.py +609 -0
  77. lm_deluge/tool/prefab/sandbox/fargate_sandbox.py +546 -0
  78. lm_deluge/tool/prefab/sandbox/modal_sandbox.py +469 -0
  79. lm_deluge/tool/prefab/sandbox/seatbelt_sandbox.py +827 -0
  80. lm_deluge/tool/prefab/sheets.py +385 -0
  81. lm_deluge/tool/prefab/skills.py +0 -0
  82. lm_deluge/tool/prefab/subagents.py +233 -0
  83. lm_deluge/tool/prefab/todos.py +342 -0
  84. lm_deluge/tool/prefab/tool_search.py +169 -0
  85. lm_deluge/tool/prefab/web_search.py +199 -0
  86. lm_deluge/tracker.py +16 -13
  87. lm_deluge/util/schema.py +412 -0
  88. lm_deluge/warnings.py +8 -0
  89. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/METADATA +23 -9
  90. lm_deluge-0.0.90.dist-info/RECORD +132 -0
  91. lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
  92. lm_deluge/built_in_tools/openai.py +0 -28
  93. lm_deluge/presets/cerebras.py +0 -17
  94. lm_deluge/presets/meta.py +0 -13
  95. lm_deluge/tool.py +0 -849
  96. lm_deluge-0.0.67.dist-info/RECORD +0 -72
  97. lm_deluge/{llm_tools → pipelines}/__init__.py +1 -1
  98. /lm_deluge/{llm_tools → pipelines}/classify.py +0 -0
  99. /lm_deluge/{llm_tools → pipelines}/extract.py +0 -0
  100. /lm_deluge/{llm_tools → pipelines}/locate.py +0 -0
  101. /lm_deluge/{llm_tools → pipelines}/ocr.py +0 -0
  102. /lm_deluge/{built_in_tools/anthropic/bash.py → skills/anthropic.py} +0 -0
  103. /lm_deluge/{built_in_tools/anthropic/computer_use.py → skills/compat.py} +0 -0
  104. /lm_deluge/{built_in_tools → tool/builtin}/anthropic/editor.py +0 -0
  105. /lm_deluge/{built_in_tools → tool/builtin}/base.py +0 -0
  106. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/WHEEL +0 -0
  107. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/licenses/LICENSE +0 -0
  108. {lm_deluge-0.0.67.dist-info → lm_deluge-0.0.90.dist-info}/top_level.txt +0 -0
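The rename entries above (items 34–35 and 97–101) move the old lm_deluge/llm_tools package to lm_deluge/pipelines, and items 104–105 fold built_in_tools into tool/builtin. A minimal migration sketch follows, assuming imports simply track the new file paths; the exact re-exports in lm_deluge/pipelines/__init__.py are not shown in this diff, so verify before relying on them:

# Hedged sketch: module paths inferred from the rename list above; confirm
# against lm_deluge/pipelines/__init__.py in 0.0.90.

# 0.0.67
# from lm_deluge.llm_tools import classify, extract, translate

# 0.0.90 — same modules, relocated under lm_deluge.pipelines
from lm_deluge.pipelines import classify, extract, translate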
lm_deluge/server/models_anthropic.py
@@ -0,0 +1,172 @@
+ """
+ Pydantic models for Anthropic-compatible API request/response formats.
+ """
+
+ from __future__ import annotations
+
+ import uuid
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field
+
+
+ # ============================================================================
+ # Request Models
+ # ============================================================================
+
+
+ class AnthropicContentBlock(BaseModel):
+     """Content block in Anthropic message."""
+
+     type: Literal[
+         "text",
+         "image",
+         "tool_use",
+         "tool_result",
+         "document",
+         "container_upload",
+         "thinking",
+         "redacted_thinking",
+     ] = "text"
+
+     # Text content
+     text: str | None = None
+     citations: list[dict[str, Any]] | None = None
+     cache_control: dict[str, Any] | None = None
+
+     # Image/document content
+     source: dict[str, Any] | None = None
+     title: str | None = None
+     context: str | None = None
+
+     # Container upload content
+     file_id: str | None = None
+
+     # Tool use (assistant response)
+     id: str | None = None
+     name: str | None = None
+     input: dict[str, Any] | None = None
+     caller: dict[str, Any] | None = None
+
+     # Tool result (user message)
+     tool_use_id: str | None = None
+     content: str | list[dict[str, Any]] | None = None
+     is_error: bool | None = None
+
+     # Thinking content
+     thinking: str | None = None
+     signature: str | None = None
+
+     # Redacted thinking content
+     data: str | None = None
+
+
+ class AnthropicMessage(BaseModel):
+     """Anthropic message format."""
+
+     role: Literal["user", "assistant"]
+     content: str | list[AnthropicContentBlock]
+
+
+ class AnthropicTool(BaseModel):
+     """Tool definition for Anthropic."""
+
+     name: str
+     description: str | None = None
+     input_schema: dict[str, Any] | None = None
+
+
+ class AnthropicMessagesRequest(BaseModel):
+     """Anthropic Messages API request format."""
+
+     model: str
+     max_tokens: int
+     messages: list[AnthropicMessage]
+     stream: bool = False
+
+     # System prompt (can be string or content blocks)
+     system: str | list[AnthropicContentBlock] | None = None
+
+     # Thinking configuration (Anthropic reasoning)
+     thinking: dict[str, Any] | None = None
+
+     # Sampling parameters
+     temperature: float | None = None
+     top_p: float | None = None
+     top_k: int | None = None
+
+     # Tool calling
+     tools: list[AnthropicTool] | None = None
+     tool_choice: dict[str, Any] | None = None
+
+     # Metadata
+     metadata: dict[str, Any] | None = None
+     stop_sequences: list[str] | None = None
+
+
+ # ============================================================================
+ # Response Models
+ # ============================================================================
+
+
+ class AnthropicResponseContentBlock(BaseModel):
+     """Content block in Anthropic response."""
+
+     type: Literal["text", "tool_use", "thinking", "redacted_thinking"]
+
+     # Text content
+     text: str | None = None
+     citations: list[dict[str, Any]] | None = None
+
+     # Tool use
+     id: str | None = None
+     name: str | None = None
+     input: dict[str, Any] | None = None
+
+     # Thinking content
+     thinking: str | None = None
+     signature: str | None = None
+
+     # Redacted thinking content
+     data: str | None = None
+
+
+ class AnthropicUsage(BaseModel):
+     """Token usage for Anthropic."""
+
+     input_tokens: int = 0
+     output_tokens: int = 0
+     cache_creation_input_tokens: int | None = None
+     cache_read_input_tokens: int | None = None
+
+
+ class AnthropicMessagesResponse(BaseModel):
+     """Anthropic Messages API response format."""
+
+     id: str = Field(default_factory=lambda: f"msg_{uuid.uuid4().hex[:24]}")
+     type: Literal["message"] = "message"
+     role: Literal["assistant"] = "assistant"
+     model: str
+     content: list[AnthropicResponseContentBlock]
+     stop_reason: str | None = None
+     stop_sequence: str | None = None
+     usage: AnthropicUsage
+
+
+ # ============================================================================
+ # Error Response
+ # ============================================================================
+
+
+ class AnthropicErrorDetail(BaseModel):
+     """Error detail for Anthropic."""
+
+     type: str
+     message: str
+
+
+ class AnthropicErrorResponse(BaseModel):
+     """Anthropic error response format."""
+
+     type: Literal["error"] = "error"
+     error: AnthropicErrorDetail
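
As a quick illustration of what these models give the new server, here is a minimal sketch (not part of the package's public API; the payload values and model id are made up) that validates an inbound Messages request and assembles a typed response:

from lm_deluge.server.models_anthropic import (
    AnthropicMessagesRequest,
    AnthropicMessagesResponse,
)

# Validate an inbound payload: dicts are coerced into the typed models above.
request = AnthropicMessagesRequest.model_validate(
    {
        "model": "claude-sonnet-4",  # placeholder model id
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hello!"}],
    }
)

# Build an outbound response; id, type, and role come from the field defaults.
response = AnthropicMessagesResponse(
    model=request.model,
    content=[{"type": "text", "text": "Hi there."}],
    usage={"input_tokens": 12, "output_tokens": 4},
)
print(response.model_dump_json(exclude_none=True))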
lm_deluge/server/models_openai.py
@@ -0,0 +1,175 @@
+ """
+ Pydantic models for OpenAI-compatible API request/response formats.
+ """
+
+ from __future__ import annotations
+
+ import time
+ import uuid
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field
+
+
+ # ============================================================================
+ # Request Models
+ # ============================================================================
+
+
+ class OpenAIMessage(BaseModel):
+     """OpenAI chat message format."""
+
+     role: Literal["system", "user", "assistant", "tool", "function", "developer"]
+     content: str | list[dict[str, Any]] | None = None
+     name: str | None = None
+     tool_call_id: str | None = None
+     tool_calls: list[dict[str, Any]] | None = None
+
+
+ class OpenAIToolFunction(BaseModel):
+     """Function definition within a tool."""
+
+     name: str
+     description: str | None = None
+     parameters: dict[str, Any] | None = None
+
+
+ class OpenAITool(BaseModel):
+     """Tool definition for function calling."""
+
+     type: Literal["function"] = "function"
+     function: OpenAIToolFunction
+
+
+ class OpenAIChatCompletionsRequest(BaseModel):
+     """OpenAI Chat Completions API request format."""
+
+     model: str
+     messages: list[OpenAIMessage]
+     stream: bool = False
+
+     # Sampling parameters
+     max_tokens: int | None = None
+     max_completion_tokens: int | None = None
+     temperature: float | None = None
+     top_p: float | None = None
+     seed: int | None = None
+
+     # Tool calling
+     tools: list[OpenAITool] | None = None
+     tool_choice: str | dict[str, Any] | None = None
+
+     # Response formatting
+     response_format: dict[str, Any] | None = None
+
+     # Reasoning models
+     reasoning_effort: Literal["low", "medium", "high"] | None = None
+
+     # Other options (accepted but may be ignored)
+     n: int | None = None
+     stop: str | list[str] | None = None
+     presence_penalty: float | None = None
+     frequency_penalty: float | None = None
+     logit_bias: dict[str, float] | None = None
+     logprobs: bool | None = None
+     top_logprobs: int | None = None
+     user: str | None = None
+
+
+ # ============================================================================
+ # Response Models
+ # ============================================================================
+
+
+ class OpenAIFunctionCall(BaseModel):
+     """Function call within a tool call."""
+
+     name: str
+     arguments: str  # JSON string
+
+
+ class OpenAIToolCall(BaseModel):
+     """Tool call in assistant message."""
+
+     id: str
+     type: Literal["function"] = "function"
+     function: OpenAIFunctionCall
+
+
+ class OpenAIResponseMessage(BaseModel):
+     """Message in completion response."""
+
+     role: Literal["assistant"] = "assistant"
+     content: str | None = None
+     tool_calls: list[OpenAIToolCall] | None = None
+     refusal: str | None = None
+
+
+ class OpenAIChoice(BaseModel):
+     """Choice in completion response."""
+
+     index: int = 0
+     message: OpenAIResponseMessage
+     finish_reason: str | None = None
+     logprobs: dict[str, Any] | None = None
+
+
+ class OpenAIUsage(BaseModel):
+     """Token usage statistics."""
+
+     prompt_tokens: int = 0
+     completion_tokens: int = 0
+     total_tokens: int = 0
+
+
+ class OpenAIChatCompletionsResponse(BaseModel):
+     """OpenAI Chat Completions API response format."""
+
+     id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4().hex[:24]}")
+     object: Literal["chat.completion"] = "chat.completion"
+     created: int = Field(default_factory=lambda: int(time.time()))
+     model: str
+     choices: list[OpenAIChoice]
+     usage: OpenAIUsage | None = None
+     system_fingerprint: str | None = None
+
+
+ # ============================================================================
+ # Models List Response
+ # ============================================================================
+
+
+ class OpenAIModelInfo(BaseModel):
+     """Model information for /v1/models endpoint."""
+
+     id: str
+     object: Literal["model"] = "model"
+     created: int = Field(default_factory=lambda: int(time.time()))
+     owned_by: str = "lm-deluge"
+
+
+ class OpenAIModelsResponse(BaseModel):
+     """Response for /v1/models endpoint."""
+
+     object: Literal["list"] = "list"
+     data: list[OpenAIModelInfo]
+
+
+ # ============================================================================
+ # Error Response
+ # ============================================================================
+
+
+ class OpenAIErrorDetail(BaseModel):
+     """Error detail object."""
+
+     message: str
+     type: str
+     param: str | None = None
+     code: str | None = None
+
+
+ class OpenAIErrorResponse(BaseModel):
+     """Error response format."""
+
+     error: OpenAIErrorDetail
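
A matching sketch for the OpenAI-compatible side (again illustrative only, not a documented entry point; the model id is a placeholder):

from lm_deluge.server.models_openai import (
    OpenAIChatCompletionsRequest,
    OpenAIChatCompletionsResponse,
    OpenAIChoice,
    OpenAIResponseMessage,
    OpenAIUsage,
)

# Parse a Chat Completions request body into the typed model.
request = OpenAIChatCompletionsRequest.model_validate(
    {
        "model": "gpt-4.1-mini",  # placeholder model id
        "messages": [{"role": "user", "content": "Say hi."}],
        "temperature": 0.2,
    }
)

# Assemble a response; id, object, and created are filled by the defaults.
response = OpenAIChatCompletionsResponse(
    model=request.model,
    choices=[
        OpenAIChoice(
            message=OpenAIResponseMessage(content="Hi!"),
            finish_reason="stop",
        )
    ],
    usage=OpenAIUsage(prompt_tokens=8, completion_tokens=2, total_tokens=10),
)
print(response.model_dump_json(exclude_none=True))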