lm-deluge 0.0.82__tar.gz → 0.0.83__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.82/src/lm_deluge.egg-info → lm_deluge-0.0.83}/PKG-INFO +1 -1
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/pyproject.toml +1 -1
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/anthropic.py +6 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/client.py +14 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/__init__.py +3 -1
- lm_deluge-0.0.83/src/lm_deluge/models/arcee.py +16 -0
- lm_deluge-0.0.83/src/lm_deluge/models/deepseek.py +59 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/kimi.py +2 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/openrouter.py +10 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/together.py +11 -0
- lm_deluge-0.0.83/src/lm_deluge/models/zai.py +1 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/__init__.py +8 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/docs.py +1119 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/email.py +294 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/filesystem.py +1711 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/memory.py +458 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/random.py +212 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/sheets.py +385 -0
- lm_deluge-0.0.83/src/lm_deluge/tool/prefab/web_search.py +206 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/warnings.py +1 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/SOURCES.txt +12 -0
- lm_deluge-0.0.83/tests/test_docs.py +480 -0
- lm_deluge-0.0.83/tests/test_random.py +364 -0
- lm_deluge-0.0.83/tests/test_random_integration.py +98 -0
- lm_deluge-0.0.83/tests/test_random_simple.py +108 -0
- lm_deluge-0.0.83/tests/test_sheets.py +282 -0
- lm_deluge-0.0.82/src/lm_deluge/models/deepseek.py +0 -27
- lm_deluge-0.0.82/src/lm_deluge/tool/prefab/filesystem.py +0 -821
- lm_deluge-0.0.82/src/lm_deluge/tool/prefab/memory.py +0 -190
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/LICENSE +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/README.md +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/setup.cfg +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/mock_openai.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/minimax.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/classify.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/extract.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/locate.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/ocr.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/score.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/translate.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/batch_tool.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/__init__.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/executor.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/parse.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/sandbox.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/subagents.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/todos.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/tool_search.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/schema.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_batch_tool.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_file_upload.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_filesystem.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_filesystem_live.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_mock_openai.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_native_mcp_server.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_openrouter_generic.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_otc.py +0 -0
- {lm_deluge-0.0.82 → lm_deluge-0.0.83}/tests/test_tool_search.py +0 -0
`src/lm_deluge/api_requests/anthropic.py`

```diff
@@ -101,11 +101,14 @@ def _build_anthropic_request(
             request_json["max_tokens"] += budget
         else:
             request_json["thinking"] = {"type": "disabled"}
+            if "kimi" in model.id and "thinking" in model.id:
+                maybe_warn("WARN_KIMI_THINKING_NO_REASONING")

     else:
         request_json["thinking"] = {"type": "disabled"}
         if sampling_params.reasoning_effort:
             print("ignoring reasoning_effort for non-reasoning model")
+
     if system_message is not None:
         request_json["system"] = system_message

@@ -231,6 +234,9 @@ class AnthropicRequest(APIRequestBase):
         data = await http_response.json()
         response_content = data["content"]

+        # print("=== CONTENT ===")
+        # print(response_content)
+
         # Parse response into Message with parts
         parts = []
         for item in response_content:
```
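The first hunk routes a new one-shot warning through the package's warnings module (which gains the matching message key in this release, per the `warnings.py +1` entry above) when a Kimi "thinking" model is configured without extended reasoning. A minimal sketch of what a keyed, fire-once helper like `maybe_warn` plausibly looks like; the registry dict and its contents here are assumptions, not the package's actual implementation:

```python
import warnings

# Hypothetical registry; the real keys and texts live in src/lm_deluge/warnings.py.
_WARNINGS = {
    "WARN_KIMI_THINKING_NO_REASONING": (
        "Kimi thinking model requested with thinking disabled; "
        "responses will not include extended reasoning."
    ),
}
_emitted: set[str] = set()


def maybe_warn(key: str) -> None:
    """Emit each registered warning at most once per process."""
    if key in _WARNINGS and key not in _emitted:
        _emitted.add(key)
        warnings.warn(_WARNINGS[key])
```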
`src/lm_deluge/client.py`

```diff
@@ -84,6 +84,8 @@ class _LLMClient(BaseModel):
     json_mode: bool = False
     max_new_tokens: int = 512
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None
+    global_effort: Literal["low", "medium", "high"] | None = None
+    thinking_budget: int | None = None
     logprobs: bool = False
     top_logprobs: int | None = None
     force_local_mcp: bool = False
@@ -212,6 +214,8 @@ class _LLMClient(BaseModel):
             json_mode=self.json_mode,
             max_new_tokens=self.max_new_tokens,
             reasoning_effort=self.reasoning_effort,
+            global_effort=self.global_effort or "high",
+            thinking_budget=self.thinking_budget,
             logprobs=self.logprobs,
             top_logprobs=self.top_logprobs,
         )
@@ -347,6 +351,8 @@ class _LLMClient(BaseModel):
             json_mode=data.get("json_mode", False),
             max_new_tokens=data.get("max_new_tokens", 512),
             reasoning_effort=data.get("reasoning_effort", None),
+            global_effort=data.get("global_effort") or "high",
+            thinking_budget=data.get("thinking_budget", None),
             logprobs=data.get("logprobs", False),
             top_logprobs=data.get("top_logprobs", None),
         )
@@ -1077,6 +1083,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1106,6 +1114,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1134,6 +1144,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1174,6 +1186,8 @@ def LLMClient(
         json_mode=json_mode,
         max_new_tokens=max_new_tokens,
         reasoning_effort=reasoning_effort,
+        global_effort=global_effort,
+        thinking_budget=thinking_budget,
         logprobs=logprobs,
         top_logprobs=top_logprobs,
         force_local_mcp=force_local_mcp,
```
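Together these hunks thread two new reasoning knobs, `global_effort` (falling back to `"high"` when unset) and `thinking_budget`, through the model's fields, its serialization round-trip, and all three `LLMClient` factory overloads. A hedged usage sketch; the model id and prompt are placeholders, and `process_prompts_sync` is the batch entry point in recent lm-deluge versions (check the README for your install):

```python
from lm_deluge import LLMClient

# Assumed semantics: global_effort sets a default effort level across
# requests, while thinking_budget caps extended-thinking tokens on
# providers that support explicit budgets (e.g. Anthropic-spec models).
client = LLMClient(
    "gpt-4o-mini",          # placeholder model id
    global_effort="medium",
    thinking_budget=2048,
)
resps = client.process_prompts_sync(["Summarize this diff in one line."])
```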
`src/lm_deluge/models/__init__.py`

```diff
@@ -4,9 +4,10 @@ import random
 from dataclasses import dataclass, field

 from ..request_context import RequestContext
+from .anthropic import ANTHROPIC_MODELS

 # Import and register all provider models
-from .anthropic import ANTHROPIC_MODELS
+from .arcee import ARCEE_MODELS
 from .bedrock import BEDROCK_MODELS
 from .cerebras import CEREBRAS_MODELS
 from .cohere import COHERE_MODELS
@@ -128,6 +129,7 @@ def register_model(
 # Register all models from all providers
 for model_dict in [
     ANTHROPIC_MODELS,
+    ARCEE_MODELS,
     BEDROCK_MODELS,
     COHERE_MODELS,
     DEEPSEEK_MODELS,
```
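The registration loop consumes one dict per provider module, so the per-model schema visible in the new files below doubles as the extension point. A hypothetical provider dict following the same field names; everything here is illustrative, not a real endpoint:

```python
# Field names mirror the arcee.py / deepseek.py entries in this release;
# input_cost / output_cost appear to be dollars per million tokens.
MY_MODELS = {
    "my-local-model": {
        "id": "my-local-model",
        "name": "my-org/my-local-model",         # provider-side model name
        "api_base": "http://localhost:8000/v1",  # any OpenAI-compatible server
        "api_key_env_var": "MY_API_KEY",
        "api_spec": "openai",
        "input_cost": 0.0,
        "output_cost": 0.0,
    },
}
```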
`src/lm_deluge/models/arcee.py` (new file)

```diff
@@ -0,0 +1,16 @@
+ARCEE_MODELS = {
+    "trinity-mini": {
+        "id": "trinity-mini",
+        "name": "trinity-mini",
+        "api_base": "https://api.arcee.ai/api/v1",
+        "api_key_env_var": "ARCEE_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": False,
+        "api_spec": "openai",
+        "input_cost": 0.045,
+        "cached_input_cost": 0.045,
+        "output_cost": 0.15,
+        "reasoning_model": True,
+    }
+}
```
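The same model lands under three registry ids in this release: Arcee's own API here, plus OpenRouter and Together entries further down. Assuming ids select the endpoint the way they do elsewhere in the registry, switching providers is a one-string change:

```python
from lm_deluge import LLMClient

# Assumed: each id resolves to its registry entry; API keys are read from
# the env var named in that entry (ARCEE_API_KEY, OPENROUTER_API_KEY, ...).
direct = LLMClient("trinity-mini")                     # api.arcee.ai
via_openrouter = LLMClient("trinity-mini-openrouter")  # arcee-ai/trinity-mini:free
via_together = LLMClient("trinity-mini-together")      # Together AI
```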
`src/lm_deluge/models/deepseek.py` (new file; the 27-line 0.0.82 version is removed)

```diff
@@ -0,0 +1,59 @@
+DEEPSEEK_MODELS = {
+    #  ______                                       _
+    # (______)                                     | |
+    #  _     _ _____ _____ ____    ___ _____ _____| |  _
+    # | |   | | ___ | ___ |  _ \  /___) ___ | ___ | |_/ )
+    # | |__/ /| ____| ____| |_| |___ | ____| ____|  _ (
+    # |_____/ |_____)_____)  __/(___/|_____)_____)_| \_)
+    #                     |_|
+    "deepseek-chat": {
+        "id": "deepseek-chat",
+        "name": "deepseek-chat",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-r1": {
+        "id": "deepseek-r1",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-reasoner": {
+        "id": "deepseek-reasoner",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-reasoner-anthropic-compat": {
+        "id": "deepseek-reasoner-anthropic-compat",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/anthropic",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "anthropic",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-speciale": {
+        "id": "deepseek-speciale",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v3.2_speciale_expires_on_20251215/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+}
```
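The registry now exposes the same upstream `deepseek-reasoner` under several ids: an OpenAI-compatible `/v1` route (with a `deepseek-r1` alias), an Anthropic-compatible `/anthropic` route, and a dated V3.2-Speciale preview base URL. A hedged selection sketch:

```python
from lm_deluge import LLMClient

# Assumed: all three resolve to the same upstream model via different routes;
# every entry reads DEEPSEEK_API_KEY.
reasoner = LLMClient("deepseek-reasoner")                 # OpenAI-spec /v1
compat = LLMClient("deepseek-reasoner-anthropic-compat")  # Anthropic-spec route
preview = LLMClient("deepseek-speciale")                  # dated preview base URL
```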
`src/lm_deluge/models/kimi.py`

```diff
@@ -22,6 +22,7 @@ KIMI_MODELS = {
         "api_key_env_var": "MOONSHOT_API_KEY",
         "supports_json": True,
         "api_spec": "anthropic",
+        "reasoning_model": True,
     },
     "kimi-k2-thinking-turbo": {
         "id": "kimi-k2-thinking-turbo",
@@ -30,5 +31,6 @@ KIMI_MODELS = {
         "api_key_env_var": "MOONSHOT_API_KEY",
         "supports_json": True,
         "api_spec": "anthropic",
+        "reasoning_model": True,
     },
 }
```
`src/lm_deluge/models/openrouter.py`

```diff
@@ -71,4 +71,14 @@ OPENROUTER_MODELS = {
         "input_cost": 0.2,
         "output_cost": 35,
     },
+    "trinity-mini-openrouter": {
+        "id": "trinity-mini-openrouter",
+        "name": "arcee-ai/trinity-mini:free",
+        "api_base": "https://openrouter.ai/api/v1",
+        "api_key_env_var": "OPENROUTER_API_KEY",
+        "supports_json": True,
+        "api_spec": "openai",
+        "input_cost": 0.045,
+        "output_cost": 0.15,
+    },
 }
```
`src/lm_deluge/models/together.py`

```diff
@@ -93,4 +93,15 @@ TOGETHER_MODELS = {
         "output_cost": 0.59,
         "reasoning_model": True,
     },
+    "trinity-mini-together": {
+        "id": "trinity-mini-together",
+        "name": "arcee-ai/trinity-mini",
+        "api_base": "https://api.together.xyz/v1",
+        "api_key_env_var": "TOGETHER_API_KEY",
+        "supports_json": False,
+        "api_spec": "openai",
+        "input_cost": 0.18,
+        "output_cost": 0.59,
+        "reasoning_model": True,
+    },
 }
```
`src/lm_deluge/models/zai.py` (new file)

```diff
@@ -0,0 +1 @@
+ZAI_MODELS = {}
```
`src/lm_deluge/tool/prefab/__init__.py`

```diff
@@ -8,8 +8,12 @@ from .batch_tool import BatchTool
 from .tool_search import ToolSearchTool
 from .otc import ToolComposer
 from .sandbox import DaytonaSandbox, ModalSandbox
+from .docs import DocsManager
+from .sheets import SheetsManager
+from .random import RandomTools
 from .subagents import SubAgentManager
 from .todos import TodoItem, TodoManager, TodoPriority, TodoStatus
+from .email import EmailManager

 __all__ = [
     "BatchTool",
@@ -26,4 +30,8 @@ __all__ = [
     "WorkspaceBackend",
     "ModalSandbox",
     "DaytonaSandbox",
+    "DocsManager",
+    "SheetsManager",
+    "RandomTools",
+    "EmailManager",
 ]
```
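Four new prefab tool modules ship in 0.0.83, alongside the much larger filesystem and memory rewrites listed above. The exports below are grounded in this hunk; construction and wiring are not shown in the diff:

```python
# New prefab exports in 0.0.83 (line counts from the file list above):
from lm_deluge.tool.prefab import (
    DocsManager,    # docs.py, +1119 lines
    SheetsManager,  # sheets.py, +385 lines
    RandomTools,    # random.py, +212 lines
    EmailManager,   # email.py, +294 lines
)
# Constructor signatures are not visible here; see each module (and
# tests/test_docs.py, tests/test_sheets.py, tests/test_random*.py) for
# how these managers are instantiated and exercised.
```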