lm-deluge 0.0.81__tar.gz → 0.0.83__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. {lm_deluge-0.0.81/src/lm_deluge.egg-info → lm_deluge-0.0.83}/PKG-INFO +1 -1
  2. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/pyproject.toml +1 -1
  3. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/anthropic.py +6 -0
  4. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/client.py +19 -0
  5. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/__init__.py +3 -1
  6. lm_deluge-0.0.83/src/lm_deluge/models/arcee.py +16 -0
  7. lm_deluge-0.0.83/src/lm_deluge/models/deepseek.py +59 -0
  8. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/kimi.py +2 -0
  9. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/openrouter.py +10 -0
  10. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/together.py +11 -0
  11. lm_deluge-0.0.83/src/lm_deluge/models/zai.py +1 -0
  12. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/__init__.py +8 -0
  13. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/docs.py +1119 -0
  14. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/email.py +294 -0
  15. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/filesystem.py +1711 -0
  16. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/memory.py +458 -0
  17. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/random.py +212 -0
  18. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/sheets.py +385 -0
  19. lm_deluge-0.0.83/src/lm_deluge/tool/prefab/web_search.py +206 -0
  20. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tracker.py +16 -13
  21. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/warnings.py +1 -0
  22. {lm_deluge-0.0.81 → lm_deluge-0.0.83/src/lm_deluge.egg-info}/PKG-INFO +1 -1
  23. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/SOURCES.txt +12 -0
  24. lm_deluge-0.0.83/tests/test_docs.py +480 -0
  25. lm_deluge-0.0.83/tests/test_random.py +364 -0
  26. lm_deluge-0.0.83/tests/test_random_integration.py +98 -0
  27. lm_deluge-0.0.83/tests/test_random_simple.py +108 -0
  28. lm_deluge-0.0.83/tests/test_sheets.py +282 -0
  29. lm_deluge-0.0.81/src/lm_deluge/models/deepseek.py +0 -27
  30. lm_deluge-0.0.81/src/lm_deluge/tool/prefab/filesystem.py +0 -821
  31. lm_deluge-0.0.81/src/lm_deluge/tool/prefab/memory.py +0 -190
  32. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/LICENSE +0 -0
  33. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/README.md +0 -0
  34. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/setup.cfg +0 -0
  35. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/__init__.py +0 -0
  36. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/__init__.py +0 -0
  37. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/base.py +0 -0
  38. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/bedrock.py +0 -0
  39. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
  40. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/common.py +0 -0
  41. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
  42. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
  43. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
  44. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
  45. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
  46. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/gemini.py +0 -0
  47. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/mistral.py +0 -0
  48. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/openai.py +0 -0
  49. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/api_requests/response.py +0 -0
  50. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/batches.py +0 -0
  51. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
  52. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
  53. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
  54. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
  55. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/base.py +0 -0
  56. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/built_in_tools/openai.py +0 -0
  57. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/cache.py +0 -0
  58. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/cli.py +0 -0
  59. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/config.py +0 -0
  60. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/embed.py +0 -0
  61. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/errors.py +0 -0
  62. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/file.py +0 -0
  63. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/image.py +0 -0
  64. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/llm_tools/__init__.py +0 -0
  65. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/mock_openai.py +0 -0
  66. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/anthropic.py +0 -0
  67. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/bedrock.py +0 -0
  68. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/cerebras.py +0 -0
  69. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/cohere.py +0 -0
  70. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/fireworks.py +0 -0
  71. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/google.py +0 -0
  72. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/grok.py +0 -0
  73. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/groq.py +0 -0
  74. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/meta.py +0 -0
  75. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/minimax.py +0 -0
  76. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/mistral.py +0 -0
  77. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/models/openai.py +0 -0
  78. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/__init__.py +0 -0
  79. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/classify.py +0 -0
  80. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/extract.py +0 -0
  81. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/locate.py +0 -0
  82. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/ocr.py +0 -0
  83. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/score.py +0 -0
  84. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/pipelines/translate.py +0 -0
  85. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/prompt.py +0 -0
  86. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/request_context.py +0 -0
  87. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/rerank.py +0 -0
  88. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/__init__.py +0 -0
  89. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/batch_tool.py +0 -0
  90. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/__init__.py +0 -0
  91. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/executor.py +0 -0
  92. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/otc/parse.py +0 -0
  93. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/sandbox.py +0 -0
  94. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/subagents.py +0 -0
  95. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/todos.py +0 -0
  96. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/tool/prefab/tool_search.py +0 -0
  97. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/usage.py +0 -0
  98. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/harmony.py +0 -0
  99. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/json.py +0 -0
  100. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/logprobs.py +0 -0
  101. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/schema.py +0 -0
  102. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/spatial.py +0 -0
  103. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/validation.py +0 -0
  104. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge/util/xml.py +0 -0
  105. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
  106. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/requires.txt +0 -0
  107. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/src/lm_deluge.egg-info/top_level.txt +0 -0
  108. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_batch_tool.py +0 -0
  109. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_builtin_tools.py +0 -0
  110. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_file_upload.py +0 -0
  111. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_filesystem.py +0 -0
  112. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_filesystem_live.py +0 -0
  113. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_mock_openai.py +0 -0
  114. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_native_mcp_server.py +0 -0
  115. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_openrouter_generic.py +0 -0
  116. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_otc.py +0 -0
  117. {lm_deluge-0.0.81 → lm_deluge-0.0.83}/tests/test_tool_search.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lm_deluge
-Version: 0.0.81
+Version: 0.0.83
 Summary: Python utility for using LLM API models.
 Author-email: Benjamin Anderson <ben@trytaylor.ai>
 Requires-Python: >=3.10

pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel"]
 
 [project]
 name = "lm_deluge"
-version = "0.0.81"
+version = "0.0.83"
 authors = [{ name = "Benjamin Anderson", email = "ben@trytaylor.ai" }]
 description = "Python utility for using LLM API models."
 readme = "README.md"
src/lm_deluge/api_requests/anthropic.py
@@ -101,11 +101,14 @@ def _build_anthropic_request(
             request_json["max_tokens"] += budget
         else:
             request_json["thinking"] = {"type": "disabled"}
+            if "kimi" in model.id and "thinking" in model.id:
+                maybe_warn("WARN_KIMI_THINKING_NO_REASONING")
 
     else:
         request_json["thinking"] = {"type": "disabled"}
         if sampling_params.reasoning_effort:
             print("ignoring reasoning_effort for non-reasoning model")
+
     if system_message is not None:
         request_json["system"] = system_message
 
@@ -231,6 +234,9 @@ class AnthropicRequest(APIRequestBase):
         data = await http_response.json()
         response_content = data["content"]
 
+        # print("=== CONTENT ===")
+        # print(response_content)
+
         # Parse response into Message with parts
         parts = []
         for item in response_content:
src/lm_deluge/client.py
@@ -84,6 +84,8 @@ class _LLMClient(BaseModel):
     json_mode: bool = False
     max_new_tokens: int = 512
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None
+    global_effort: Literal["low", "medium", "high"] | None = None
+    thinking_budget: int | None = None
     logprobs: bool = False
     top_logprobs: int | None = None
     force_local_mcp: bool = False
@@ -103,6 +105,11 @@ class _LLMClient(BaseModel):
     _tracker: StatusTracker | None = PrivateAttr(default=None)
     _capacity_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)
 
+    # usage
+    def print_usage(self):
+        if self._tracker:
+            self._tracker.log_usage()
+
     # Progress management for queueing API
     def open(self, total: int | None = None, show_progress: bool = True):
         self._tracker = StatusTracker(
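
The new print_usage is a thin wrapper over the tracker's log_usage, so it reports nothing unless open() has created a StatusTracker first. A minimal sketch of how it might be used (the model id is a hypothetical, not shown in this diff):

    from lm_deluge import LLMClient

    client = LLMClient("gpt-4o-mini")           # hypothetical model id
    client.open(total=100, show_progress=True)  # creates the StatusTracker
    # ... submit prompts through the client ...
    client.print_usage()                        # logs usage via the tracker; a silent no-op before open()
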
@@ -207,6 +214,8 @@ class _LLMClient(BaseModel):
             json_mode=self.json_mode,
             max_new_tokens=self.max_new_tokens,
             reasoning_effort=self.reasoning_effort,
+            global_effort=self.global_effort or "high",
+            thinking_budget=self.thinking_budget,
             logprobs=self.logprobs,
             top_logprobs=self.top_logprobs,
         )
@@ -342,6 +351,8 @@ class _LLMClient(BaseModel):
             json_mode=data.get("json_mode", False),
             max_new_tokens=data.get("max_new_tokens", 512),
             reasoning_effort=data.get("reasoning_effort", None),
+            global_effort=data.get("global_effort") or "high",
+            thinking_budget=data.get("thinking_budget", None),
             logprobs=data.get("logprobs", False),
             top_logprobs=data.get("top_logprobs", None),
         )
@@ -1072,6 +1083,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1101,6 +1114,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1129,6 +1144,8 @@ def LLMClient(
     json_mode: bool = False,
     max_new_tokens: int = 512,
     reasoning_effort: Literal["low", "medium", "high", "minimal", "none", None] = None,
+    global_effort: Literal["low", "medium", "high"] | None = None,
+    thinking_budget: int | None = None,
     logprobs: bool = False,
     top_logprobs: int | None = None,
     force_local_mcp: bool = False,
@@ -1169,6 +1186,8 @@ def LLMClient(
         json_mode=json_mode,
         max_new_tokens=max_new_tokens,
         reasoning_effort=reasoning_effort,
+        global_effort=global_effort,
+        thinking_budget=thinking_budget,
         logprobs=logprobs,
         top_logprobs=top_logprobs,
         force_local_mcp=force_local_mcp,
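
With all three overloads and the factory body updated, global_effort and thinking_budget pass straight through LLMClient; note from the hunks above that an unset global_effort is coerced to "high" when the request config is built. A sketch, assuming the library's usual positional model argument:

    from lm_deluge import LLMClient

    client = LLMClient(
        "claude-3.7-sonnet",        # hypothetical model id
        reasoning_effort="medium",  # pre-existing knob
        global_effort="low",        # new: falls back to "high" when left as None
        thinking_budget=2048,       # new: explicit thinking-token budget
    )
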
src/lm_deluge/models/__init__.py
@@ -4,9 +4,10 @@ import random
 from dataclasses import dataclass, field
 
 from ..request_context import RequestContext
+from .anthropic import ANTHROPIC_MODELS
 
 # Import and register all provider models
-from .anthropic import ANTHROPIC_MODELS
+from .arcee import ARCEE_MODELS
 from .bedrock import BEDROCK_MODELS
 from .cerebras import CEREBRAS_MODELS
 from .cohere import COHERE_MODELS
@@ -128,6 +129,7 @@ def register_model(
 # Register all models from all providers
 for model_dict in [
     ANTHROPIC_MODELS,
+    ARCEE_MODELS,
     BEDROCK_MODELS,
     COHERE_MODELS,
     DEEPSEEK_MODELS,
src/lm_deluge/models/arcee.py (new file)
@@ -0,0 +1,16 @@
+ARCEE_MODELS = {
+    "trinity-mini": {
+        "id": "trinity-mini",
+        "name": "trinity-mini",
+        "api_base": "https://api.arcee.ai/api/v1",
+        "api_key_env_var": "ARCEE_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": False,
+        "api_spec": "openai",
+        "input_cost": 0.045,
+        "cached_input_cost": 0.045,
+        "output_cost": 0.15,
+        "reasoning_model": True,
+    }
+}
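
Once this entry is picked up by the registry loop in models/__init__.py, the Arcee model should be addressable by its registry id like any other provider. A sketch, assuming LLMClient resolves model ids through the registry (only the dict above is confirmed by this diff):

    import os
    from lm_deluge import LLMClient

    os.environ["ARCEE_API_KEY"] = "..."  # read via the entry's api_key_env_var
    client = LLMClient("trinity-mini")   # requests go to https://api.arcee.ai/api/v1, OpenAI-style spec
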
src/lm_deluge/models/deepseek.py (new file)
@@ -0,0 +1,59 @@
+DEEPSEEK_MODELS = {
+    #  ______                                    _
+    # (______)                                  | |
+    #  _     _ _____ _____ ____   ___ _____ _____| |  _
+    # | |   | | ___ | ___ |  _ \ /___) ___ | ___ | |_/ )
+    # | |__/ /| ____| ____| |_| |___ | ____| ____|  _ (
+    # |_____/ |_____)_____)  __/(___/|_____)_____)_| \_)
+    #                      |_|
+    "deepseek-chat": {
+        "id": "deepseek-chat",
+        "name": "deepseek-chat",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-r1": {
+        "id": "deepseek-r1",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-reasoner": {
+        "id": "deepseek-reasoner",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-reasoner-anthropic-compat": {
+        "id": "deepseek-reasoner-anthropic-compat",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/anthropic",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "anthropic",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+    "deepseek-speciale": {
+        "id": "deepseek-speciale",
+        "name": "deepseek-reasoner",
+        "api_base": "https://api.deepseek.com/v3.2_speciale_expires_on_20251215/v1",
+        "api_key_env_var": "DEEPSEEK_API_KEY",
+        "api_spec": "openai",
+        "input_cost": 0.28,
+        "cached_input_cost": 0.028,
+        "output_cost": 0.42,
+    },
+}
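
Note that the rewritten module (replacing the 27-line file deleted from 0.0.81) registers several aliases for one upstream model: deepseek-r1, deepseek-reasoner, and deepseek-speciale all map to the name "deepseek-reasoner", differing only in api_base, and the anthropic-compat entry only in api_spec. A purely illustrative check against the dict above:

    from lm_deluge.models.deepseek import DEEPSEEK_MODELS

    # each alias resolves to the same upstream model, routed through a different endpoint/spec
    for alias in ("deepseek-r1", "deepseek-reasoner", "deepseek-reasoner-anthropic-compat"):
        cfg = DEEPSEEK_MODELS[alias]
        print(alias, "->", cfg["name"], "via", cfg["api_base"], f"({cfg['api_spec']})")
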
src/lm_deluge/models/kimi.py
@@ -22,6 +22,7 @@ KIMI_MODELS = {
         "api_key_env_var": "MOONSHOT_API_KEY",
         "supports_json": True,
         "api_spec": "anthropic",
+        "reasoning_model": True,
     },
     "kimi-k2-thinking-turbo": {
         "id": "kimi-k2-thinking-turbo",
@@ -30,5 +31,6 @@ KIMI_MODELS = {
         "api_key_env_var": "MOONSHOT_API_KEY",
         "supports_json": True,
         "api_spec": "anthropic",
+        "reasoning_model": True,
     },
 }
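
Flagging both thinking variants as reasoning_model: True routes them into the thinking branch of _build_anthropic_request, where the maybe_warn guard added above fires when such a model ends up with thinking disabled. A quick illustration of which ids match that guard ("kimi-k2" here is a hypothetical non-thinking id for contrast):

    # mirrors the substring check added in anthropic.py
    for model_id in ("kimi-k2", "kimi-k2-thinking", "kimi-k2-thinking-turbo"):
        warns = "kimi" in model_id and "thinking" in model_id
        print(model_id, "-> warns when thinking is disabled:", warns)
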
src/lm_deluge/models/openrouter.py
@@ -71,4 +71,14 @@ OPENROUTER_MODELS = {
         "input_cost": 0.2,
         "output_cost": 35,
     },
+    "trinity-mini-openrouter": {
+        "id": "trinity-mini-openrouter",
+        "name": "arcee-ai/trinity-mini:free",
+        "api_base": "https://openrouter.ai/api/v1",
+        "api_key_env_var": "OPENROUTER_API_KEY",
+        "supports_json": True,
+        "api_spec": "openai",
+        "input_cost": 0.045,
+        "output_cost": 0.15,
+    },
 }
src/lm_deluge/models/together.py
@@ -93,4 +93,15 @@ TOGETHER_MODELS = {
         "output_cost": 0.59,
         "reasoning_model": True,
     },
+    "trinity-mini-together": {
+        "id": "trinity-mini-together",
+        "name": "arcee-ai/trinity-mini",
+        "api_base": "https://api.together.xyz/v1",
+        "api_key_env_var": "TOGETHER_API_KEY",
+        "supports_json": False,
+        "api_spec": "openai",
+        "input_cost": 0.18,
+        "output_cost": 0.59,
+        "reasoning_model": True,
+    },
 }
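
Taken together, arcee-ai/trinity-mini is now reachable by three registry ids: "trinity-mini" (direct via Arcee, $0.045/$0.15 per Mtok), "trinity-mini-openrouter" (OpenRouter's :free listing), and "trinity-mini-together" (Together, $0.18/$0.59, no JSON mode). Selecting a route is just a choice of id, sketched here under the assumption that LLMClient accepts a registry id as its first argument:

    from lm_deluge import LLMClient

    # same underlying model, three providers; swap the id to change the route
    client = LLMClient("trinity-mini-together")      # needs TOGETHER_API_KEY; supports_json is False here
    # client = LLMClient("trinity-mini-openrouter")  # needs OPENROUTER_API_KEY; the :free listing
    # client = LLMClient("trinity-mini")             # needs ARCEE_API_KEY; direct endpoint
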
src/lm_deluge/models/zai.py (new file)
@@ -0,0 +1 @@
+ZAI_MODELS = {}
src/lm_deluge/tool/prefab/__init__.py
@@ -8,8 +8,12 @@ from .batch_tool import BatchTool
 from .tool_search import ToolSearchTool
 from .otc import ToolComposer
 from .sandbox import DaytonaSandbox, ModalSandbox
+from .docs import DocsManager
+from .sheets import SheetsManager
+from .random import RandomTools
 from .subagents import SubAgentManager
 from .todos import TodoItem, TodoManager, TodoPriority, TodoStatus
+from .email import EmailManager
 
 __all__ = [
     "BatchTool",
@@ -26,4 +30,8 @@ __all__ = [
     "WorkspaceBackend",
     "ModalSandbox",
     "DaytonaSandbox",
+    "DocsManager",
+    "SheetsManager",
+    "RandomTools",
+    "EmailManager",
 ]
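
All four new prefab managers (docs.py, sheets.py, random.py, email.py from the file list above) are re-exported at package level, so they import alongside the existing tools; their constructor signatures are not shown in this diff, so the calls below are assumptions:

    # imports confirmed by the __init__ above; construction is hypothetical
    from lm_deluge.tool.prefab import DocsManager, EmailManager, RandomTools, SheetsManager

    docs = DocsManager()      # real constructors may require backends or credentials
    sheets = SheetsManager()
    rand = RandomTools()
    email = EmailManager()
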