lm-deluge 0.0.83__tar.gz → 0.0.85__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lm_deluge-0.0.83/src/lm_deluge.egg-info → lm_deluge-0.0.85}/PKG-INFO +1 -1
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/pyproject.toml +1 -1
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/anthropic.py +3 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/gemini.py +34 -2
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/openai.py +1 -1
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/client.py +86 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/google.py +14 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/openai.py +28 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/prompt.py +39 -11
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/__init__.py +11 -4
- lm_deluge-0.0.85/src/lm_deluge/tool/builtin/anthropic/__init__.py +300 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/builtin/gemini.py +59 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/builtin/openai.py +74 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/__init__.py +173 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/actions.py +148 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/base.py +27 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/batch.py +215 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/converters.py +466 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/kernel.py +702 -0
- lm_deluge-0.0.85/src/lm_deluge/tool/cua/trycua.py +989 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/web_search.py +62 -69
- {lm_deluge-0.0.83 → lm_deluge-0.0.85/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge.egg-info/SOURCES.txt +15 -23
- lm_deluge-0.0.83/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -128
- lm_deluge-0.0.83/src/lm_deluge/built_in_tools/openai.py +0 -28
- lm_deluge-0.0.83/src/lm_deluge/llm_tools/__init__.py +0 -25
- lm_deluge-0.0.83/tests/test_batch_tool.py +0 -98
- lm_deluge-0.0.83/tests/test_builtin_tools.py +0 -58
- lm_deluge-0.0.83/tests/test_docs.py +0 -480
- lm_deluge-0.0.83/tests/test_file_upload.py +0 -627
- lm_deluge-0.0.83/tests/test_filesystem.py +0 -121
- lm_deluge-0.0.83/tests/test_filesystem_live.py +0 -82
- lm_deluge-0.0.83/tests/test_mock_openai.py +0 -582
- lm_deluge-0.0.83/tests/test_native_mcp_server.py +0 -66
- lm_deluge-0.0.83/tests/test_openrouter_generic.py +0 -238
- lm_deluge-0.0.83/tests/test_otc.py +0 -117
- lm_deluge-0.0.83/tests/test_random.py +0 -364
- lm_deluge-0.0.83/tests/test_random_integration.py +0 -98
- lm_deluge-0.0.83/tests/test_random_simple.py +0 -108
- lm_deluge-0.0.83/tests/test_sheets.py +0 -282
- lm_deluge-0.0.83/tests/test_tool_search.py +0 -86
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/LICENSE +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/README.md +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/setup.cfg +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/chat_reasoning.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/mock_openai.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/arcee.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/kimi.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/minimax.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/models/zai.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/classify.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/extract.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/locate.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/ocr.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/score.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/pipelines/translate.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.83/src/lm_deluge/built_in_tools → lm_deluge-0.0.85/src/lm_deluge/tool/builtin}/anthropic/bash.py +0 -0
- {lm_deluge-0.0.83/src/lm_deluge/built_in_tools → lm_deluge-0.0.85/src/lm_deluge/tool/builtin}/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.83/src/lm_deluge/built_in_tools → lm_deluge-0.0.85/src/lm_deluge/tool/builtin}/anthropic/editor.py +0 -0
- {lm_deluge-0.0.83/src/lm_deluge/built_in_tools → lm_deluge-0.0.85/src/lm_deluge/tool/builtin}/base.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/batch_tool.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/docs.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/email.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/filesystem.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/memory.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/otc/__init__.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/otc/executor.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/otc/parse.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/random.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/sandbox.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/sheets.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/subagents.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/todos.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tool/prefab/tool_search.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/schema.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge/warnings.py +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.83 → lm_deluge-0.0.85}/src/lm_deluge.egg-info/top_level.txt +0 -0

src/lm_deluge/api_requests/anthropic.py
@@ -167,6 +167,9 @@ def _build_anthropic_request(
                 "bash_20241022",
             ]:
                 _add_beta(base_headers, "computer-use-2024-10-22")
+            elif tool["type"] == "computer_20251124":
+                # Claude Opus 4.5 - newest computer use with zoom support
+                _add_beta(base_headers, "computer-use-2025-11-24")
            elif tool["type"] == "computer_20250124":
                _add_beta(base_headers, "computer-use-2025-01-24")
            elif tool["type"] == "code_execution_20250522":
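
Taken together with the pre-existing branches, the header routing is a one-to-one mapping from computer-use tool type to beta flag. A minimal sketch of that mapping (the dict and helper below are illustrative, not part of the package API):

COMPUTER_USE_BETAS = {
    "computer_20241022": "computer-use-2024-10-22",  # Claude 3.5/3.6
    "computer_20250124": "computer-use-2025-01-24",  # Claude 4 / Sonnet 3.7
    "computer_20251124": "computer-use-2025-11-24",  # Claude Opus 4.5 (zoom)
}

def beta_for(tool_type: str) -> str | None:
    # Returns the anthropic-beta header value to request, or None for tool
    # types that need no beta flag.
    return COMPUTER_USE_BETAS.get(tool_type)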

src/lm_deluge/api_requests/gemini.py
@@ -114,8 +114,40 @@ async def _build_gemini_request(

     # Add tools if provided
     if tools:
-
-
+        request_tools = []
+        function_declarations = []
+
+        for tool in tools:
+            if isinstance(tool, dict) and tool.get("type") == "gemini_computer_use":
+                # Gemini computer use tool - add as separate tool entry
+                env_map = {
+                    "browser": "ENVIRONMENT_BROWSER",
+                    "android": "ENVIRONMENT_ANDROID",
+                }
+                env = env_map.get(
+                    tool.get("environment", "browser"), "ENVIRONMENT_BROWSER"
+                )
+                cu_tool: dict = {
+                    "computerUse": {
+                        "environment": env,
+                    }
+                }
+                excluded = tool.get("excluded_predefined_functions")
+                if excluded:
+                    cu_tool["computerUse"]["excludedPredefinedFunctions"] = excluded
+                request_tools.append(cu_tool)
+            elif hasattr(tool, "dump_for"):
+                # Regular Tool object
+                function_declarations.append(tool.dump_for("google"))
+            elif isinstance(tool, dict):
+                # Raw dict tool - assume it's a function declaration
+                function_declarations.append(tool)
+
+        if function_declarations:
+            request_tools.append({"functionDeclarations": function_declarations})
+
+        if request_tools:
+            request_json["tools"] = request_tools

     # Handle JSON mode
     if sampling_params.json_mode and model.supports_json:
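
With this change a Gemini computer-use tool can be mixed with ordinary function tools in the same request. A hedged usage sketch (the tool dict keys follow the branch above; the surrounding request plumbing is assumed):

# Input accepted by the new branch; "environment" defaults to "browser".
gemini_cu = {
    "type": "gemini_computer_use",
    "environment": "browser",  # or "android"
    "excluded_predefined_functions": ["drag_and_drop"],  # optional
}

# After the loop above runs over tools=[gemini_cu], request_json["tools"] is:
# [{"computerUse": {"environment": "ENVIRONMENT_BROWSER",
#                   "excludedPredefinedFunctions": ["drag_and_drop"]}}]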

src/lm_deluge/api_requests/openai.py
@@ -367,7 +367,7 @@ async def _build_oa_responses_request(
         elif isinstance(tool, dict):
             # if computer use, make sure model supports it
             if tool["type"] == "computer_use_preview":
-                if model.name != "
+                if model.name != "computer-use-preview":
                     raise ValueError(f"model {model.id} does not support computer use")
                 # have to use truncation
                 request_json["truncation"] = "auto"
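
For context, the tool dict that reaches this branch looks roughly like the following (field names per OpenAI's computer-use-preview tool; treat this as a sketch, not the package's canonical spec):

computer_tool = {
    "type": "computer_use_preview",
    "display_width": 1024,
    "display_height": 768,
    "environment": "browser",
}
# Only the dedicated "computer-use-preview" model accepts this tool, and the
# request must use truncation="auto", which the code above sets automatically.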

src/lm_deluge/client.py
@@ -1012,6 +1012,92 @@ class _LLMClient(BaseModel):
             )
         )

+    async def process_agent_loops_async(
+        self,
+        prompts: Sequence[Prompt],
+        *,
+        tools: Sequence[Tool | dict | MCPServer] | None = None,
+        max_rounds: int = 5,
+        max_concurrent_agents: int = 10,
+        show_progress: bool = True,
+    ) -> list[tuple[Conversation, APIResponse]]:
+        """Process multiple agent loops concurrently.
+
+        Each prompt becomes an independent agent loop that can make multiple LLM
+        calls and execute tools until completion. The agent loops run concurrently,
+        limited by ``max_concurrent_agents``, while the underlying LLM requests
+        are still governed by ``max_concurrent_requests``.
+
+        Args:
+            prompts: Sequence of prompts, each becoming a separate agent loop.
+            tools: Tools available to all agent loops.
+            max_rounds: Maximum rounds per agent loop (default 5).
+            max_concurrent_agents: Maximum number of agent loops running
+                concurrently (default 10). This is separate from the LLM request
+                concurrency limit.
+            show_progress: Whether to show progress bar for LLM requests.
+
+        Returns:
+            List of (Conversation, APIResponse) tuples in the same order as
+            the input prompts.
+        """
+        # Convert prompts to Conversations
+        conversations = prompts_to_conversations(list(prompts))
+
+        # Ensure tracker exists for underlying LLM requests
+        if self._tracker is None:
+            self.open(total=0, show_progress=show_progress)
+            tracker_preopened = False
+        else:
+            tracker_preopened = True
+
+        # Semaphore to limit concurrent agent loops
+        agent_semaphore = asyncio.Semaphore(max_concurrent_agents)
+
+        async def run_single_loop(
+            idx: int, conv: Conversation
+        ) -> tuple[int, Conversation, APIResponse]:
+            """Run a single agent loop with semaphore protection."""
+            async with agent_semaphore:
+                task_id = self._next_task_id
+                self._next_task_id += 1
+                result = await self._run_agent_loop_internal(
+                    task_id, conv, tools=tools, max_rounds=max_rounds
+                )
+                return idx, result.conversation, result.final_response
+
+        # Launch all agent loops concurrently (semaphore limits actual concurrency)
+        tasks = [run_single_loop(idx, conv) for idx, conv in enumerate(conversations)]
+        completed = await asyncio.gather(*tasks)
+
+        # Close tracker if we opened it
+        if not tracker_preopened:
+            self.close()
+
+        # Sort by original index and extract results
+        completed_sorted = sorted(completed, key=lambda x: x[0])
+        return [(conv, resp) for _, conv, resp in completed_sorted]
+
+    def process_agent_loops_sync(
+        self,
+        prompts: Sequence[Prompt],
+        *,
+        tools: Sequence[Tool | dict | MCPServer] | None = None,
+        max_rounds: int = 5,
+        max_concurrent_agents: int = 10,
+        show_progress: bool = True,
+    ) -> list[tuple[Conversation, APIResponse]]:
+        """Synchronous wrapper for :meth:`process_agent_loops_async`."""
+        return asyncio.run(
+            self.process_agent_loops_async(
+                prompts,
+                tools=tools,
+                max_rounds=max_rounds,
+                max_concurrent_agents=max_concurrent_agents,
+                show_progress=show_progress,
+            )
+        )
+
     async def submit_batch_job(
         self,
         prompts: Prompt | Sequence[Prompt],
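
A hedged usage sketch for the new methods (the client constructor and response attribute are assumptions about the package's public API; the method names and keyword arguments come straight from the code above):

from lm_deluge import LLMClient
from lm_deluge.tool import Tool

def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."

client = LLMClient("claude-4-sonnet")  # constructor usage assumed
results = client.process_agent_loops_sync(
    ["What's the weather in Paris?", "What's the weather in Tokyo?"],
    tools=[Tool.from_function(get_weather)],
    max_rounds=3,
    max_concurrent_agents=5,
)
for conversation, final_response in results:
    print(final_response.completion)  # attribute name assumed

Each prompt runs as its own loop; up to five loops are in flight at once, while individual LLM calls still respect the client's request concurrency limit.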

src/lm_deluge/models/google.py
@@ -153,4 +153,18 @@ GOOGLE_MODELS = {
         # Note: >200k tokens pricing is $4/$18 per million
         "reasoning_model": True,
     },
+    # Gemini 2.5 Computer Use model
+    "gemini-2.5-computer-use": {
+        "id": "gemini-2.5-computer-use",
+        "name": "gemini-2.5-computer-use-preview-10-2025",
+        "api_base": "https://generativelanguage.googleapis.com/v1beta",
+        "api_key_env_var": "GEMINI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "api_spec": "gemini",
+        "input_cost": 1.25,  # same as gemini-2.5-pro for now
+        "cached_input_cost": 0.31,
+        "output_cost": 10.0,
+        "reasoning_model": True,
+    },
 }
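
The new entry makes the preview model addressable by its short registry id; a small sketch, assuming the client resolves ids through this registry:

from lm_deluge.models.google import GOOGLE_MODELS

entry = GOOGLE_MODELS["gemini-2.5-computer-use"]
entry["name"]  # "gemini-2.5-computer-use-preview-10-2025", the actual API model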

src/lm_deluge/models/openai.py
@@ -149,6 +149,34 @@ OPENAI_MODELS = {
        "output_cost": 6.0,
        "reasoning_model": True,
    },
+    "o4-mini-deep-research": {
+        "id": "o4-mini-deep-research",
+        "name": "o4-mini-deep-research",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 2,
+        "cached_input_cost": 0.5,
+        "output_cost": 8.0,
+        "reasoning_model": True,
+    },
+    "o3-deep-research": {
+        "id": "o3-deep-research",
+        "name": "o3-deep-research",
+        "api_base": "https://api.openai.com/v1",
+        "api_key_env_var": "OPENAI_API_KEY",
+        "supports_json": True,
+        "supports_logprobs": False,
+        "supports_responses": True,
+        "api_spec": "openai",
+        "input_cost": 10,
+        "cached_input_cost": 2.50,
+        "output_cost": 40.0,
+        "reasoning_model": True,
+    },
    "o3": {
        "id": "o3",
        "name": "o3-2025-04-16",
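
Costs in these registries are quoted in USD per million tokens (see the ">200k tokens pricing is $4/$18 per million" note in the Google registry above), so a rough request-cost estimate can be read straight off an entry. A sketch:

from lm_deluge.models.openai import OPENAI_MODELS

def estimate_cost_usd(model_id: str, input_tokens: int, output_tokens: int) -> float:
    # Ignores cached-input pricing for simplicity.
    entry = OPENAI_MODELS[model_id]
    return (
        input_tokens / 1_000_000 * entry["input_cost"]
        + output_tokens / 1_000_000 * entry["output_cost"]
    )

estimate_cost_usd("o3-deep-research", 50_000, 10_000)  # 0.5 + 0.4 = 0.9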

src/lm_deluge/prompt.py
@@ -203,6 +203,8 @@ class ToolResult:
             "call_id": self.tool_call_id,
         }
         if self.built_in_type == "computer_call":
+            # OpenAI expects "computer_call_output" for the result type
+            result["type"] = "computer_call_output"
             result["output"] = output_data.get("output", {})
             if "acknowledged_safety_checks" in output_data:
                 result["acknowledged_safety_checks"] = output_data[
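
The serialized payload for a computer-use tool result therefore now carries the correct discriminator. An illustrative payload (the call id and screenshot output are made up; the screenshot shape follows OpenAI's computer-use conventions):

{
    "type": "computer_call_output",
    "call_id": "call_abc123",
    "output": {
        "type": "computer_screenshot",
        "image_url": "data:image/png;base64,...",
    },
}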

src/lm_deluge/prompt.py
@@ -235,15 +237,41 @@ class ToolResult:
             raise ValueError("unsupported self.result type")

     def gemini(self) -> dict:
-
-
-
-            "functionResponse": {
-                "name": self.tool_call_id,  # Gemini uses name field for ID
-                "response": {"result": self.result},
-            }
+        # Build the function response
+        func_response: dict = {
+            "name": self.tool_call_id,  # Gemini uses name field for ID
         }

+        # Handle different result types
+        if isinstance(self.result, str):
+            func_response["response"] = {"result": self.result}
+        elif isinstance(self.result, dict):
+            # Check for Gemini computer use format with inline screenshot
+            if self.built_in_type == "gemini_computer_use":
+                # Gemini CU expects response dict with optional inline_data parts
+                func_response["response"] = self.result.get("response", {})
+                # Include inline data (screenshot) if present
+                if "inline_data" in self.result:
+                    func_response["parts"] = [
+                        {
+                            "inlineData": {
+                                "mimeType": self.result["inline_data"].get(
+                                    "mime_type", "image/png"
+                                ),
+                                "data": self.result["inline_data"]["data"],
+                            }
+                        }
+                    ]
+            else:
+                func_response["response"] = self.result
+        elif isinstance(self.result, list):
+            # Handle content blocks (images, etc.) - not yet implemented
+            raise ValueError("can't handle content blocks for gemini yet")
+        else:
+            func_response["response"] = {"result": str(self.result)}
+
+        return {"functionResponse": func_response}
+
     def mistral(self) -> dict:
         return {
             "type": "tool_result",
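
For a Gemini computer-use result that carries a screenshot, the rewritten method produces a part like the following (a sketch; ToolResult construction details are elided):

result_value = {
    "response": {"url": "https://example.com"},  # forwarded verbatim
    "inline_data": {"mime_type": "image/png", "data": "<base64>"},
}
# ToolResult(..., result=result_value, built_in_type="gemini_computer_use").gemini()
# returns:
# {"functionResponse": {
#     "name": "<tool_call_id>",
#     "response": {"url": "https://example.com"},
#     "parts": [{"inlineData": {"mimeType": "image/png", "data": "<base64>"}}],
# }}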

src/lm_deluge/prompt.py
@@ -1367,14 +1395,14 @@ class Conversation:
             # For assistant messages, extract computer calls as separate items
             text_parts = []
             for p in m.parts:
-                if isinstance(p, ToolCall) and p.
+                if isinstance(p, ToolCall) and p.built_in_type == "computer_call":
                     # Computer calls become separate items in the input array
-
+                    # p.arguments already contains the full action dict with "type"
                     input_items.append(
                         {
                             "type": "computer_call",
                             "call_id": p.id,
-                            "action":
+                            "action": p.arguments,
                         }
                     )
                 elif isinstance(p, Text):
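
Concretely, an assistant-turn ToolCall whose arguments hold a click action now serializes into a complete input item (action field names follow OpenAI's computer-use actions; illustrative values):

# Given p.id == "call_xyz" and
# p.arguments == {"type": "click", "button": "left", "x": 310, "y": 122}:
{
    "type": "computer_call",
    "call_id": "call_xyz",
    "action": {"type": "click", "button": "left", "x": 310, "y": 122},
}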

src/lm_deluge/prompt.py
@@ -1752,7 +1780,7 @@ class Conversation:
 Prompt: TypeAlias = str | list[dict] | Message | Conversation


-def prompts_to_conversations(prompts: Sequence[Prompt]) -> Sequence[
+def prompts_to_conversations(prompts: Sequence[Prompt]) -> Sequence[Conversation]:
     converted = []
     for prompt in prompts:
         if isinstance(prompt, Conversation):
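
The corrected return annotation matches how the helper behaves: any mix of Prompt forms normalizes to Conversation objects, e.g.:

convs = prompts_to_conversations(["What is 2+2?", "Name a prime number."])
assert all(isinstance(c, Conversation) for c in convs)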

src/lm_deluge/tool/__init__.py
@@ -614,6 +614,7 @@ class Tool(BaseModel):
         cls,
         func: Callable,
         *,
+        name: str | None = None,
         include_output_schema_in_description: bool = False,
     ) -> "Tool":
         """

src/lm_deluge/tool/__init__.py
@@ -629,6 +630,8 @@ class Tool(BaseModel):

         Args:
             func: The function to create a tool from.
+            name: Optional name override for the tool. If not provided,
+                uses the function's __name__.
             include_output_schema_in_description: If True, append the return type
                 and any complex type definitions to the tool description. This can
                 help the model understand what the tool returns. Default is False.

src/lm_deluge/tool/__init__.py
@@ -646,6 +649,10 @@ class Tool(BaseModel):
            # tool.output_schema contains schema for list[dict]
            # tool.call(query="test", validate_output=True) validates return value

+            # With custom name:
+            tool = Tool.from_function(search, name="search_database")
+            # tool.name is "search_database"
+
            # With output schema in description:
            tool = Tool.from_function(search, include_output_schema_in_description=True)
            # Description becomes:

src/lm_deluge/tool/__init__.py
@@ -653,11 +660,11 @@ class Tool(BaseModel):
            #
            # Returns: list[dict]"
        """
-        # Get function name
-
+        # Get function name (use override if provided)
+        tool_name = name if name is not None else func.__name__

        # Get docstring for description
-        description = func.__doc__ or f"Call the {
+        description = func.__doc__ or f"Call the {tool_name} function"
        description = description.strip()

        # Use TypeAdapter for robust schema generation

src/lm_deluge/tool/__init__.py
@@ -705,7 +712,7 @@ class Tool(BaseModel):
            description = f"{description}\n\n{output_info}"

        tool = cls(
-            name=
+            name=tool_name,
            description=description,
            parameters=parameters,
            required=required,
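
One consequence of threading tool_name through: for functions without a docstring, the generated description now uses the override as well. A small sketch:

def _impl(x: int) -> int:
    return x * 2

tool = Tool.from_function(_impl, name="double")
# tool.name == "double"; with no docstring, the description falls back to
# "Call the double function" rather than "Call the _impl function".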

src/lm_deluge/tool/builtin/anthropic/__init__.py (new file)
@@ -0,0 +1,300 @@
+from typing import Literal
+
+# Tool version identifiers corresponding to Anthropic's versioned tools
+# - 2024-10-22: Claude 3.5/3.6 (original computer use)
+# - 2025-01-24: Claude Sonnet 3.7 and Claude 4 models
+# - 2025-11-24: Claude Opus 4.5 (adds zoom action)
+ToolVersion = Literal["2024-10-22", "2025-01-24", "2025-11-24"]
+ToolType = Literal["bash", "computer", "editor"]
+
+
+def model_to_version(model: str) -> ToolVersion:
+    """
+    Determine the appropriate tool version for a given model.
+
+    Model compatibility:
+    - Claude Opus 4.5 (claude-opus-4-5-*): Uses 2025-11-24 tools with zoom support
+    - Claude 4 models (claude-4-*, claude-sonnet-4-*, claude-opus-4-*, etc.): Uses 2025-01-24 tools
+    - Claude Sonnet 3.7 (deprecated): Uses 2025-01-24 tools
+    - Claude 3.5/3.6: Uses 2024-10-22 tools
+    """
+    model_lower = model.lower()
+
+    # Check for valid model families
+    if not any(x in model_lower for x in ["opus", "sonnet", "haiku"]):
+        raise ValueError(
+            f"Cannot use computer tools with model '{model}'. "
+            "Computer use requires Claude Opus, Sonnet, or Haiku models."
+        )
+
+    # Claude Opus 4.5 - newest tool version with zoom support
+    # Matches: claude-opus-4-5-*, claude-4.5-opus, etc.
+    if (
+        "opus-4-5" in model_lower
+        or "opus-4.5" in model_lower
+        or "4.5-opus" in model_lower
+    ):
+        return "2025-11-24"
+
+    # Claude 4 models (Sonnet 4.5, Opus 4, Sonnet 4, Haiku 4.5, etc.)
+    # Matches aliases like claude-4-sonnet, claude-4.5-sonnet
+    # and full names like claude-sonnet-4-20250514, claude-sonnet-4-5-20250929
+    claude_4_patterns = [
+        "claude-4",  # alias prefix: claude-4-sonnet, claude-4-opus
+        "4.5-sonnet",  # alias: claude-4.5-sonnet
+        "4.5-haiku",  # alias: claude-4.5-haiku
+        "sonnet-4-5",  # full name: claude-sonnet-4-5-*
+        "sonnet-4-",  # full name: claude-sonnet-4-* (note trailing dash to avoid matching 3-5)
+        "opus-4-",  # full name: claude-opus-4-* (but not opus-4-5 handled above)
+        "haiku-4-5",  # full name: claude-haiku-4-5-*
+    ]
+    if any(p in model_lower for p in claude_4_patterns):
+        return "2025-01-24"
+
+    # Claude Sonnet 3.7 (deprecated but still supported)
+    if "3.7" in model_lower or "3-7" in model_lower:
+        return "2025-01-24"
+
+    # Claude 3.5/3.6 (older models)
+    if any(x in model_lower for x in ["3.5", "3-5", "3.6", "3-6"]):
+        return "2024-10-22"
+
+    raise ValueError(
+        f"Unsupported model '{model}' for Anthropic computer use. "
+        "Supported: Claude Opus 4.5, Claude 4 models, Sonnet 3.7, or 3.5/3.6."
+    )
+
+
+def get_beta_header(model: str) -> str:
+    """
+    Get the appropriate beta header for computer use with the given model.
+
+    Returns:
+        Beta header string to use in the API request.
+    """
+    version = model_to_version(model)
+
+    if version == "2025-11-24":
+        return "computer-use-2025-11-24"
+    elif version == "2025-01-24":
+        return "computer-use-2025-01-24"
+    else:  # 2024-10-22
+        return "computer-use-2024-10-22"
+
+
+def get_anthropic_cu_tools(
+    model: str,
+    display_width: int = 1024,
+    display_height: int = 768,
+    exclude_tools: list[ToolType] | None = None,
+    enable_zoom: bool = False,
+) -> list[dict]:
+    """
+    Get the computer use tools for the given model.
+
+    Args:
+        model: The model name (e.g., "claude-opus-4-5-20251124", "claude-4-sonnet")
+        display_width: Display width in pixels (recommended <= 1280)
+        display_height: Display height in pixels (recommended <= 800)
+        exclude_tools: List of tool types to exclude ("bash", "computer", "editor")
+        enable_zoom: Enable zoom action for Opus 4.5 (computer_20251124 only)
+
+    Returns:
+        List of tool definitions for the Anthropic API.
+
+    Note:
+        Keep display resolution at or below 1280x800 (WXGA) for best performance.
+        Higher resolutions may cause accuracy issues due to image resizing.
+    """
+    version = model_to_version(model)
+
+    if version == "2024-10-22":
+        # Claude 3.5/3.6 - original computer use
+        result = [
+            {
+                "name": "computer",
+                "type": "computer_20241022",
+                "display_width_px": display_width,
+                "display_height_px": display_height,
+                "display_number": None,
+            },
+            {"name": "str_replace_editor", "type": "text_editor_20241022"},
+            {"name": "bash", "type": "bash_20241022"},
+        ]
+    elif version == "2025-01-24":
+        # Claude 4 models and Sonnet 3.7
+        # Uses computer_20250124 and text_editor_20250728
+        result = [
+            {
+                "name": "computer",
+                "type": "computer_20250124",
+                "display_width_px": display_width,
+                "display_height_px": display_height,
+                "display_number": None,
+            },
+            {"name": "str_replace_based_edit_tool", "type": "text_editor_20250728"},
+            {"name": "bash", "type": "bash_20250124"},
+        ]
+    elif version == "2025-11-24":
+        # Claude Opus 4.5 - newest with zoom support
+        computer_tool: dict = {
+            "name": "computer",
+            "type": "computer_20251124",
+            "display_width_px": display_width,
+            "display_height_px": display_height,
+            "display_number": None,
+        }
+        # Enable zoom action if requested (allows Claude to zoom into screen regions)
+        if enable_zoom:
+            computer_tool["enable_zoom"] = True
+
+        result = [
+            computer_tool,
+            {"name": "str_replace_based_edit_tool", "type": "text_editor_20250728"},
+            {"name": "bash", "type": "bash_20250124"},
+        ]
+    else:
+        raise ValueError(f"Invalid tool version: {version}")
+
+    if exclude_tools is None:
+        return result
+
+    if "bash" in exclude_tools:
+        result = [x for x in result if x["name"] != "bash"]
+    if "editor" in exclude_tools:
+        result = [x for x in result if "edit" not in x["name"]]
+    if "computer" in exclude_tools:
+        result = [x for x in result if x["name"] != "computer"]
+
+    return result
+
+
+def bash_tool(model: str = "claude-4-sonnet") -> dict:
+    """
+    Get the bash tool definition for the given model.
+
+    The bash tool allows Claude to execute shell commands.
+
+    Note: Claude 3.5 requires the computer-use-2024-10-22 beta header.
+    The bash tool is generally available in Claude 4 and Sonnet 3.7.
+    """
+    version = model_to_version(model)
+
+    if version in ("2025-11-24", "2025-01-24"):
+        return {"type": "bash_20250124", "name": "bash"}
+    else:  # 2024-10-22
+        return {"type": "bash_20241022", "name": "bash"}
+
+
+def text_editor_tool(model: str = "claude-4-sonnet") -> dict:
+    """
+    Get the text editor tool definition for the given model.
+
+    The text editor tool allows Claude to view, create, and edit files.
+
+    Note:
+        - Claude 4 and Opus 4.5 use text_editor_20250728 with name "str_replace_based_edit_tool"
+          (no undo_edit command, has optional max_characters parameter)
+        - Claude Sonnet 3.7 uses text_editor_20250124 with name "str_replace_editor"
+          (includes undo_edit command)
+        - Claude 3.5/3.6 uses text_editor_20241022 with name "str_replace_editor"
+    """
+    version = model_to_version(model)
+
+    if version in ("2025-11-24", "2025-01-24"):
+        return {"type": "text_editor_20250728", "name": "str_replace_based_edit_tool"}
+    else:  # 2024-10-22
+        return {"type": "text_editor_20241022", "name": "str_replace_editor"}
+
+
+def computer_tool(
+    model: str = "claude-4-sonnet",
+    display_width: int = 1024,
+    display_height: int = 768,
+    enable_zoom: bool = False,
+) -> dict:
+    """
+    Get the computer use tool definition for the given model.
+
+    The computer tool allows Claude to see and control desktop environments
+    through screenshots and mouse/keyboard actions.
+
+    Args:
+        model: The model name
+        display_width: Display width in pixels (recommended <= 1280)
+        display_height: Display height in pixels (recommended <= 800)
+        enable_zoom: Enable zoom action (Opus 4.5 only). When enabled, Claude can
+            use the zoom action to view specific screen regions at full resolution.
+
+    Available actions by version:
+    - All versions: screenshot, left_click, type, key, mouse_move
+    - computer_20250124+: scroll, left_click_drag, right_click, middle_click,
+      double_click, triple_click, left_mouse_down, left_mouse_up, hold_key, wait
+    - computer_20251124 (Opus 4.5): All above + zoom (requires enable_zoom=True)
+    """
+    version = model_to_version(model)
+
+    if version == "2025-11-24":
+        tool: dict = {
+            "name": "computer",
+            "type": "computer_20251124",
+            "display_width_px": display_width,
+            "display_height_px": display_height,
+            "display_number": None,
+        }
+        if enable_zoom:
+            tool["enable_zoom"] = True
+        return tool
+    elif version == "2025-01-24":
+        return {
+            "name": "computer",
+            "type": "computer_20250124",
+            "display_width_px": display_width,
+            "display_height_px": display_height,
+            "display_number": None,
+        }
+    else:  # 2024-10-22
+        return {
+            "name": "computer",
+            "type": "computer_20241022",
+            "display_width_px": display_width,
+            "display_height_px": display_height,
+            "display_number": None,
+        }
+
+
+def web_search_tool(
+    max_uses: int = 5,
+    allowed_domains: list[str] | None = None,
+    blocked_domains: list[str] | None = None,
+) -> dict:
+    """
+    Get the web search tool definition.
+
+    Args:
+        max_uses: Maximum number of searches per request (default: 5)
+        allowed_domains: Only include results from these domains
+        blocked_domains: Never include results from these domains
+
+    Note: You can use either allowed_domains or blocked_domains, but not both.
+    """
+    res: dict = {
+        "type": "web_search_20250305",
+        "name": "web_search",
+        "max_uses": max_uses,
+    }
+    if allowed_domains:
+        res["allowed_domains"] = allowed_domains
+    if blocked_domains:
+        res["blocked_domains"] = blocked_domains
+    return res
+
+
+def code_execution_tool() -> dict:
+    """
+    Get the code execution tool definition.
+
+    The code execution tool is currently in beta.
+    This feature requires the beta header: "anthropic-beta": "code-execution-2025-05-22"
+    """
+    return {"type": "code_execution_20250522", "name": "code_execution"}