tooluniverse 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of tooluniverse might be problematic.

Files changed (32)
  1. tooluniverse/__init__.py +17 -5
  2. tooluniverse/agentic_tool.py +268 -330
  3. tooluniverse/compose_scripts/output_summarizer.py +21 -15
  4. tooluniverse/data/agentic_tools.json +2 -2
  5. tooluniverse/data/odphp_tools.json +354 -0
  6. tooluniverse/data/output_summarization_tools.json +2 -2
  7. tooluniverse/default_config.py +1 -0
  8. tooluniverse/llm_clients.py +570 -0
  9. tooluniverse/mcp_tool_registry.py +3 -3
  10. tooluniverse/odphp_tool.py +226 -0
  11. tooluniverse/output_hook.py +92 -3
  12. tooluniverse/remote/boltz/boltz_mcp_server.py +2 -2
  13. tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +2 -2
  14. tooluniverse/smcp.py +204 -112
  15. tooluniverse/smcp_server.py +23 -20
  16. tooluniverse/test/list_azure_openai_models.py +210 -0
  17. tooluniverse/test/test_agentic_tool_azure_models.py +91 -0
  18. tooluniverse/test/test_api_key_validation_min.py +64 -0
  19. tooluniverse/test/test_claude_sdk.py +86 -0
  20. tooluniverse/test/test_global_fallback.py +288 -0
  21. tooluniverse/test/test_hooks_direct.py +219 -0
  22. tooluniverse/test/test_odphp_tool.py +166 -0
  23. tooluniverse/test/test_openrouter_client.py +288 -0
  24. tooluniverse/test/test_stdio_hooks.py +285 -0
  25. tooluniverse/test/test_tool_finder.py +1 -1
  26. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/METADATA +101 -74
  27. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/RECORD +31 -19
  28. tooluniverse-1.0.5.dist-info/licenses/LICENSE +201 -0
  29. tooluniverse-1.0.3.dist-info/licenses/LICENSE +0 -21
  30. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/WHEEL +0 -0
  31. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/entry_points.txt +0 -0
  32. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/top_level.txt +0 -0
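
For orientation, the new ODPHP (health.gov MyHealthFinder) tools added via odphp_tool.py and odphp_tools.json are called through the standard ToolUniverse interface. A minimal sketch, assembled from tooluniverse/test/test_odphp_tool.py shown later in this diff (tool names and arguments are taken from that test file; everything else is illustrative):

    from tooluniverse import ToolUniverse

    tooluni = ToolUniverse()
    tooluni.load_tools()

    # Personalized recommendations for a 35-year-old, non-pregnant female, in English
    res = tooluni.run({
        "name": "odphp_myhealthfinder",
        "arguments": {"age": 35, "sex": "female", "pregnant": "no", "lang": "en"},
    })

    # Fetch the text of an outbound ODPHP resource, truncated to 4000 characters
    pages = tooluni.run({
        "name": "odphp_outlink_fetch",
        "arguments": {
            "urls": ["https://odphp.health.gov/myhealthfinder/health-conditions/heart-health/keep-your-heart-healthy"],
            "max_chars": 4000,
        },
    })
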
tooluniverse/test/test_hooks_direct.py
@@ -0,0 +1,219 @@
+ #!/usr/bin/env python3
+ """
+ Simplified test of hooks in MCP stdio mode.
+ Tests the tool-calling functionality directly.
+ """
+ import subprocess
+ import sys
+
+
+ def test_tool_call_directly(hooks_enabled=False):
+     """Run a tool call directly and collect timing and summary info."""
+     print(f"\n{'='*60}")
+     print(f"Test mode: {'hooks enabled' if hooks_enabled else 'hooks disabled'}")
+     print(f"{'='*60}")
+
+     # Build the command
+     cmd = [
+         sys.executable,
+         "-c",
+         f"""
+ import sys
+ import time
+ sys.path.insert(0, 'src')
+ from tooluniverse.execute_function import ToolUniverse
+
+ # Create a ToolUniverse instance
+ tooluniverse = ToolUniverse()
+
+ # Configure hooks
+ if {hooks_enabled}:
+     print("Enabling hooks...")
+     tooluniverse.toggle_hooks(True)
+ else:
+     print("Disabling hooks...")
+     tooluniverse.toggle_hooks(False)
+
+ # Load tools
+ print("Loading tools...")
+ tooluniverse.load_tools()
+
+ # Run a tool call
+ function_call = {{
+     "name": "OpenTargets_get_target_gene_ontology_by_ensemblID",
+     "arguments": {{"ensemblId": "ENSG00000012048"}}
+ }}
+
+ print("Starting tool call...")
+ start_time = time.time()
+ result = tooluniverse.run_one_function(function_call)
+ end_time = time.time()
+
+ response_time = end_time - start_time
+ result_str = str(result)
+ result_length = len(result_str)
+
+ print("Tool call finished")
+ print(f"Response time: {{response_time:.2f}} seconds")
+ print(f"Response length: {{result_length}} characters")
+ print(f"Response type: {{type(result)}}")
+
+ # Check whether the result is a summary
+ if "summary" in result_str.lower() or "摘要" in result_str:
+     print("✅ Summary content detected")
+ else:
+     print("📄 Raw content (not summarized)")
+
+ # Print the first 200 characters of the result
+ print(f"Result preview: {{result_str[:200]}}...")
+ """,
+     ]
+
+     print(f"Launch command: {' '.join(cmd[:3])} ...")
+
+     # Start the subprocess
+     process = subprocess.Popen(
+         cmd,
+         stdin=subprocess.PIPE,
+         stdout=subprocess.PIPE,
+         stderr=subprocess.PIPE,
+         text=True,
+         bufsize=0,
+     )
+
+     try:
+         # Wait for the run to finish
+         stdout, stderr = process.communicate(timeout=60)
+
+         print("Standard output:")
+         print(stdout)
+
+         if stderr:
+             print("Standard error:")
+             print(stderr)
+
+         # Parse the results
+         lines = stdout.split("\n")
+         response_time = None
+         result_length = None
+         is_summary = False
+
+         for line in lines:
+             if "Response time:" in line:
+                 try:
+                     response_time = float(line.split(":")[1].strip().split()[0])
+                 except (ValueError, IndexError):
+                     pass
+             elif "Response length:" in line:
+                 try:
+                     result_length = int(line.split(":")[1].strip().split()[0])
+                 except (ValueError, IndexError):
+                     pass
+             elif "Summary content detected" in line:
+                 is_summary = True
+
+         return {
+             "hooks_enabled": hooks_enabled,
+             "response_time": response_time,
+             "result_length": result_length,
+             "is_summary": is_summary,
+             "success": process.returncode == 0,
+             "stdout": stdout,
+             "stderr": stderr,
+         }
+
+     except subprocess.TimeoutExpired:
+         print("❌ Test timed out")
+         process.kill()
+         return {
+             "hooks_enabled": hooks_enabled,
+             "response_time": None,
+             "result_length": None,
+             "is_summary": False,
+             "success": False,
+             "error": "timeout",
+         }
+     except Exception as e:
+         print(f"❌ Test failed: {e}")
+         return {
+             "hooks_enabled": hooks_enabled,
+             "response_time": None,
+             "result_length": None,
+             "is_summary": False,
+             "success": False,
+             "error": str(e),
+         }
+
+
+ def main():
+     """Entry point."""
+     print("MCP stdio-mode hooks direct test")
+     print("Test tool: OpenTargets_get_target_gene_ontology_by_ensemblID")
+     print("Test arguments: ensemblId=ENSG00000012048")
+
+     # Run with hooks disabled
+     result_no_hooks = test_tool_call_directly(hooks_enabled=False)
+
+     # Run with hooks enabled
+     result_with_hooks = test_tool_call_directly(hooks_enabled=True)
+
+     # Compare the results
+     print(f"\n{'='*60}")
+     print("Result comparison")
+     print(f"{'='*60}")
+
+     print("Hooks disabled:")
+     if result_no_hooks["success"]:
+         print(
+             f"  ✅ Success - response time: {result_no_hooks['response_time']:.2f}s, length: {result_no_hooks['result_length']} characters"
+         )
+         if result_no_hooks["is_summary"]:
+             print("  📄 Summary content detected")
+         else:
+             print("  📄 Raw content (not summarized)")
+     else:
+         print(f"  ❌ Failed - {result_no_hooks.get('error', 'unknown error')}")
+
+     print("Hooks enabled:")
+     if result_with_hooks["success"]:
+         print(
+             f"  ✅ Success - response time: {result_with_hooks['response_time']:.2f}s, length: {result_with_hooks['result_length']} characters"
+         )
+         if result_with_hooks["is_summary"]:
+             print("  ✅ Summary content detected")
+         else:
+             print("  📄 Raw content (not summarized)")
+     else:
+         print(f"  ❌ Failed - {result_with_hooks.get('error', 'unknown error')}")
+
+     # Performance comparison
+     if result_no_hooks["success"] and result_with_hooks["success"]:
+         time_diff = (
+             result_with_hooks["response_time"] - result_no_hooks["response_time"]
+         )
+         length_diff = (
+             result_with_hooks["result_length"] - result_no_hooks["result_length"]
+         )
+
+         print("\nPerformance comparison:")
+         print(
+             f"  Time difference: {time_diff:+.2f}s ({'slower with hooks' if time_diff > 0 else 'faster with hooks'})"
+         )
+         print(
+             f"  Length difference: {length_diff:+d} characters ({'longer with hooks' if length_diff > 0 else 'shorter with hooks'})"
+         )
+
+         if abs(time_diff) < 5.0:
+             print("  ✅ Time difference is within an acceptable range")
+         else:
+             print("  ⚠️ Time difference is large; further optimization is needed")
+
+         # Check whether hooks actually took effect
+         if result_with_hooks["is_summary"] and not result_no_hooks["is_summary"]:
+             print("  ✅ Hooks are working as expected")
+         elif result_with_hooks["is_summary"] == result_no_hooks["is_summary"]:
+             print("  ⚠️ Hooks may not have taken effect")
+
+
+ if __name__ == "__main__":
+     main()
tooluniverse/test/test_odphp_tool.py
@@ -0,0 +1,166 @@
+ import json
+ import os
+ from tooluniverse import ToolUniverse
+ import pytest
+
+ schema_path = os.path.join(os.path.dirname(__file__), "..", "data", "odphp_tools.json")
+ with open(schema_path) as f:
+     schemas = {tool["name"]: tool["return_schema"] for tool in json.load(f)}
+
+ tooluni = ToolUniverse()
+ tooluni.load_tools()
+
+
+ def summarize_result(tool_name, res):
+     if isinstance(res, str):
+         return f"{tool_name}: INVALID Raw string response: {res[:200]}..."
+     if isinstance(res, dict):
+         if "error" in res:
+             return f"{tool_name}: ERROR {res['error']}"
+         data = res.get("data", {})
+         total = data.get("Total") if isinstance(data, dict) else None
+         msg = f"{tool_name}: SUCCESS"
+         if isinstance(total, int):
+             msg += f" | Total={total}"
+
+         if tool_name == "odphp_myhealthfinder":
+             heading = data.get("MyHFHeading", "")
+             resources = (data.get("Resources", {}).get("All", {}).get("Resource", [])) or []
+             first_title = resources[0].get("Title") if resources else None
+             msg += f" | Heading='{heading[:60]}...'"
+             if first_title:
+                 msg += f" | FirstResource='{first_title}'"
+             callouts = (data.get("Callouts", {}).get("All", {}).get("Resource", [])) or []
+             if callouts and callouts[0].get("MyHFTitle"):
+                 msg += f" | FirstCallout='{callouts[0].get('MyHFTitle')}'"
+
+         elif tool_name == "odphp_itemlist":
+             items = data.get("Items", {}).get("Item", []) or []
+             titles = [i.get("Title") for i in items[:3]]
+             if titles:
+                 msg += f" | ExampleItems={titles}"
+
+         elif tool_name == "odphp_topicsearch":
+             resources = data.get("Resources", {}).get("Resource", []) or []
+             titles = [r.get("Title") for r in resources[:3]]
+             if titles:
+                 msg += f" | ExampleTopics={titles}"
+
+         elif tool_name.startswith("odphp_outlink_fetch"):
+             results = res.get("results") or []
+             if results:
+                 first = results[0]
+                 msg += f" | url={first.get('url')} status={first.get('status')} type={first.get('content_type')}"
+                 if first.get("title"):
+                     msg += f" | Title='{first['title'][:50]}...'"
+                 if first.get("text"):
+                     snippet = first["text"][:80].replace("\n", " ")
+                     msg += f" | TextSnippet='{snippet}...'"
+
+         expected_keys = schemas.get(tool_name, {}).get("properties", {}).keys()
+         missing = [k for k in expected_keys if k not in data and k not in res]
+         if missing:
+             msg += f" | WARNING: Missing keys {missing}"
+         else:
+             msg += " | Schema OK"
+         return msg
+     return f"{tool_name}: INVALID Unexpected type {type(res)}"
+
+
+ def test_01_myhealthfinder_valid():
+     res = tooluni.run({"name": "odphp_myhealthfinder",
+                        "arguments": {"age": 35, "sex": "female", "pregnant": "no", "lang": "en"}})
+     print(summarize_result("odphp_myhealthfinder", res))
+     assert isinstance(res, dict) and not res.get("error")
+
+
+ def test_02_itemlist_valid():
+     res = tooluni.run({"name": "odphp_itemlist", "arguments": {"type": "topic", "lang": "en"}})
+     print(summarize_result("odphp_itemlist", res))
+     assert isinstance(res, dict) and not res.get("error")
+
+
+ def test_03_topicsearch_keyword_valid():
+     res = tooluni.run({"name": "odphp_topicsearch", "arguments": {"keyword": "cancer", "lang": "en"}})
+     print(summarize_result("odphp_topicsearch", res))
+     assert isinstance(res, dict) and not res.get("error")
+
+
+ def test_04_invalid_types_fail_fast():
+     r1 = tooluni.run({"name": "odphp_myhealthfinder", "arguments": {"age": "banana"}})
+     r2 = tooluni.run({"name": "odphp_topicsearch", "arguments": {"topicId": 123}})
+     print("Expected type errors:", r1, r2)
+     assert isinstance(r1, str) and "Type mismatches" in r1
+     assert isinstance(r2, str) and "Type mismatches" in r2
+
+
+ def test_05_sections_case_and_strip_html():
+     res = tooluni.run({"name": "odphp_topicsearch",
+                        "arguments": {"keyword": "Keep Your Heart Healthy", "lang": "en", "strip_html": True}})
+     print(summarize_result("odphp_topicsearch", res))
+     assert isinstance(res, dict) and not res.get("error")
+     data = res.get("data") or {}
+     resources = (data.get("Resources") or {}).get("Resource") or []
+     if resources:
+         s_any = resources[0].get("Sections", {})
+         arr = s_any.get("Section") or s_any.get("section") or []
+         assert isinstance(arr, list)
+         assert "PlainSections" in resources[0]
+
+
+ def test_06_outlink_fetch_accessible_version():
+     url = "https://odphp.health.gov/myhealthfinder/health-conditions/heart-health/keep-your-heart-healthy"
+     res = tooluni.run({"name": "odphp_outlink_fetch",
+                        "arguments": {"urls": [url], "max_chars": 4000}})
+     print(summarize_result("odphp_outlink_fetch", res))
+     assert isinstance(res, dict) and not res.get("error")
+     results = res.get("results") or []
+     assert results and results[0].get("status") in (200, 301, 302)
+     if "text/html" in (results[0].get("content_type") or ""):
+         assert len(results[0].get("text", "")) > 100
+
+
+ def test_07_itemlist_spanish():
+     res = tooluni.run({"name": "odphp_itemlist", "arguments": {"type": "topic", "lang": "es"}})
+     print(summarize_result("odphp_itemlist", res))
+     assert isinstance(res, dict) and not res.get("error")
+
+
+ def test_08_topicsearch_by_category():
+     cats = tooluni.run({"name": "odphp_itemlist", "arguments": {"type": "category", "lang": "en"}})
+     first_cat = (cats.get("data", {}).get("Items", {}).get("Item") or [])[0]
+     cid = first_cat.get("Id")
+     res = tooluni.run({"name": "odphp_topicsearch", "arguments": {"categoryId": cid, "lang": "en"}})
+     print(summarize_result("odphp_topicsearch", res))
+     assert isinstance(res, dict) and not res.get("error")
+
+ def test_09_outlink_fetch_pdf():
+     url = "https://odphp.health.gov/sites/default/files/2021-12/DGA_Pregnancy_FactSheet-508.pdf"
+     res = tooluni.run({"name": "odphp_outlink_fetch",
+                        "arguments": {"urls": [url], "max_chars": 1000}})
+     print(summarize_result("odphp_outlink_fetch_pdf", res))
+
+     assert isinstance(res, dict) and not res.get("error")
+     results = res.get("results") or []
+     assert results, "No results returned for PDF URL"
+
+     ctype = results[0].get("content_type", "")
+     assert ctype.startswith("application/pdf"), f"Expected PDF but got {ctype}"
+
+     # Ensure text extraction worked at least partially
+     text = results[0].get("text", "")
+     assert text and len(text) > 50, "Extracted PDF text too short"
+
+
+ if __name__ == "__main__":
+     print("\nRunning ODPHP tool tests...\n")
+     test_01_myhealthfinder_valid()
+     test_02_itemlist_valid()
+     test_03_topicsearch_keyword_valid()
+     test_04_invalid_types_fail_fast()
+     test_05_sections_case_and_strip_html()
+     test_06_outlink_fetch_accessible_version()
+     test_07_itemlist_spanish()
+     test_08_topicsearch_by_category()
+     test_09_outlink_fetch_pdf()
+     print("\nAll ODPHP tests executed.\n")
tooluniverse/test/test_openrouter_client.py
@@ -0,0 +1,288 @@
+ """
+ Tests for OpenRouter client integration.
+
+ These tests verify that the OpenRouter client is properly integrated
+ with the ToolUniverse system.
+ """
+
+ import os
+ import pytest
+ from unittest.mock import Mock, patch, MagicMock
+ from tooluniverse.llm_clients import OpenRouterClient
+ from tooluniverse.agentic_tool import AgenticTool
+
+
+ class TestOpenRouterClient:
+     """Test suite for OpenRouterClient."""
+
+     def test_client_initialization_without_api_key(self):
+         """Test that client raises error when API key is not set."""
+         # Remove API key if present
+         old_key = os.environ.pop("OPENROUTER_API_KEY", None)
+
+         try:
+             with pytest.raises(ValueError, match="OPENROUTER_API_KEY not set"):
+                 logger = Mock()
+                 OpenRouterClient("openai/gpt-4o", logger)
+         finally:
+             # Restore old key if it existed
+             if old_key:
+                 os.environ["OPENROUTER_API_KEY"] = old_key
+
+     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_client_initialization_with_api_key(self, mock_openai_class):
+         """Test that client initializes correctly with API key."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         assert client.model_name == "openai/gpt-4o"
+         assert client.logger == logger
+         mock_openai_class.assert_called_once()
+
+         # Verify base_url and api_key
+         call_kwargs = mock_openai_class.call_args[1]
+         assert call_kwargs["base_url"] == "https://openrouter.ai/api/v1"
+         assert call_kwargs["api_key"] == "test_key"
+
+     @patch.dict(
+         os.environ,
+         {
+             "OPENROUTER_API_KEY": "test_key",
+             "OPENROUTER_SITE_URL": "https://example.com",
+             "OPENROUTER_SITE_NAME": "Test App"
+         }
+     )
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_client_with_optional_headers(self, mock_openai_class):
+         """Test that optional headers are set correctly."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         call_kwargs = mock_openai_class.call_args[1]
+         assert "default_headers" in call_kwargs
+         headers = call_kwargs["default_headers"]
+         assert headers["HTTP-Referer"] == "https://example.com"
+         assert headers["X-Title"] == "Test App"
+
+     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_resolve_default_max_tokens(self, mock_openai_class):
+         """Test max tokens resolution for known models."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         # Test known model
+         max_tokens = client._resolve_default_max_tokens("openai/gpt-4o")
+         assert max_tokens == 64000
+
+         # Test another known model
+         max_tokens = client._resolve_default_max_tokens("anthropic/claude-3.5-sonnet")
+         assert max_tokens == 8192
+
+         # Test unknown model
+         max_tokens = client._resolve_default_max_tokens("unknown/model")
+         assert max_tokens is None
+
+     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_infer_basic(self, mock_openai_class):
+         """Test basic inference functionality."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         # Mock the completion response
+         mock_response = Mock()
+         mock_response.choices = [Mock()]
+         mock_response.choices[0].message.content = "Test response"
+         mock_client.chat.completions.create.return_value = mock_response
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         messages = [{"role": "user", "content": "Test prompt"}]
+         result = client.infer(
+             messages=messages,
+             temperature=0.7,
+             max_tokens=100,
+             return_json=False
+         )
+
+         assert result == "Test response"
+         mock_client.chat.completions.create.assert_called_once()
+
+         # Verify call arguments
+         call_kwargs = mock_client.chat.completions.create.call_args[1]
+         assert call_kwargs["model"] == "openai/gpt-4o"
+         assert call_kwargs["messages"] == messages
+         assert call_kwargs["temperature"] == 0.7
+         assert call_kwargs["max_tokens"] == 100
+
+
+ class TestAgenticToolWithOpenRouter:
+     """Test AgenticTool integration with OpenRouter."""
+
+     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
+     @patch("tooluniverse.agentic_tool.OpenRouterClient")
+     def test_agentic_tool_with_openrouter(self, mock_client_class):
+         """Test that AgenticTool can use OpenRouter."""
+         # Mock the client
+         mock_client = Mock()
+         mock_client_class.return_value = mock_client
+         mock_client.test_api = Mock()
+         mock_client.infer = Mock(return_value="Test result")
+
+         # Create tool config
+         tool_config = {
+             "name": "Test_Tool",
+             "prompt": "Test prompt: {input}",
+             "input_arguments": ["input"],
+             "parameter": {
+                 "type": "object",
+                 "properties": {
+                     "input": {"type": "string", "required": True}
+                 },
+                 "required": ["input"]
+             },
+             "configs": {
+                 "api_type": "OPENROUTER",
+                 "model_id": "openai/gpt-4o",
+                 "temperature": 0.5,
+                 "validate_api_key": True,
+                 "return_metadata": False
+             }
+         }
+
+         # Create tool
+         tool = AgenticTool(tool_config)
+
+         # Verify initialization
+         assert tool._is_available
+         assert tool._current_api_type == "OPENROUTER"
+         assert tool._current_model_id == "openai/gpt-4o"
+         mock_client.test_api.assert_called_once()
+
+         # Test execution
+         result = tool.run({"input": "test data"})
+         assert result == "Test result"
+         mock_client.infer.assert_called_once()
+
+     def test_openrouter_in_supported_types(self):
+         """Test that OPENROUTER is in supported API types."""
+         tool_config = {
+             "name": "Test_Tool",
+             "prompt": "Test: {x}",
+             "input_arguments": ["x"],
+             "parameter": {
+                 "type": "object",
+                 "properties": {"x": {"type": "string"}},
+                 "required": ["x"]
+             },
+             "configs": {
+                 "api_type": "OPENROUTER",
+                 "model_id": "openai/gpt-4o",
+                 "validate_api_key": False
+             }
+         }
+
+         # This should not raise an error
+         try:
+             tool = AgenticTool(tool_config)
+             # Validation should pass
+             validation = tool.validate_configuration()
+             assert validation["valid"]
+         except ValueError as e:
+             if "Unsupported API type" in str(e):
+                 pytest.fail("OPENROUTER should be a supported API type")
+
+
+ class TestOpenRouterModels:
+     """Test model configuration and limits."""
+
+     @patch.dict(os.environ, {"OPENROUTER_API_KEY": "test_key"})
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_model_limits_configuration(self, mock_openai_class):
+         """Test that model limits are correctly configured."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         # Check some key models
+         expected_models = {
+             "openai/gpt-4o": {"max_output": 64000, "context_window": 1_048_576},
+             "anthropic/claude-3.7-sonnet": {"max_output": 8192, "context_window": 200_000},
+             "google/gemini-2.0-flash-exp": {"max_output": 8192, "context_window": 1_048_576},
+             "qwen/qwq-32b-preview": {"max_output": 8192, "context_window": 32_768},
+         }
+
+         for model_id, expected_limits in expected_models.items():
+             assert model_id in client.DEFAULT_MODEL_LIMITS
+             assert client.DEFAULT_MODEL_LIMITS[model_id] == expected_limits
+
+     @patch.dict(
+         os.environ,
+         {
+             "OPENROUTER_API_KEY": "test_key",
+             "OPENROUTER_MAX_TOKENS_BY_MODEL": '{"openai/gpt-4o": 32000}'
+         }
+     )
+     @patch("tooluniverse.llm_clients.OpenRouterClient._OpenAI")
+     def test_env_override_max_tokens(self, mock_openai_class):
+         """Test that environment variables can override max tokens."""
+         logger = Mock()
+         mock_client = Mock()
+         mock_openai_class.return_value = mock_client
+
+         client = OpenRouterClient("openai/gpt-4o", logger)
+
+         # Should return the overridden value
+         max_tokens = client._resolve_default_max_tokens("openai/gpt-4o")
+         assert max_tokens == 32000
+
+
+ class TestOpenRouterFallback:
+     """Test fallback configuration with OpenRouter."""
+
+     def test_openrouter_in_default_fallback_chain(self):
+         """Test that OpenRouter is in the default fallback chain."""
+         from tooluniverse.agentic_tool import DEFAULT_FALLBACK_CHAIN
+
+         # Check that OPENROUTER is in the default chain
+         openrouter_configs = [
+             config for config in DEFAULT_FALLBACK_CHAIN
+             if config["api_type"] == "OPENROUTER"
+         ]
+
+         assert len(openrouter_configs) > 0, "OPENROUTER should be in default fallback chain"
+
+         # Verify it has a model_id
+         for config in openrouter_configs:
+             assert "model_id" in config
+             assert config["model_id"].startswith("openai/") or \
+                 config["model_id"].startswith("anthropic/") or \
+                 config["model_id"].startswith("google/") or \
+                 config["model_id"].startswith("qwen/")
+
+     def test_openrouter_in_api_key_env_vars(self):
+         """Test that OPENROUTER is in API key environment variables mapping."""
+         from tooluniverse.agentic_tool import API_KEY_ENV_VARS
+
+         assert "OPENROUTER" in API_KEY_ENV_VARS
+         assert "OPENROUTER_API_KEY" in API_KEY_ENV_VARS["OPENROUTER"]
+
+
+ if __name__ == "__main__":
+     pytest.main([__file__, "-v"])
+
+
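
For reference, a minimal sketch of direct OpenRouterClient usage outside the test suite. The model name, the infer() keyword arguments, and the OPENROUTER_API_KEY / OPENROUTER_SITE_URL / OPENROUTER_SITE_NAME environment variables are confirmed by the tests above; passing a standard logging.Logger (the tests pass a Mock) and the placeholder key value are assumptions.

    import logging
    import os

    from tooluniverse.llm_clients import OpenRouterClient

    # The client reads its key (and optional attribution headers) from the environment.
    os.environ["OPENROUTER_API_KEY"] = "<your OpenRouter API key>"  # hypothetical placeholder

    logger = logging.getLogger("openrouter-demo")  # assumption: a plain Logger is accepted
    client = OpenRouterClient("openai/gpt-4o", logger)

    # With return_json=False the tests show infer() returning the message content as a string.
    reply = client.infer(
        messages=[{"role": "user", "content": "Say hello"}],
        temperature=0.7,
        max_tokens=100,
        return_json=False,
    )
    print(reply)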