tooluniverse 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tooluniverse might be problematic. Click here for more details.

Files changed (32) hide show
  1. tooluniverse/__init__.py +17 -5
  2. tooluniverse/agentic_tool.py +268 -330
  3. tooluniverse/compose_scripts/output_summarizer.py +21 -15
  4. tooluniverse/data/agentic_tools.json +2 -2
  5. tooluniverse/data/odphp_tools.json +354 -0
  6. tooluniverse/data/output_summarization_tools.json +2 -2
  7. tooluniverse/default_config.py +1 -0
  8. tooluniverse/llm_clients.py +570 -0
  9. tooluniverse/mcp_tool_registry.py +3 -3
  10. tooluniverse/odphp_tool.py +226 -0
  11. tooluniverse/output_hook.py +92 -3
  12. tooluniverse/remote/boltz/boltz_mcp_server.py +2 -2
  13. tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +2 -2
  14. tooluniverse/smcp.py +204 -112
  15. tooluniverse/smcp_server.py +23 -20
  16. tooluniverse/test/list_azure_openai_models.py +210 -0
  17. tooluniverse/test/test_agentic_tool_azure_models.py +91 -0
  18. tooluniverse/test/test_api_key_validation_min.py +64 -0
  19. tooluniverse/test/test_claude_sdk.py +86 -0
  20. tooluniverse/test/test_global_fallback.py +288 -0
  21. tooluniverse/test/test_hooks_direct.py +219 -0
  22. tooluniverse/test/test_odphp_tool.py +166 -0
  23. tooluniverse/test/test_openrouter_client.py +288 -0
  24. tooluniverse/test/test_stdio_hooks.py +285 -0
  25. tooluniverse/test/test_tool_finder.py +1 -1
  26. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/METADATA +101 -74
  27. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/RECORD +31 -19
  28. tooluniverse-1.0.5.dist-info/licenses/LICENSE +201 -0
  29. tooluniverse-1.0.3.dist-info/licenses/LICENSE +0 -21
  30. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/WHEEL +0 -0
  31. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/entry_points.txt +0 -0
  32. {tooluniverse-1.0.3.dist-info → tooluniverse-1.0.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,226 @@
1
+ import re
2
+ import requests
3
+ from typing import Dict, Any, Optional, List
4
+ from .base_tool import BaseTool
5
+ from .tool_registry import register_tool
6
+
7
+ # Optional but recommended: text extraction for HTML
8
+ try:
9
+ from bs4 import BeautifulSoup # pip install beautifulsoup4
10
+ except ImportError:
11
+ BeautifulSoup = None # We’ll guard uses so the tool still loads
12
+
13
+ ODPHP_BASE_URL = "https://odphp.health.gov/myhealthfinder/api/v4"
14
+
15
+
16
class ODPHPRESTTool(BaseTool):
    """Base class for ODPHP (MyHealthfinder) REST API tools."""

    def __init__(self, tool_config):
        super().__init__(tool_config)
        # Endpoint path (appended to ODPHP_BASE_URL) comes from the tool config.
        self.endpoint = tool_config["fields"]["endpoint"]

    def _make_request(self, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """GET the configured endpoint and wrap the JSON ``Result`` payload.

        Returns ``{"data": ..., "metadata": ...}`` on success, or
        ``{"error": ...}`` when the request or JSON decoding fails.
        """
        url = f"{ODPHP_BASE_URL}{self.endpoint}"
        try:
            response = requests.get(url, params=params, timeout=30)
            response.raise_for_status()
            payload = response.json()
        except requests.exceptions.RequestException as exc:
            return {"error": f"Request failed: {str(exc)}"}
        except ValueError as exc:
            # requests raises ValueError (JSONDecodeError) on malformed bodies.
            return {"error": f"Failed to parse JSON: {str(exc)}"}
        return {
            "data": payload.get("Result"),
            "metadata": {
                "source": "ODPHP MyHealthfinder",
                "endpoint": url,
                "query": params,
            },
        }
41
+
42
+
43
+ def _sections_array(resource: Dict[str, Any]) -> List[Dict[str, Any]]:
44
+ """
45
+ Tolerant accessor for the sections array.
46
+ Data sometimes uses Sections.Section (capital S) and sometimes Sections.section (lowercase).
47
+ """
48
+ sect = resource.get("Sections") or {}
49
+ arr = sect.get("Section")
50
+ if not isinstance(arr, list):
51
+ arr = sect.get("section")
52
+ return arr if isinstance(arr, list) else []
53
+
54
+
55
+ def _strip_html_to_text(html: str) -> str:
56
+ if not html:
57
+ return ""
58
+ if BeautifulSoup is None:
59
+ # fallback: very light tag remover
60
+ text = re.sub(r"<[^>]+>", " ", html)
61
+ return re.sub(r"\s+", " ", text).strip()
62
+ soup = BeautifulSoup(html, "html.parser")
63
+ # remove scripts/styles
64
+ for t in soup(["script", "style", "noscript"]):
65
+ t.decompose()
66
+ text = soup.get_text("\n", strip=True)
67
+ text = re.sub(r"\n{2,}", "\n\n", text)
68
+ return text.strip()
69
+
70
+
71
@register_tool("ODPHPMyHealthfinder")
class ODPHPMyHealthfinder(ODPHPRESTTool):
    """Search for demographic-specific health recommendations (MyHealthfinder)."""

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Query MyHealthfinder; optionally attach plain-text section renderings."""
        # Forward only the recognized query parameters.
        params: Dict[str, Any] = {
            key: arguments[key]
            for key in ("lang", "age", "sex", "pregnant")
            if key in arguments
        }

        res = self._make_request(params)

        # Optional: attach PlainSections if requested
        if isinstance(res, dict) and not res.get("error") and arguments.get("strip_html"):
            data = res.get("data") or {}
            resources = (((data.get("Resources") or {}).get("All") or {}).get("Resource")) or []
            if isinstance(resources, list):
                for resource in resources:
                    plain = [
                        {
                            "Title": section.get("Title", ""),
                            "PlainContent": _strip_html_to_text(section.get("Content", "")),
                        }
                        for section in _sections_array(resource)
                    ]
                    if plain:
                        resource["PlainSections"] = plain
        return res
103
+
104
+
105
@register_tool("ODPHPItemList")
class ODPHPItemList(ODPHPRESTTool):
    """Retrieve list of topics or categories."""

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch the item list, passing through only the supported filters."""
        params: Dict[str, Any] = {
            key: arguments[key] for key in ("lang", "type") if key in arguments
        }
        return self._make_request(params)
116
+
117
+
118
@register_tool("ODPHPTopicSearch")
class ODPHPTopicSearch(ODPHPRESTTool):
    """Search for health topics by ID, category, or keyword."""

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Search topics; optionally attach plain-text section renderings."""
        # Forward only the recognized query parameters.
        params: Dict[str, Any] = {
            key: arguments[key]
            for key in ("lang", "topicId", "categoryId", "keyword")
            if key in arguments
        }

        res = self._make_request(params)

        # Optional: attach PlainSections if requested
        if isinstance(res, dict) and not res.get("error") and arguments.get("strip_html"):
            data = res.get("data") or {}
            resources = ((data.get("Resources") or {}).get("Resource")) or []
            if isinstance(resources, list):
                for resource in resources:
                    plain = [
                        {
                            "Title": section.get("Title", ""),
                            "PlainContent": _strip_html_to_text(section.get("Content", "")),
                        }
                        for section in _sections_array(resource)
                    ]
                    if plain:
                        resource["PlainSections"] = plain
        return res
150
+
151
+
152
@register_tool("ODPHPOutlinkFetch")
class ODPHPOutlinkFetch(BaseTool):
    """
    Fetch article pages referenced by AccessibleVersion / RelatedItems.Url and return readable text.
    - HTML: extracts main/article/body text; strips nav/aside/footer/script/style.
    - PDF or non-HTML: returns metadata + URL so the agent can surface it.
    """

    def __init__(self, tool_config):
        super().__init__(tool_config)
        # Per-request timeout in seconds.
        self.timeout = 30

    def _extract_text(self, html: str) -> Dict[str, str]:
        """Return ``{"title", "text"}`` extracted from an HTML document."""
        if BeautifulSoup is None:
            # Crude fallback: regex out the <title>, then strip every tag.
            m = re.search(r"<title[^>]*>(.*?)</title>", html, flags=re.I | re.S)
            title = re.sub(r"\s+", " ", m.group(1)).strip() if m else ""
            body = re.sub(r"<[^>]+>", " ", html)
            return {"title": title, "text": re.sub(r"\s+", " ", body).strip()}

        soup = BeautifulSoup(html, "html.parser")
        # Drop non-content elements before extracting text.
        for tag in soup(["script", "style", "noscript", "footer", "nav", "aside"]):
            tag.decompose()

        candidate = soup.find("main") or soup.find("article") or soup.body or soup
        # Prefer a heading inside the main content, else fall back to <title>.
        heading = candidate.find(["h1", "h2"]) if candidate else None
        if heading:
            title = heading.get_text(" ", strip=True)
        elif soup.title and soup.title.string:
            title = soup.title.string.strip()
        else:
            title = ""

        text = candidate.get_text("\n", strip=True) if candidate else soup.get_text("\n", strip=True)
        return {"title": title, "text": re.sub(r"\n{2,}", "\n\n", text)}

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Fetch up to three URLs and return per-URL extraction results."""
        urls: List[str] = arguments.get("urls", [])
        max_chars: Optional[int] = arguments.get("max_chars")
        return_html: bool = bool(arguments.get("return_html", False))

        if not urls or not isinstance(urls, list):
            return {"error": "Missing required parameter 'urls' (array of 1–3 URLs)."}

        out: List[Dict[str, Any]] = []
        # Cap at three fetches per call.
        for url in urls[:3]:
            try:
                resp = requests.get(url, timeout=self.timeout, allow_redirects=True)
            except requests.exceptions.RequestException as exc:
                out.append({"url": url, "status": 0, "content_type": "", "title": "", "text": "", "error": str(exc)})
                continue

            content_type = resp.headers.get("Content-Type", "")
            item: Dict[str, Any] = {"url": url, "status": resp.status_code, "content_type": content_type}

            if "text/html" in content_type or (not content_type and resp.text.startswith("<!")):
                extracted = self._extract_text(resp.text)
                if isinstance(max_chars, int) and max_chars > 0:
                    extracted["text"] = extracted["text"][:max_chars]
                item.update(extracted)
                if return_html:
                    item["html"] = resp.text
            elif "pdf" in content_type or url.lower().endswith(".pdf"):
                item["title"] = "(PDF Document)"
                item["text"] = f"[PDF file: {url}]"
            else:
                # Unknown content type: report metadata only.
                item["title"] = ""
                item["text"] = ""
            out.append(item)

        return {"results": out, "metadata": {"source": "ODPHP OutlinkFetch"}}
@@ -226,6 +226,9 @@ class SummarizationHook(OutputHook):
226
226
  self.chunk_size = hook_config.get("chunk_size", 2000)
227
227
  self.focus_areas = hook_config.get("focus_areas", "key_findings_and_results")
228
228
  self.max_summary_length = hook_config.get("max_summary_length", 3000)
229
+ # Optional timeout to prevent hangs in composer / LLM calls
230
+ # If the composer does not return within this window, we gracefully fall back
231
+ self.composer_timeout_sec = hook_config.get("composer_timeout_sec", 20)
229
232
 
230
233
  def process(
231
234
  self,
@@ -252,6 +255,19 @@ class SummarizationHook(OutputHook):
252
255
  Any: The summarized output, or original output if summarization fails
253
256
  """
254
257
  try:
258
+ # Debug: basic context
259
+ try:
260
+ _len = len(str(result))
261
+ except Exception:
262
+ _len = -1
263
+ import sys as _sys
264
+
265
+ print(
266
+ f"[SummarizationHook] process: tool={tool_name}, result_len={_len}, "
267
+ f"chunk_size={self.chunk_size}, max_summary_length={self.max_summary_length}",
268
+ file=_sys.stderr,
269
+ flush=True,
270
+ )
255
271
  # Check if the required tools are available
256
272
  if (
257
273
  self.composer_tool_name not in self.tooluniverse.callable_functions
@@ -277,9 +293,49 @@ class SummarizationHook(OutputHook):
277
293
  }
278
294
 
279
295
  # Call Compose Summarizer Tool through ToolUniverse
280
- composer_result = self.tooluniverse.run_one_function(
281
- {"name": self.composer_tool_name, "arguments": composer_args}
296
+ print(
297
+ f"[SummarizationHook] calling composer tool: {self.composer_tool_name} (timeout={self.composer_timeout_sec}s)",
298
+ file=_sys.stderr,
299
+ flush=True,
282
300
  )
301
+ # Run composer with timeout to avoid hangs
302
+ try:
303
+ from concurrent.futures import (
304
+ ThreadPoolExecutor,
305
+ )
306
+
307
+ def _call_composer():
308
+ return self.tooluniverse.run_one_function(
309
+ {"name": self.composer_tool_name, "arguments": composer_args}
310
+ )
311
+
312
+ with ThreadPoolExecutor(max_workers=1) as _pool:
313
+ _future = _pool.submit(_call_composer)
314
+ composer_result = _future.result(timeout=self.composer_timeout_sec)
315
+ except Exception as _e_timeout:
316
+ # Timeout or execution error; log and fall back to original output
317
+ print(
318
+ f"[SummarizationHook] composer execution failed/timeout: {_e_timeout}",
319
+ file=_sys.stderr,
320
+ flush=True,
321
+ )
322
+ return result
323
+ # Debug: show composer result meta
324
+ try:
325
+ if isinstance(composer_result, dict):
326
+ success = composer_result.get("success", False)
327
+ summary_len = len(composer_result.get("summary", ""))
328
+ print(
329
+ f"[SummarizationHook] composer_result: success={success} summary_len={summary_len}",
330
+ file=_sys.stderr,
331
+ flush=True,
332
+ )
333
+ except Exception as _e_dbg:
334
+ print(
335
+ f"[SummarizationHook] debug error inspecting composer_result: {_e_dbg}",
336
+ file=_sys.stderr,
337
+ flush=True,
338
+ )
283
339
 
284
340
  # Process Compose Tool result
285
341
  if isinstance(composer_result, dict) and composer_result.get("success"):
@@ -294,7 +350,13 @@ class SummarizationHook(OutputHook):
294
350
 
295
351
  except Exception as e:
296
352
  error_msg = str(e)
297
- print(f"Error in summarization hook: {error_msg}")
353
+ import sys as _sys
354
+
355
+ print(
356
+ f"Error in summarization hook: {error_msg}",
357
+ file=_sys.stderr,
358
+ flush=True,
359
+ )
298
360
 
299
361
  # Check if the error is due to missing tools
300
362
  if "not found" in error_msg.lower() or "ToolOutputSummarizer" in error_msg:
@@ -365,6 +427,16 @@ class HookManager:
365
427
  self.config_path = config.get("config_path", "template/hook_config.json")
366
428
  self._pending_tools_to_load: List[str] = []
367
429
  self._load_hook_config()
430
+
431
+ # Validate LLM API keys before loading hooks
432
+ if not self._validate_llm_api_keys():
433
+ print("⚠️ Warning: LLM API keys not available. Hooks will be disabled.")
434
+ print(
435
+ " To enable hooks, please set AZURE_OPENAI_API_KEY environment variable."
436
+ )
437
+ self.enabled = False
438
+ return
439
+
368
440
  self._load_hooks()
369
441
 
370
442
  def apply_hooks(
@@ -415,6 +487,23 @@ class HookManager:
415
487
 
416
488
  return result
417
489
 
490
+ def _validate_llm_api_keys(self) -> bool:
491
+ """
492
+ Validate that LLM API keys are available for hook tools.
493
+
494
+ Returns:
495
+ bool: True if API keys are available, False otherwise
496
+ """
497
+ from .agentic_tool import AgenticTool
498
+
499
+ if AgenticTool.has_any_api_keys():
500
+ print("✅ LLM API keys validated successfully")
501
+ return True
502
+ else:
503
+ print("❌ LLM API key validation failed: No API keys available")
504
+ print(" To enable hooks, please set API key environment variables.")
505
+ return False
506
+
418
507
  def enable_hook(self, hook_name: str):
419
508
  """
420
509
  Enable a specific hook by name.
@@ -17,7 +17,7 @@ except FileNotFoundError as e:
17
17
  )
18
18
  sys.exit(1)
19
19
 
20
- server = FastMCP("Your MCP Server", stateless_http=True)
20
+ server = FastMCP("Your MCP Server")
21
21
  agents = {}
22
22
  for tool_config in boltz_tools:
23
23
  agents[tool_config["name"]] = Boltz2DockingTool(tool_config=tool_config)
@@ -47,4 +47,4 @@ def run_boltz2(query: dict):
47
47
 
48
48
 
49
49
  if __name__ == "__main__":
50
- server.run(transport="streamable-http", host="0.0.0.0", port=8080)
50
+ server.run(transport="streamable-http", host="0.0.0.0", port=8080, stateless_http=True)
@@ -18,7 +18,7 @@ except FileNotFoundError as e:
18
18
  )
19
19
  sys.exit(1)
20
20
 
21
- server = FastMCP("Your MCP Server", stateless_http=True)
21
+ server = FastMCP("Your MCP Server")
22
22
  agents = {}
23
23
  for tool_config in uspto_downloader_tools:
24
24
  agents[tool_config["name"]] = USPTOPatentDocumentDownloader(tool_config=tool_config)
@@ -58,4 +58,4 @@ def download_full_text(query: dict):
58
58
 
59
59
 
60
60
  if __name__ == "__main__":
61
- server.run(transport="streamable-http", host="0.0.0.0", port=8081)
61
+ server.run(transport="streamable-http", host="0.0.0.0", port=8081, stateless_http=True)