tooluniverse 1.0.9__py3-none-any.whl → 1.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tooluniverse might be problematic.
- tooluniverse/admetai_tool.py +1 -1
- tooluniverse/agentic_tool.py +65 -17
- tooluniverse/base_tool.py +19 -8
- tooluniverse/boltz_tool.py +1 -1
- tooluniverse/cache/result_cache_manager.py +167 -12
- tooluniverse/compose_scripts/drug_safety_analyzer.py +1 -1
- tooluniverse/compose_scripts/multi_agent_literature_search.py +1 -1
- tooluniverse/compose_scripts/output_summarizer.py +4 -4
- tooluniverse/compose_scripts/tool_graph_composer.py +1 -1
- tooluniverse/compose_scripts/tool_metadata_generator.py +1 -1
- tooluniverse/compose_tool.py +9 -9
- tooluniverse/core_tool.py +2 -2
- tooluniverse/ctg_tool.py +4 -4
- tooluniverse/custom_tool.py +1 -1
- tooluniverse/dataset_tool.py +2 -2
- tooluniverse/default_config.py +1 -1
- tooluniverse/enrichr_tool.py +14 -14
- tooluniverse/execute_function.py +520 -15
- tooluniverse/extended_hooks.py +4 -4
- tooluniverse/gene_ontology_tool.py +1 -1
- tooluniverse/generate_tools.py +3 -3
- tooluniverse/humanbase_tool.py +10 -10
- tooluniverse/logging_config.py +2 -2
- tooluniverse/mcp_client_tool.py +57 -129
- tooluniverse/mcp_integration.py +52 -49
- tooluniverse/mcp_tool_registry.py +147 -528
- tooluniverse/openalex_tool.py +8 -8
- tooluniverse/openfda_tool.py +2 -2
- tooluniverse/output_hook.py +15 -15
- tooluniverse/package_tool.py +1 -1
- tooluniverse/pmc_tool.py +2 -2
- tooluniverse/remote/boltz/boltz_mcp_server.py +1 -1
- tooluniverse/remote/depmap_24q2/depmap_24q2_mcp_tool.py +2 -2
- tooluniverse/remote/immune_compass/compass_tool.py +3 -3
- tooluniverse/remote/pinnacle/pinnacle_tool.py +2 -2
- tooluniverse/remote/transcriptformer/transcriptformer_tool.py +3 -3
- tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +3 -3
- tooluniverse/remote_tool.py +4 -4
- tooluniverse/scripts/filter_tool_files.py +2 -2
- tooluniverse/smcp.py +93 -12
- tooluniverse/smcp_server.py +100 -20
- tooluniverse/space/__init__.py +46 -0
- tooluniverse/space/loader.py +133 -0
- tooluniverse/space/validator.py +353 -0
- tooluniverse/tool_finder_embedding.py +2 -2
- tooluniverse/tool_finder_keyword.py +9 -9
- tooluniverse/tool_finder_llm.py +6 -6
- tooluniverse/tools/_shared_client.py +3 -3
- tooluniverse/url_tool.py +1 -1
- tooluniverse/uspto_tool.py +1 -1
- tooluniverse/utils.py +10 -10
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/METADATA +7 -3
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/RECORD +57 -54
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/WHEEL +0 -0
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/entry_points.txt +0 -0
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/licenses/LICENSE +0 -0
- {tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/top_level.txt +0 -0
tooluniverse/space/validator.py
ADDED
@@ -0,0 +1,353 @@
"""
Space Configuration Validator

Comprehensive validation for Space configurations using JSON Schema.
Supports validation, default value filling, and structure checking for
Space YAML files.

The validation system is based on a comprehensive JSON Schema that defines:
- All possible fields and their types
- Default values for optional fields
- Required fields and validation rules
- Enum values for specific fields
- Nested object structures and arrays

This provides a robust, flexible, and maintainable validation system that can:
1. Validate YAML structure and content
2. Fill in missing default values automatically
3. Provide detailed error messages for validation failures
4. Support both simple tool collections and complex workspaces
"""

from typing import Any, Dict, List, Tuple
import yaml
import jsonschema
from jsonschema import validate


# Space JSON Schema Definition
# ================================
# This schema defines the complete structure and validation rules for
# Space configurations. It serves as the single source of truth for:
# - Field definitions and types
# - Default values
# - Required fields
# - Validation constraints
# - Enum values for specific fields
#
# The schema supports two main configuration types:
# 1. Simple tool collections (e.g., literature-search.yaml) - minimal config
# 2. Complete workspaces (e.g., full-workspace.yaml) - full config with LLM
#
# Key features:
# - Automatic default value filling
# - Comprehensive validation rules
# - Support for nested objects and arrays
# - Flexible tool selection (by name, category, type)
# - LLM configuration with provider and model settings
# - Hook system for output processing
# - Environment variable requirements documentation
SPACE_SCHEMA = {
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "description": "Space name - unique identifier for this configuration",
        },
        "version": {
            "type": "string",
            "default": "1.0.0",
            "description": "Space version - follows semantic versioning "
            "(e.g., 1.0.0, 1.2.3)",
        },
        "description": {
            "type": "string",
            "description": "Space description - explains what this "
            "configuration does and its purpose",
        },
        "tags": {
            "type": "array",
            "items": {"type": "string"},
            "default": [],
            "description": "Space tags - keywords for categorization and "
            'discovery (e.g., ["research", "biology", "literature"])',
        },
        "tools": {
            "type": "object",
            "properties": {
                "include_tools": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Tools to include by exact name - most precise "
                    "way to select specific tools",
                },
                "categories": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Tool categories to include - broader selection "
                    'based on tool categories (e.g., ["literature", "clinical"])',
                },
                "exclude_tools": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Tools to exclude by exact name - removes "
                    "specific tools from the selection",
                },
                "include_tool_types": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Tool types to include - filter by tool type "
                    '(e.g., ["api", "local", "agentic"])',
                },
                "exclude_tool_types": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Tool types to exclude - removes tools of "
                    "specific types from the selection",
                },
            },
            "additionalProperties": False,
            "description": "Tool configuration - defines which tools to load "
            "and how to filter them",
        },
        "llm_config": {
            "type": "object",
            "properties": {
                "mode": {
                    "type": "string",
                    "enum": ["default", "fallback"],
                    "default": "default",
                    "description": 'LLM configuration mode - "default" uses this '
                    'config as primary, "fallback" uses as backup '
                    "when primary fails",
                },
                "default_provider": {
                    "type": "string",
                    "description": "Default LLM provider - must match AgenticTool "
                    "API types (CHATGPT, GEMINI, OPENROUTER, VLLM, etc.)",
                },
                "models": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                    "description": "Task-specific model mappings - maps task names "
                    'to model IDs (e.g., {"default": "gpt-4o", '
                    '"analysis": "gpt-4-turbo"})',
                },
                "temperature": {
                    "type": "number",
                    "minimum": 0,
                    "maximum": 2,
                    "description": "LLM temperature - controls randomness in "
                    "responses (0.0 = deterministic, 2.0 = very random)",
                },
            },
            "additionalProperties": False,
            "description": "LLM configuration - settings for AI-powered tools "
            "(AgenticTool) - only needed for complete workspaces",
        },
        "hooks": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "type": {
                        "type": "string",
                        "description": "Hook type - identifies the hook implementation "
                        '(e.g., "output_summarization", "file_save")',
                    },
                    "enabled": {
                        "type": "boolean",
                        "default": True,
                        "description": "Whether hook is enabled - allows enabling/"
                        "disabling hooks without removing them",
                    },
                    "config": {
                        "type": "object",
                        "description": "Hook configuration - specific settings for "
                        "this hook instance",
                    },
                },
                "required": ["type"],
                "additionalProperties": False,
            },
            "description": "Hook configurations - post-processing functions for "
            "tool outputs (e.g., summarization, file saving)",
        },
        "required_env": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Required environment variables - documents which "
            "environment variables should be set (for documentation "
            "purposes only)",
        },
    },
    "required": ["name", "version"],
    "additionalProperties": False,
    "description": "Space Configuration Schema - defines the structure for "
    "Space YAML configuration files",
}


class ValidationError(Exception):
    """Raised when configuration validation fails."""


def validate_space_config(config: Dict[str, Any]) -> Tuple[bool, List[str]]:
    """
    Validate a Space configuration using JSON Schema.

    This is a legacy function that now uses the JSON Schema validation system.
    For new code, use validate_with_schema() instead.

    Args:
        config: Configuration dictionary

    Returns:
        Tuple of (is_valid, list_of_errors)
    """
    # Convert dict to YAML string for validation
    yaml_content = yaml.dump(config, default_flow_style=False, allow_unicode=True)
    is_valid, errors, _ = validate_with_schema(yaml_content, fill_defaults_flag=False)
    return is_valid, errors


def validate_yaml_format_by_template(yaml_content: str) -> Tuple[bool, List[str]]:
    """
    Validate YAML format by comparing against default template format.

    This method uses the JSON Schema as a reference to validate
    the structure and content of Space YAML configurations.

    Args:
        yaml_content: YAML content string

    Returns:
        Tuple of (is_valid, list_of_errors)
    """
    # Use the new JSON Schema validation instead
    is_valid, errors, _ = validate_with_schema(yaml_content, fill_defaults_flag=False)
    return is_valid, errors


def validate_yaml_file(file_path: str) -> Tuple[bool, List[str]]:
    """
    Validate a YAML file by comparing against default template format.

    Args:
        file_path: Path to YAML file

    Returns:
        Tuple of (is_valid, list_of_errors)
    """
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            yaml_content = f.read()
        return validate_yaml_format_by_template(yaml_content)
    except FileNotFoundError:
        return False, [f"File not found: {file_path}"]
    except Exception as e:
        return False, [f"Error reading file: {e}"]


def fill_defaults(data: Dict[str, Any], schema: Dict[str, Any]) -> Dict[str, Any]:
    """
    Recursively fill default values from JSON schema.

    Args:
        data: Configuration data
        schema: JSON schema with default values

    Returns:
        Configuration with default values filled
    """
    if not isinstance(data, dict) or not isinstance(schema, dict):
        return data

    result = data.copy()

    for key, value in schema.get("properties", {}).items():
        if key not in result and "default" in value:
            result[key] = value["default"]
        elif key in result and isinstance(value, dict) and "properties" in value:
            result[key] = fill_defaults(result[key], value)
        elif (
            key in result
            and isinstance(value, dict)
            and value.get("type") == "array"
            and "items" in value
        ):
            if isinstance(result[key], list) and value["items"].get("type") == "object":
                result[key] = [
                    fill_defaults(item, value["items"]) for item in result[key]
                ]

    return result


def validate_with_schema(
    yaml_content: str, fill_defaults_flag: bool = True
) -> Tuple[bool, List[str], Dict[str, Any]]:
    """
    Validate YAML content using JSON Schema and optionally fill default values.

    Args:
        yaml_content: YAML content string
        fill_defaults_flag: Whether to fill default values

    Returns:
        Tuple of (is_valid, list_of_errors, processed_config)
    """
    errors = []

    try:
        # Parse YAML
        config = yaml.safe_load(yaml_content)
        if not isinstance(config, dict):
            return False, ["YAML content must be a dictionary"], {}

        # Fill default values if requested
        if fill_defaults_flag:
            config = fill_defaults(config, SPACE_SCHEMA)

        # Validate against schema
        validate(instance=config, schema=SPACE_SCHEMA)

        return True, [], config

    except yaml.YAMLError as e:
        return False, [f"YAML parsing error: {e}"], {}
    except jsonschema.ValidationError as e:
        return (
            False,
            [f"Schema validation error: {e.message}"],
            (config if "config" in locals() else {}),
        )
    except Exception as e:
        return (
            False,
            [f"Validation error: {e}"],
            (config if "config" in locals() else {}),
        )


def validate_yaml_file_with_schema(
    file_path: str, fill_defaults_flag: bool = True
) -> Tuple[bool, List[str], Dict[str, Any]]:
    """
    Validate a YAML file using JSON Schema and optionally fill default values.

    Args:
        file_path: Path to YAML file
        fill_defaults_flag: Whether to fill default values

    Returns:
        Tuple of (is_valid, list_of_errors, processed_config)
    """
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            yaml_content = f.read()
        return validate_with_schema(yaml_content, fill_defaults_flag)
    except FileNotFoundError:
        return False, [f"File not found: {file_path}"], {}
    except Exception as e:
        return False, [f"Error reading file: {e}"], {}
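For orientation, a minimal usage sketch of the new validator follows; it is illustrative rather than part of the diff, assumes the module is importable as tooluniverse.space.validator (matching the file path above) with pyyaml and jsonschema available, and uses a hypothetical Space name and tool name.

# Illustrative usage sketch (hypothetical Space and tool names).
from tooluniverse.space.validator import validate_with_schema

example_yaml = """
name: demo-space
description: Minimal literature-search tool collection
tools:
  include_tools:
    - Example_search_tool        # hypothetical tool name
  categories:
    - literature
hooks:
  - type: output_summarization
"""

is_valid, errors, config = validate_with_schema(example_yaml)
print(is_valid)                       # True when the YAML matches SPACE_SCHEMA
print(errors)                         # [] on success, schema error messages otherwise
print(config["version"])              # "1.0.0" - filled from the schema default
print(config["tags"])                 # []      - filled from the schema default
print(config["hooks"][0]["enabled"])  # True    - per-item default added by fill_defaults

Because fill_defaults runs before jsonschema validation when fill_defaults_flag is left at its default of True, the required version field can be omitted from the YAML and still satisfy the schema.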
tooluniverse/tool_finder_embedding.py
CHANGED
@@ -161,7 +161,7 @@ class ToolFinderEmbedding(BaseTool):
     query (str): User query or description of desired functionality
     top_k (int, optional): Number of top tools to return. Defaults to 5.

-    Returns
+    Returns
     list: List of top-k tool names ranked by relevance to the query

     Raises:
@@ -203,7 +203,7 @@ class ToolFinderEmbedding(BaseTool):
     return_call_result (bool, optional): If True, returns both prompts and tool names. Defaults to False.
     categories (list, optional): List of tool categories to filter by. Currently not implemented for embedding-based search.

-    Returns
+    Returns
     str or tuple:
         - If return_call_result is False: Tool prompts as a formatted string
         - If return_call_result is True: Tuple of (tool_prompts, tool_names)
tooluniverse/tool_finder_keyword.py
CHANGED
@@ -182,7 +182,7 @@ class ToolFinderKeyword(BaseTool):
     Args:
         text (str): Input text to tokenize

-    Returns
+    Returns
         List[str]: List of processed tokens
     """
     if not text:
@@ -210,7 +210,7 @@ class ToolFinderKeyword(BaseTool):
     Args:
         word (str): Word to stem

-    Returns
+    Returns
         str: Stemmed word
     """
     if len(word) <= 3:
@@ -232,7 +232,7 @@ class ToolFinderKeyword(BaseTool):
     tokens (List[str]): Tokenized words
     max_phrase_length (int): Maximum length of phrases to extract

-    Returns
+    Returns
     List[str]: List of phrases and individual tokens
     """
     phrases = []
@@ -305,7 +305,7 @@ class ToolFinderKeyword(BaseTool):
     Args:
         parameter_schema (Dict): Tool parameter schema

-    Returns
+    Returns
         List[str]: List of text elements from parameters
     """
     text_elements = []
@@ -329,7 +329,7 @@ class ToolFinderKeyword(BaseTool):
     query_terms (List[str]): Processed query terms and phrases
     tool_name (str): Name of the tool to score

-    Returns
+    Returns
     float: TF-IDF relevance score
     """
     if tool_name not in self._tool_index:
@@ -364,7 +364,7 @@ class ToolFinderKeyword(BaseTool):
     query (str): Original query string
     tool (Dict): Tool configuration

-    Returns
+    Returns
     float: Exact match bonus score
     """
     query_lower = query.lower()
@@ -414,7 +414,7 @@ class ToolFinderKeyword(BaseTool):
     return_call_result (bool, optional): If True, returns both prompts and tool names. Defaults to False.
     categories (list, optional): List of tool categories to filter by.

-    Returns
+    Returns
     str or tuple:
         - If return_call_result is False: Tool prompts as a formatted string
         - If return_call_result is True: Tuple of (tool_prompts, tool_names)
@@ -472,7 +472,7 @@ class ToolFinderKeyword(BaseTool):
     - picked_tool_names (list, optional): Pre-selected tool names to process
     - return_call_result (bool, optional): Whether to return both prompts and names. Defaults to False.

-    Returns
+    Returns
     str or tuple:
         - If return_call_result is False: Tool prompts as a formatted string
         - If return_call_result is True: Tuple of (tool_prompts, tool_names)
@@ -504,7 +504,7 @@ class ToolFinderKeyword(BaseTool):
     Args:
         arguments (dict): Search arguments

-    Returns
+    Returns
         str: JSON string containing search results with relevance scores
     """
     try:
tooluniverse/tool_finder_llm.py
CHANGED
@@ -167,7 +167,7 @@ Requirements:
     Args:
         force_refresh (bool): Whether to force refresh the cache

-    Returns
+    Returns
         list: List of tool dictionaries with names and descriptions
     """
     current_time = datetime.now()
@@ -220,7 +220,7 @@ Requirements:
     query (str): User query
     max_tools (int): Maximum number of tools to send to LLM

-    Returns
+    Returns
     list: Filtered list of tools
     """
     if len(available_tools) <= max_tools:
@@ -269,7 +269,7 @@ Requirements:
     Args:
         tools (list): List of tool dictionaries

-    Returns
+    Returns
         str: Compact formatted tool descriptions for the prompt
     """
     formatted_tools = []
@@ -296,7 +296,7 @@ Requirements:
     include_reasoning (bool): Whether to include selection reasoning
     categories (list, optional): List of tool categories to filter by

-    Returns
+    Returns
     dict: Dictionary containing selected tools and metadata
     """
     try:
@@ -452,7 +452,7 @@ Requirements:
     categories (list, optional): List of tool categories to filter by. Applied before LLM selection.
     return_list_only (bool, optional): If True, returns only a list of tool specifications. Overrides other return options.

-    Returns
+    Returns
     str, tuple, or list:
         - If return_list_only is True: List of tool specifications
         - If return_call_result is False: Tool prompts as a formatted string
@@ -532,7 +532,7 @@ Requirements:
     categories: Requested categories filter
     return_call_result: Whether return_call_result was True

-    Returns
+    Returns
     str: JSON formatted search results
     """
     import json
tooluniverse/tools/_shared_client.py
CHANGED
@@ -64,7 +64,7 @@ def get_shared_client(
     shared instance already exists, these parameters are
     ignored.

-    Returns
+    Returns
     ToolUniverse: The client instance to use for tool execution

     Thread Safety:
@@ -76,7 +76,7 @@ def get_shared_client(
     of the shared instance. Subsequent calls with different parameters
     will not affect the already-created instance.

-    Examples
+    Examples
     # Basic usage
     client = get_shared_client()

@@ -125,7 +125,7 @@ def reset_shared_client():
     may cause unexpected behavior. It's recommended to only call this
     function when you're certain no other threads are accessing the client.

-    Examples
+    Examples
     # Reset for testing
     reset_shared_client()

tooluniverse/url_tool.py
CHANGED
tooluniverse/uspto_tool.py
CHANGED
@@ -132,7 +132,7 @@ class USPTOOpenDataPortalTool(BaseTool):
     Args:
         arguments: A dictionary of arguments for the tool, matching the parameters in the tool definition.

-    Returns
+    Returns
         The result of the API call, either as a dictionary (for JSON) or a string (for CSV).
     """
     endpoint = self.tool_config.get("api_endpoint")
tooluniverse/utils.py
CHANGED
@@ -113,7 +113,7 @@ def yaml_to_dict(yaml_file_path):
     Args:
         yaml_file_path (str): Path to the YAML file.

-    Returns
+    Returns
         dict: Dictionary representation of the YAML file content.
     """
     try:
@@ -130,10 +130,10 @@ def read_json_list(file_path):
     """
     Reads a list of JSON objects from a file.

-    Parameters
+    Parameters
         file_path (str): The path to the JSON file.

-    Returns
+    Returns
         list: A list of dictionaries containing the JSON objects.
     """
     with open(file_path, "r") as file:
@@ -355,7 +355,7 @@ def format_error_response(
     tool_name (str, optional): Name of the tool that failed
     context (Dict[str, Any], optional): Additional context about the error

-    Returns
+    Returns
     Dict[str, Any]: Standardized error response
     """
     from .exceptions import ToolError
@@ -391,7 +391,7 @@ def get_parameter_schema(tool_config: Dict[str, Any]) -> Dict[str, Any]:
     Args:
         tool_config (Dict[str, Any]): Tool configuration dictionary

-    Returns
+    Returns
         Dict[str, Any]: Parameter schema dictionary
     """
     return tool_config.get("parameter", {})
@@ -404,7 +404,7 @@ def validate_query(query: Dict[str, Any]) -> bool:
     Args:
         query (Dict[str, Any]): The query dictionary to validate

-    Returns
+    Returns
         bool: True if query is valid, False otherwise
     """
     if not isinstance(query, dict):
@@ -427,7 +427,7 @@ def normalize_gene_symbol(gene_symbol: str) -> str:
     Args:
         gene_symbol (str): The gene symbol to normalize

-    Returns
+    Returns
         str: Normalized gene symbol
     """
     if not isinstance(gene_symbol, str):
@@ -454,7 +454,7 @@ def format_api_response(
     response_data (Any): The response data to format
     format_type (str): The desired output format ('json', 'pretty', 'minimal')

-    Returns
+    Returns
     Union[str, Dict[str, Any]]: Formatted response
     """
     if format_type == "json":
@@ -493,7 +493,7 @@ def validate_hook_config(config: Dict[str, Any]) -> bool:
     Args:
         config (Dict[str, Any]): Hook configuration to validate

-    Returns
+    Returns
         bool: True if configuration is valid, False otherwise
     """
     try:
@@ -561,7 +561,7 @@ def validate_hook_conditions(conditions: Dict[str, Any]) -> bool:
     Args:
         conditions (Dict[str, Any]): Hook conditions to validate

-    Returns
+    Returns
         bool: True if conditions are valid, False otherwise
     """
     try:
{tooluniverse-1.0.9.dist-info → tooluniverse-1.0.10.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tooluniverse
- Version: 1.0.9
+ Version: 1.0.10
 Summary: A comprehensive collection of scientific tools for Agentic AI, offering integration with the ToolUniverse SDK and MCP Server to support advanced scientific workflows.
 Author-email: Shanghua Gao <shanghuagao@gmail.com>
 Project-URL: Homepage, https://github.com/mims-harvard/ToolUniverse
@@ -86,14 +86,18 @@ Requires-Dist: kaleido>=0.2.1; extra == "visualization"
 Requires-Dist: scipy>=1.7.0; extra == "visualization"
 Requires-Dist: matplotlib>=3.5.0; extra == "visualization"
 Requires-Dist: networkx>=3.4.0; extra == "visualization"
+ Provides-Extra: space
+ Requires-Dist: huggingface_hub>=0.34.0; extra == "space"
+ Requires-Dist: pyyaml>=6.0.0; extra == "space"
+ Requires-Dist: requests>=2.32.0; extra == "space"
 Provides-Extra: all
- Requires-Dist: tooluniverse[dev,docs,graph,visualization]; extra == "all"
+ Requires-Dist: tooluniverse[dev,docs,graph,space,visualization]; extra == "all"
 Dynamic: license-file

 # <img src="docs/_static/logo.png" alt="ToolUniverse Logo" height="28" style="vertical-align: middle; margin-right: 8px;" /> ToolUniverse: Democratizing AI scientists

 [](https://arxiv.org/abs/2509.23426)
- [](https://badge.fury.io/py/tooluniverse)
 [](https://github.com/mims-harvard/ToolUniverse)
 [_Supported-green)](README_USAGE.md#running-the-mcp-server)
 [](https://zitniklab.hms.harvard.edu/ToolUniverse/)