tooluniverse 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tooluniverse/__init__.py +71 -57
- tooluniverse/alphafold_tool.py +51 -20
- tooluniverse/compose_scripts/tool_graph_generation.py +249 -0
- tooluniverse/compose_scripts/tool_metadata_generator.py +375 -0
- tooluniverse/data/agentic_tools.json +143 -28
- tooluniverse/data/alphafold_tools.json +203 -61
- tooluniverse/data/compose_tools.json +63 -0
- tooluniverse/data/special_tools.json +2 -0
- tooluniverse/execute_function.py +185 -70
- tooluniverse/scripts/filter_tool_files.py +194 -0
- tooluniverse/test/test_alphafold_tool.py +66 -29
- tooluniverse/test/test_list_built_in_tools.py +33 -0
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/METADATA +116 -184
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/RECORD +18 -14
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/WHEEL +0 -0
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/entry_points.txt +0 -0
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/licenses/LICENSE +0 -0
- {tooluniverse-1.0.1.dist-info → tooluniverse-1.0.3.dist-info}/top_level.txt +0 -0
tooluniverse/execute_function.py
CHANGED
@@ -1000,9 +1000,9 @@ class ToolUniverse:
         """
         return copy.deepcopy(self.all_tools)

-    def list_built_in_tools(self, mode="config"):
+    def list_built_in_tools(self, mode="config", scan_all=False):
         """
-        List all built-in tool categories and their statistics with
+        List all built-in tool categories and their statistics with different modes.

         This method provides a comprehensive overview of all available tools in the ToolUniverse,
         organized by categories. It reads directly from the default tool files to gather statistics,
@@ -1012,39 +1012,62 @@ class ToolUniverse:
             mode (str, optional): Organization mode for tools. Defaults to 'config'.
                 - 'config': Organize by config file categories (original behavior)
                 - 'type': Organize by tool types (implementation classes)
+                - 'list_name': Return a list of all tool names
+                - 'list_spec': Return a list of all tool specifications
+            scan_all (bool, optional): Whether to scan all JSON files in data directory recursively.
+                If True, scans all JSON files in data/ and its subdirectories.
+                If False (default), uses predefined tool file mappings.

         Returns:
-            dict
-
-
-
-                'category_name': {
-                    'count': int,  # Number of tools in this category
-                    'tools': list  # List of tool names (only when mode='type')
-                },
-                ...
-            },
-            'total_categories': int,  # Total number of tool categories
-            'total_tools': int,  # Total number of unique tools
-            'mode': str,  # The mode used for organization
-            'summary': str  # Human-readable summary of statistics
-        }
+            dict or list:
+                - For 'config' and 'type' modes: A dictionary containing tool statistics
+                - For 'list_name' mode: A list of all tool names
+                - For 'list_spec' mode: A list of all tool specifications

         Example:
             >>> tool_universe = ToolUniverse()
-            >>> # Group by config file categories
+            >>> # Group by config file categories (predefined files only)
             >>> stats = tool_universe.list_built_in_tools(mode='config')
-            >>> #
-            >>> stats = tool_universe.list_built_in_tools(mode='
+            >>> # Scan all JSON files in data directory recursively
+            >>> stats = tool_universe.list_built_in_tools(mode='config', scan_all=True)
+            >>> # Get all tool names from all JSON files
+            >>> tool_names = tool_universe.list_built_in_tools(mode='list_name', scan_all=True)

         Note:
             - This method reads directly from tool files and works without calling load_tools()
             - Tools are deduplicated across categories, so the same tool won't be counted multiple times
-            - The summary is automatically printed to console when this method is called
+            - The summary is automatically printed to console when this method is called (except for list_name and list_spec modes)
+            - When scan_all=True, all JSON files in data/ and subdirectories are scanned
         """
-        if mode not in ["config", "type"]:
-            raise ValueError(
+        if mode not in ["config", "type", "list_name", "list_spec"]:
+            raise ValueError(
+                "Mode must be one of: 'config', 'type', 'list_name', 'list_spec'"
+            )

+        # For list_name and list_spec modes, we can return early with just the data
+        if mode in ["list_name", "list_spec"]:
+            all_tools = []
+            all_tool_names = set()  # For deduplication across categories
+
+            if scan_all:
+                # Scan all JSON files in data directory recursively
+                all_tools, all_tool_names = self._scan_all_json_files()
+            else:
+                # Use predefined tool files (original behavior)
+                all_tools, all_tool_names = self._scan_predefined_files()
+
+            # Deduplicate tools by name
+            unique_tools = {}
+            for tool in all_tools:
+                if tool["name"] not in unique_tools:
+                    unique_tools[tool["name"]] = tool
+
+            if mode == "list_name":
+                return sorted(list(unique_tools.keys()))
+            elif mode == "list_spec":
+                return list(unique_tools.values())
+
+        # Original logic for config and type modes
         result = {
             "categories": {},
             "total_categories": 0,
@@ -1053,58 +1076,43 @@
             "summary": "",
         }

-
-
-
-        # Read tools from each category file
-        for category, file_path in self.tool_files.items():
-            try:
-                # Read the JSON file for this category
-                tools_in_category = read_json_list(file_path)
-                all_tools.extend(tools_in_category)
+        if scan_all:
+            # Scan all JSON files in data directory recursively
+            all_tools, all_tool_names = self._scan_all_json_files()

-
-
-
+            # For config mode with scan_all, organize by file names
+            if mode == "config":
+                file_tools_map = {}
+                for tool in all_tools:
+                    # Get the source file for this tool (we need to track this)
+                    # For now, we'll organize by tool type as a fallback
+                    tool_type = tool.get("type", "Unknown")
+                    if tool_type not in file_tools_map:
+                        file_tools_map[tool_type] = []
+                    file_tools_map[tool_type].append(tool)
+
+                for category, tools in file_tools_map.items():
+                    result["categories"][category] = {"count": len(tools)}
+        else:
+            # Use predefined tool files (original behavior)
+            all_tools, all_tool_names = self._scan_predefined_files()

-
-
+            # Read tools from each category file
+            for category, file_path in self.tool_files.items():
+                try:
+                    # Read the JSON file for this category
+                    tools_in_category = read_json_list(file_path)

-
-
-
-                )
-                if mode == "config":
-                    result["categories"][category] = {"count": 0}
+                    if mode == "config":
+                        tool_names = [tool["name"] for tool in tools_in_category]
+                        result["categories"][category] = {"count": len(tool_names)}

-
-
-
-
-            remote_tools = []
-            for fname in os.listdir(remote_dir):
-                if not fname.lower().endswith(".json"):
-                    continue
-                fpath = os.path.join(remote_dir, fname)
-                try:
-                    tools_in_file = read_json_list(fpath)
-                    if isinstance(tools_in_file, dict):
-                        tools_in_file = list(tools_in_file.values())
-                    if isinstance(tools_in_file, list):
-                        remote_tools.extend(tools_in_file)
-                except Exception as e:
-                    warning(
-                        f"Warning: Could not read remote tools from {fpath}: {e}"
-                    )
-            if remote_tools:
-                all_tools.extend(remote_tools)
-                all_tool_names.update([tool["name"] for tool in remote_tools])
+                except Exception as e:
+                    warning(
+                        f"Warning: Could not read tools from {category} ({file_path}): {e}"
+                    )
                     if mode == "config":
-            result["categories"][
-                "count": len(remote_tools)
-            }
-        except Exception as e:
-            warning(f"Warning: Failed to scan remote tools directory: {e}")
+                        result["categories"][category] = {"count": 0}

         # If mode is 'type', organize by tool types instead
         if mode == "type":
@@ -1202,6 +1210,113 @@

         return result

+    def _scan_predefined_files(self):
+        """
+        Scan predefined tool files (original behavior).
+
+        Returns:
+            tuple: (all_tools, all_tool_names) where all_tools is a list of tool configs
+                and all_tool_names is a set of tool names for deduplication
+        """
+        all_tools = []
+        all_tool_names = set()
+
+        # Read tools from each category file
+        for category, file_path in self.tool_files.items():
+            try:
+                # Read the JSON file for this category
+                tools_in_category = read_json_list(file_path)
+                all_tools.extend(tools_in_category)
+                all_tool_names.update([tool["name"] for tool in tools_in_category])
+            except Exception as e:
+                warning(
+                    f"Warning: Could not read tools from {category} ({file_path}): {e}"
+                )
+
+        # Also include remote tools
+        try:
+            remote_dir = os.path.join(current_dir, "data", "remote_tools")
+            if os.path.isdir(remote_dir):
+                remote_tools = []
+                for fname in os.listdir(remote_dir):
+                    if not fname.lower().endswith(".json"):
+                        continue
+                    fpath = os.path.join(remote_dir, fname)
+                    try:
+                        tools_in_file = read_json_list(fpath)
+                        if isinstance(tools_in_file, dict):
+                            tools_in_file = list(tools_in_file.values())
+                        if isinstance(tools_in_file, list):
+                            remote_tools.extend(tools_in_file)
+                    except Exception as e:
+                        warning(
+                            f"Warning: Could not read remote tools from {fpath}: {e}"
+                        )
+                if remote_tools:
+                    all_tools.extend(remote_tools)
+                    all_tool_names.update([tool["name"] for tool in remote_tools])
+        except Exception as e:
+            warning(f"Warning: Failed to scan remote tools directory: {e}")
+
+        return all_tools, all_tool_names
+
+    def _scan_all_json_files(self):
+        """
+        Recursively scan all JSON files in the data directory and its subdirectories.
+
+        Returns:
+            tuple: (all_tools, all_tool_names) where all_tools is a list of tool configs
+                and all_tool_names is a set of tool names for deduplication
+        """
+        all_tools = []
+        all_tool_names = set()
+
+        # Get the data directory path
+        data_dir = os.path.join(current_dir, "data")
+
+        if not os.path.exists(data_dir):
+            warning(f"Warning: Data directory not found: {data_dir}")
+            return all_tools, all_tool_names
+
+        # Recursively find all JSON files
+        json_files = []
+        for root, _dirs, files in os.walk(data_dir):
+            for file in files:
+                if file.lower().endswith(".json"):
+                    json_files.append(os.path.join(root, file))
+
+        self.logger.debug(f"Found {len(json_files)} JSON files to scan")
+
+        # Read tools from each JSON file
+        for json_file in json_files:
+            try:
+                tools_in_file = read_json_list(json_file)
+
+                # Handle different data formats
+                if isinstance(tools_in_file, dict):
+                    # Convert dict of tools to list of tools
+                    tools_in_file = list(tools_in_file.values())
+                elif not isinstance(tools_in_file, list):
+                    # Skip files that don't contain tool configurations
+                    continue
+
+                # Add tools to our collection
+                for tool in tools_in_file:
+                    if isinstance(tool, dict) and "name" in tool:
+                        all_tools.append(tool)
+                        all_tool_names.add(tool["name"])
+
+                self.logger.debug(f"Loaded {len(tools_in_file)} tools from {json_file}")
+
+            except Exception as e:
+                warning(f"Warning: Could not read tools from {json_file}: {e}")
+                continue
+
+        self.logger.info(
+            f"Scanned {len(json_files)} JSON files, found {len(all_tools)} tools"
+        )
+        return all_tools, all_tool_names
+
     def refresh_tool_name_desc(
         self,
         enable_full_desc=False,
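Taken together, the execute_function.py changes turn list_built_in_tools into a small query API. Below is a minimal usage sketch of the new surface; the method name, keyword arguments, and mode strings come straight from the diff above, while the specific calls are only illustrative:

from tooluniverse import ToolUniverse

tu = ToolUniverse()

# Unchanged behavior: a stats dict organized by config-file category
stats = tu.list_built_in_tools(mode="config")

# New in 1.0.3: a sorted, deduplicated list of tool names
names = tu.list_built_in_tools(mode="list_name")

# New in 1.0.3: full tool specs, gathered from every JSON file under data/
specs = tu.list_built_in_tools(mode="list_spec", scan_all=True)

# Any other mode now raises ValueError
# tu.list_built_in_tools(mode="typo")  # ValueError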
tooluniverse/scripts/filter_tool_files.py
ADDED
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3
+"""
+Script to filter tool files by removing tools that don't exist in the current tool universe.
+
+This script:
+1. Gets all valid tool names from ToolUniverse using scan_all=True
+2. Filters tool_relationship_graph_FINAL.json to keep only valid tools
+3. Filters v4_all_tools_final.json to keep only valid tools
+4. Preserves all other data structure and content
+"""
+
+import json
+from pathlib import Path
+
+# Import after modifying sys.path
+from tooluniverse import ToolUniverse
+
+
+def load_json_file(file_path):
+    """Load JSON file and return the data."""
+    try:
+        with open(file_path, "r", encoding="utf-8") as f:
+            return json.load(f)
+    except Exception as e:
+        print(f"Error loading {file_path}: {e}")
+        return None
+
+
+def save_json_file(file_path, data):
+    """Save data to JSON file."""
+    try:
+        with open(file_path, "w", encoding="utf-8") as f:
+            json.dump(data, f, ensure_ascii=False, indent=2)
+        print(f"Successfully saved filtered data to {file_path}")
+        return True
+    except Exception as e:
+        print(f"Error saving {file_path}: {e}")
+        return False
+
+
+def filter_tool_relationship_graph(data, valid_tool_names):
+    """
+    Filter tool_relationship_graph_FINAL.json to keep only valid tools.
+
+    Args:
+        data: The loaded JSON data
+        valid_tool_names: Set of valid tool names
+
+    Returns:
+        Filtered data
+    """
+    if not isinstance(data, dict):
+        print("Warning: tool_relationship_graph data is not a dict")
+        return data
+
+    filtered_data = {}
+
+    # Handle nodes array
+    if "nodes" in data and isinstance(data["nodes"], list):
+        filtered_nodes = []
+        for node in data["nodes"]:
+            if isinstance(node, dict) and "name" in node:
+                if node["name"] in valid_tool_names:
+                    filtered_nodes.append(node)
+                else:
+                    print(f"Removing node from relationship graph: {node['name']}")
+            else:
+                # Keep non-tool nodes (if any)
+                filtered_nodes.append(node)
+        filtered_data["nodes"] = filtered_nodes
+        print(
+            f"Nodes: {len(data['nodes'])} -> {len(filtered_nodes)} ({len(data['nodes']) - len(filtered_nodes)} removed)"
+        )
+
+    # Handle edges array
+    if "edges" in data and isinstance(data["edges"], list):
+        filtered_edges = []
+        for edge in data["edges"]:
+            if isinstance(edge, dict) and "source" in edge and "target" in edge:
+                # Keep edge if both source and target are valid tools
+                if (
+                    edge["source"] in valid_tool_names
+                    and edge["target"] in valid_tool_names
+                ):
+                    filtered_edges.append(edge)
+                else:
+                    print(
+                        f"Removing edge from relationship graph: {edge.get('source', 'unknown')} -> {edge.get('target', 'unknown')}"
+                    )
+            else:
+                # Keep non-tool edges (if any)
+                filtered_edges.append(edge)
+        filtered_data["edges"] = filtered_edges
+        print(
+            f"Edges: {len(data['edges'])} -> {len(filtered_edges)} ({len(data['edges']) - len(filtered_edges)} removed)"
+        )
+
+    # Keep other fields as-is (like stats, metadata, etc.)
+    for key, value in data.items():
+        if key not in ["nodes", "edges"]:
+            filtered_data[key] = value
+
+    return filtered_data
+
+
+def filter_v4_all_tools(data, valid_tool_names):
+    """
+    Filter v4_all_tools_final.json to keep only valid tools.
+
+    Args:
+        data: The loaded JSON data
+        valid_tool_names: Set of valid tool names
+
+    Returns:
+        Filtered data
+    """
+    if not isinstance(data, list):
+        print("Warning: v4_all_tools data is not a list")
+        return data
+
+    filtered_data = []
+
+    for tool in data:
+        if isinstance(tool, dict) and "name" in tool:
+            if tool["name"] in valid_tool_names:
+                filtered_data.append(tool)
+            else:
+                print(f"Removing tool from v4_all_tools: {tool['name']}")
+        else:
+            # Keep non-tool entries (if any)
+            filtered_data.append(tool)
+
+    return filtered_data
+
+
+def main():
+    """Main function to filter the tool files."""
+    print("Starting tool file filtering process...")
+
+    # Initialize ToolUniverse and get all valid tool names
+    print("Getting all valid tool names from ToolUniverse...")
+    tu = ToolUniverse()
+    all_tool_names = tu.list_built_in_tools(mode="list_name", scan_all=True)
+    valid_tool_names = set(all_tool_names)
+    print(f"Found {len(valid_tool_names)} valid tools")
+
+    # Define file paths
+    project_root = Path(__file__).parent.parent.parent.parent
+    web_dir = project_root / "web"
+
+    relationship_graph_file = web_dir / "tool_relationship_graph_FINAL.json"
+    v4_tools_file = web_dir / "v4_all_tools_final.json"
+
+    # Check if files exist
+    if not relationship_graph_file.exists():
+        print(f"Error: {relationship_graph_file} not found")
+        return
+
+    if not v4_tools_file.exists():
+        print(f"Error: {v4_tools_file} not found")
+        return
+
+    # Process tool_relationship_graph_FINAL.json
+    print(f"\nProcessing {relationship_graph_file.name}...")
+    relationship_data = load_json_file(relationship_graph_file)
+    if relationship_data is not None:
+        len(relationship_data.get("nodes", []))
+        len(relationship_data.get("edges", []))
+        filtered_relationship_data = filter_tool_relationship_graph(
+            relationship_data, valid_tool_names
+        )
+
+        # Save filtered data
+        save_json_file(relationship_graph_file, filtered_relationship_data)
+
+    # Process v4_all_tools_final.json
+    print(f"\nProcessing {v4_tools_file.name}...")
+    v4_data = load_json_file(v4_tools_file)
+    if v4_data is not None:
+        original_count = len(v4_data)
+        filtered_v4_data = filter_v4_all_tools(v4_data, valid_tool_names)
+        filtered_count = len(filtered_v4_data)
+        print(
+            f"V4 tools: {original_count} -> {filtered_count} tools ({original_count - filtered_count} removed)"
+        )
+
+        # Save filtered data
+        save_json_file(v4_tools_file, filtered_v4_data)
+
+    print("\nTool file filtering completed!")
+
+
+if __name__ == "__main__":
+    main()
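One thing worth noting about main() above: project_root is computed as four parent hops from the script's own location, and the files it rewrites live under a web/ directory at that root, so the script targets a source checkout rather than the installed wheel. A sketch of the path arithmetic, assuming a src-style repository layout (<root>/src/tooluniverse/scripts/filter_tool_files.py; the layout is an assumption, not something the diff confirms):

from pathlib import Path

# Mirror of the script's own resolution logic (hypothetical checkout path)
script = Path("src/tooluniverse/scripts/filter_tool_files.py").resolve()
project_root = script.parent.parent.parent.parent  # scripts -> tooluniverse -> src -> <root>
web_dir = project_root / "web"  # where tool_relationship_graph_FINAL.json must live
print(web_dir)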
tooluniverse/test/test_alphafold_tool.py
CHANGED
@@ -1,46 +1,54 @@
 import json
-from typing import Any, Dict, List
 import os
+from typing import Any, Dict, List
 from tooluniverse import ToolUniverse

-# Load
+# Load all tool schemas from JSON
 schema_path = os.path.join(
     os.path.dirname(__file__), "..", "data", "alphafold_tools.json"
 )
 with open(schema_path) as f:
-
+    tools_json = json.load(f)
+
+schemas = {tool["name"]: tool["return_schema"] for tool in tools_json}

 tooluni = ToolUniverse()
 tooluni.load_tools()

-# Test cases: 3 valid, 1 invalid UniProt ID, and 1 missing parameter
 test_queries: List[Dict[str, Any]] = [
+    # Hemoglobin subunit alpha (valid)
     {
-        "name": "
-        "arguments": {"
+        "name": "alphafold_get_prediction",
+        "arguments": {"qualifier": "P69905"},
     },
+    # Invalid
     {
-        "name": "
-        "arguments": {"
+        "name": "alphafold_get_prediction",
+        "arguments": {"qualifier": "XXX123"},
     },
+    # Missing param
     {
-        "name": "
-        "arguments": {
+        "name": "alphafold_get_prediction",
+        "arguments": {},
     },
+    # Summary: valid
     {
-        "name": "
-        "arguments": {"
-    },
+        "name": "alphafold_get_summary",
+        "arguments": {"qualifier": "P69905"},
+    },
+    # Annotations (valid + invalid type)
     {
-        "name": "
-        "arguments": {},
-    },
+        "name": "alphafold_get_annotations",
+        "arguments": {"qualifier": "P69905", "type": "MUTAGEN"},
+    },
+    {
+        "name": "alphafold_get_annotations",
+        "arguments": {"qualifier": "P69905", "type": "INVALID"},
+    },
 ]

 for idx, query in enumerate(test_queries, 1):
-
-    label = f"UniProt ID: {uid}" if uid else "No UniProt ID"
-    print(f"\n[{idx}] Running {query['name']} with {label}")
+    print(f"\n[{idx}] Running {query['name']} with {query['arguments']}")
     result = tooluni.run(query)

     # Handle errors
@@ -51,21 +59,50 @@ for idx, query in enumerate(test_queries, 1):
         continue

     # Handle success
-    data = result.get("data"
+    data = result.get("data")
     if not data:
         print("No data returned.")
         continue

-
-
-
-
-
-
+    # Schema validation (check only top-level keys)
+    schema = schemas[query["name"]]
+    expected_keys = schema.get("properties", {}).keys()
+
+    # Handle list vs dict results
+    if isinstance(data, list) and data:
+        record = data[0]
+    elif isinstance(data, dict):
+        record = data
+    else:
+        record = {}

-
-    missing = [k for k in schema.keys() if k not in first]
+    missing = [k for k in expected_keys if k not in record]
    if missing:
        print(f"  INVALID Missing expected fields: {missing}")
    else:
-        print("  All expected schema fields present")
+        print("  SUCCESS All expected schema fields present")
+
+    # Show highlights depending on tool
+    if query["name"] == "alphafold_get_prediction":
+        if "uniprotDescription" in record:
+            print(
+                f"  {record.get('uniprotDescription')} ({record.get('uniprotAccession')})"
+            )
+        print(f"  Organism: {record.get('organismScientificName')}")
+        print(f"  Avg pLDDT: {record.get('globalMetricValue')}")
+
+    elif query["name"] == "alphafold_get_summary":
+        entry = record.get("uniprot_entry", {})
+        structures = record.get("structures", [])
+        print(f"  UniProt AC: {entry.get('ac')}, ID: {entry.get('id')}")
+        print(f"  Sequence length: {entry.get('sequence_length')}")
+        print(f"  Structures returned: {len(structures)}")
+
+    elif query["name"] == "alphafold_get_annotations":
+        annotations = record.get("annotation", [])
+        print(f"  Accession: {record.get('accession')}")
+        print(f"  Total annotations: {len(annotations)}")
+        if annotations:
+            first_ann = annotations[0]
+            print(f"  First annotation type: {first_ann.get('type')}")
+            print(f"  First annotation description: {first_ann.get('description')}")
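The schema check above compares only the top-level keys of the returned record against return_schema["properties"], so it is a shape test rather than full JSON Schema validation. For reference, this is the entry shape the schemas lookup assumes each tool to have in alphafold_tools.json (a sketch only; the property names are taken from fields the test prints, and the value types are hypothetical):

# Illustrative entry shape (values hypothetical; real schemas ship in alphafold_tools.json)
example_entry = {
    "name": "alphafold_get_prediction",
    "return_schema": {
        "type": "object",
        "properties": {
            "uniprotAccession": {"type": "string"},
            "uniprotDescription": {"type": "string"},
            "globalMetricValue": {"type": "number"},
        },
    },
}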
tooluniverse/test/test_list_built_in_tools.py
ADDED
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+"""
+Basic test for list_built_in_tools including scan_all option.
+Run directly: python tests/test_list_built_in_tools.py
+"""
+
+from tooluniverse import ToolUniverse  # noqa: E402
+
+
+def main():
+    tu = ToolUniverse()
+
+    # Use predefined files (original behavior)
+    tool_names = tu.list_built_in_tools(mode="list_name", scan_all=False)
+    print(f"predefined tool names: {len(tool_names)}")
+
+    # Scan all JSON files
+    all_tool_names = tu.list_built_in_tools(mode="list_name", scan_all=True)
+    print(f"all tool names (scan_all): {len(all_tool_names)}")
+
+    # Get all tool specifications
+    all_tool_specs = tu.list_built_in_tools(mode="list_spec", scan_all=True)
+    print(f"all tool specs (scan_all): {len(all_tool_specs)}")
+
+    # Organize all tools by type
+    type_stats = tu.list_built_in_tools(mode="type", scan_all=True)
+    print(
+        f"type stats -> total_categories: {type_stats['total_categories']}, total_tools: {type_stats['total_tools']}"
+    )
+
+
+if __name__ == "__main__":
+    main()
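For the 'config' and 'type' modes exercised in this test, the return value is the stats dict described in the execute_function.py docstring earlier in this diff. A short sketch of consuming it, using only keys named in the diff ("categories", "count", "summary", "total_categories", "total_tools"); actual counts will vary by install:

from tooluniverse import ToolUniverse

stats = ToolUniverse().list_built_in_tools(mode="type", scan_all=True)
print(stats["summary"])  # human-readable summary (also printed by the call itself)
for category, info in sorted(stats["categories"].items()):
    print(f"{category}: {info['count']} tools")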