tooluniverse 0.1.4__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tooluniverse was flagged by the registry as possibly problematic. See the registry's advisory page for details.

Files changed (187):
  1. tooluniverse/__init__.py +340 -4
  2. tooluniverse/admetai_tool.py +84 -0
  3. tooluniverse/agentic_tool.py +563 -0
  4. tooluniverse/alphafold_tool.py +96 -0
  5. tooluniverse/base_tool.py +129 -6
  6. tooluniverse/boltz_tool.py +207 -0
  7. tooluniverse/chem_tool.py +192 -0
  8. tooluniverse/compose_scripts/__init__.py +1 -0
  9. tooluniverse/compose_scripts/biomarker_discovery.py +293 -0
  10. tooluniverse/compose_scripts/comprehensive_drug_discovery.py +186 -0
  11. tooluniverse/compose_scripts/drug_safety_analyzer.py +89 -0
  12. tooluniverse/compose_scripts/literature_tool.py +34 -0
  13. tooluniverse/compose_scripts/output_summarizer.py +279 -0
  14. tooluniverse/compose_scripts/tool_description_optimizer.py +681 -0
  15. tooluniverse/compose_scripts/tool_discover.py +705 -0
  16. tooluniverse/compose_scripts/tool_graph_composer.py +448 -0
  17. tooluniverse/compose_tool.py +371 -0
  18. tooluniverse/ctg_tool.py +1002 -0
  19. tooluniverse/custom_tool.py +81 -0
  20. tooluniverse/dailymed_tool.py +108 -0
  21. tooluniverse/data/admetai_tools.json +155 -0
  22. tooluniverse/data/agentic_tools.json +1156 -0
  23. tooluniverse/data/alphafold_tools.json +87 -0
  24. tooluniverse/data/boltz_tools.json +9 -0
  25. tooluniverse/data/chembl_tools.json +16 -0
  26. tooluniverse/data/clait_tools.json +108 -0
  27. tooluniverse/data/clinicaltrials_gov_tools.json +326 -0
  28. tooluniverse/data/compose_tools.json +202 -0
  29. tooluniverse/data/dailymed_tools.json +70 -0
  30. tooluniverse/data/dataset_tools.json +646 -0
  31. tooluniverse/data/disease_target_score_tools.json +712 -0
  32. tooluniverse/data/efo_tools.json +17 -0
  33. tooluniverse/data/embedding_tools.json +319 -0
  34. tooluniverse/data/enrichr_tools.json +31 -0
  35. tooluniverse/data/europe_pmc_tools.json +22 -0
  36. tooluniverse/data/expert_feedback_tools.json +10 -0
  37. tooluniverse/data/fda_drug_adverse_event_tools.json +491 -0
  38. tooluniverse/data/fda_drug_labeling_tools.json +544 -168
  39. tooluniverse/data/fda_drugs_with_brand_generic_names_for_tool.py +76929 -148860
  40. tooluniverse/data/finder_tools.json +209 -0
  41. tooluniverse/data/gene_ontology_tools.json +113 -0
  42. tooluniverse/data/gwas_tools.json +1082 -0
  43. tooluniverse/data/hpa_tools.json +333 -0
  44. tooluniverse/data/humanbase_tools.json +47 -0
  45. tooluniverse/data/idmap_tools.json +74 -0
  46. tooluniverse/data/mcp_client_tools_example.json +113 -0
  47. tooluniverse/data/mcpautoloadertool_defaults.json +28 -0
  48. tooluniverse/data/medlineplus_tools.json +141 -0
  49. tooluniverse/data/monarch_tools.json +1 -1
  50. tooluniverse/data/openalex_tools.json +36 -0
  51. tooluniverse/data/opentarget_tools.json +82 -58
  52. tooluniverse/data/output_summarization_tools.json +101 -0
  53. tooluniverse/data/packages/bioinformatics_core_tools.json +1756 -0
  54. tooluniverse/data/packages/categorized_tools.txt +206 -0
  55. tooluniverse/data/packages/cheminformatics_tools.json +347 -0
  56. tooluniverse/data/packages/earth_sciences_tools.json +74 -0
  57. tooluniverse/data/packages/genomics_tools.json +776 -0
  58. tooluniverse/data/packages/image_processing_tools.json +38 -0
  59. tooluniverse/data/packages/machine_learning_tools.json +789 -0
  60. tooluniverse/data/packages/neuroscience_tools.json +62 -0
  61. tooluniverse/data/packages/original_tools.txt +0 -0
  62. tooluniverse/data/packages/physics_astronomy_tools.json +62 -0
  63. tooluniverse/data/packages/scientific_computing_tools.json +560 -0
  64. tooluniverse/data/packages/single_cell_tools.json +453 -0
  65. tooluniverse/data/packages/software_tools.json +4954 -0
  66. tooluniverse/data/packages/structural_biology_tools.json +396 -0
  67. tooluniverse/data/packages/visualization_tools.json +399 -0
  68. tooluniverse/data/pubchem_tools.json +215 -0
  69. tooluniverse/data/pubtator_tools.json +68 -0
  70. tooluniverse/data/rcsb_pdb_tools.json +1332 -0
  71. tooluniverse/data/reactome_tools.json +19 -0
  72. tooluniverse/data/semantic_scholar_tools.json +26 -0
  73. tooluniverse/data/special_tools.json +2 -25
  74. tooluniverse/data/tool_composition_tools.json +88 -0
  75. tooluniverse/data/toolfinderkeyword_defaults.json +34 -0
  76. tooluniverse/data/txagent_client_tools.json +9 -0
  77. tooluniverse/data/uniprot_tools.json +211 -0
  78. tooluniverse/data/url_fetch_tools.json +94 -0
  79. tooluniverse/data/uspto_downloader_tools.json +9 -0
  80. tooluniverse/data/uspto_tools.json +811 -0
  81. tooluniverse/data/xml_tools.json +3275 -0
  82. tooluniverse/dataset_tool.py +296 -0
  83. tooluniverse/default_config.py +165 -0
  84. tooluniverse/efo_tool.py +42 -0
  85. tooluniverse/embedding_database.py +630 -0
  86. tooluniverse/embedding_sync.py +396 -0
  87. tooluniverse/enrichr_tool.py +266 -0
  88. tooluniverse/europe_pmc_tool.py +52 -0
  89. tooluniverse/execute_function.py +1775 -95
  90. tooluniverse/extended_hooks.py +444 -0
  91. tooluniverse/gene_ontology_tool.py +194 -0
  92. tooluniverse/graphql_tool.py +158 -36
  93. tooluniverse/gwas_tool.py +358 -0
  94. tooluniverse/hpa_tool.py +1645 -0
  95. tooluniverse/humanbase_tool.py +389 -0
  96. tooluniverse/logging_config.py +254 -0
  97. tooluniverse/mcp_client_tool.py +764 -0
  98. tooluniverse/mcp_integration.py +413 -0
  99. tooluniverse/mcp_tool_registry.py +925 -0
  100. tooluniverse/medlineplus_tool.py +337 -0
  101. tooluniverse/openalex_tool.py +228 -0
  102. tooluniverse/openfda_adv_tool.py +283 -0
  103. tooluniverse/openfda_tool.py +393 -160
  104. tooluniverse/output_hook.py +1122 -0
  105. tooluniverse/package_tool.py +195 -0
  106. tooluniverse/pubchem_tool.py +158 -0
  107. tooluniverse/pubtator_tool.py +168 -0
  108. tooluniverse/rcsb_pdb_tool.py +38 -0
  109. tooluniverse/reactome_tool.py +108 -0
  110. tooluniverse/remote/boltz/boltz_mcp_server.py +50 -0
  111. tooluniverse/remote/depmap_24q2/depmap_24q2_mcp_tool.py +442 -0
  112. tooluniverse/remote/expert_feedback/human_expert_mcp_tools.py +2013 -0
  113. tooluniverse/remote/expert_feedback/simple_test.py +23 -0
  114. tooluniverse/remote/expert_feedback/start_web_interface.py +188 -0
  115. tooluniverse/remote/expert_feedback/web_only_interface.py +0 -0
  116. tooluniverse/remote/expert_feedback_mcp/human_expert_mcp_server.py +1611 -0
  117. tooluniverse/remote/expert_feedback_mcp/simple_test.py +34 -0
  118. tooluniverse/remote/expert_feedback_mcp/start_web_interface.py +91 -0
  119. tooluniverse/remote/immune_compass/compass_tool.py +327 -0
  120. tooluniverse/remote/pinnacle/pinnacle_tool.py +328 -0
  121. tooluniverse/remote/transcriptformer/transcriptformer_tool.py +586 -0
  122. tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +61 -0
  123. tooluniverse/remote/uspto_downloader/uspto_downloader_tool.py +120 -0
  124. tooluniverse/remote_tool.py +99 -0
  125. tooluniverse/restful_tool.py +53 -30
  126. tooluniverse/scripts/generate_tool_graph.py +408 -0
  127. tooluniverse/scripts/visualize_tool_graph.py +829 -0
  128. tooluniverse/semantic_scholar_tool.py +62 -0
  129. tooluniverse/smcp.py +2452 -0
  130. tooluniverse/smcp_server.py +975 -0
  131. tooluniverse/test/mcp_server_test.py +0 -0
  132. tooluniverse/test/test_admetai_tool.py +370 -0
  133. tooluniverse/test/test_agentic_tool.py +129 -0
  134. tooluniverse/test/test_alphafold_tool.py +71 -0
  135. tooluniverse/test/test_chem_tool.py +37 -0
  136. tooluniverse/test/test_compose_lieraturereview.py +63 -0
  137. tooluniverse/test/test_compose_tool.py +448 -0
  138. tooluniverse/test/test_dailymed.py +69 -0
  139. tooluniverse/test/test_dataset_tool.py +200 -0
  140. tooluniverse/test/test_disease_target_score.py +56 -0
  141. tooluniverse/test/test_drugbank_filter_examples.py +179 -0
  142. tooluniverse/test/test_efo.py +31 -0
  143. tooluniverse/test/test_enrichr_tool.py +21 -0
  144. tooluniverse/test/test_europe_pmc_tool.py +20 -0
  145. tooluniverse/test/test_fda_adv.py +95 -0
  146. tooluniverse/test/test_fda_drug_labeling.py +91 -0
  147. tooluniverse/test/test_gene_ontology_tools.py +66 -0
  148. tooluniverse/test/test_gwas_tool.py +139 -0
  149. tooluniverse/test/test_hpa.py +625 -0
  150. tooluniverse/test/test_humanbase_tool.py +20 -0
  151. tooluniverse/test/test_idmap_tools.py +61 -0
  152. tooluniverse/test/test_mcp_server.py +211 -0
  153. tooluniverse/test/test_mcp_tool.py +247 -0
  154. tooluniverse/test/test_medlineplus.py +220 -0
  155. tooluniverse/test/test_openalex_tool.py +32 -0
  156. tooluniverse/test/test_opentargets.py +28 -0
  157. tooluniverse/test/test_pubchem_tool.py +116 -0
  158. tooluniverse/test/test_pubtator_tool.py +37 -0
  159. tooluniverse/test/test_rcsb_pdb_tool.py +86 -0
  160. tooluniverse/test/test_reactome.py +54 -0
  161. tooluniverse/test/test_semantic_scholar_tool.py +24 -0
  162. tooluniverse/test/test_software_tools.py +147 -0
  163. tooluniverse/test/test_tool_description_optimizer.py +49 -0
  164. tooluniverse/test/test_tool_finder.py +26 -0
  165. tooluniverse/test/test_tool_finder_llm.py +252 -0
  166. tooluniverse/test/test_tools_find.py +195 -0
  167. tooluniverse/test/test_uniprot_tools.py +74 -0
  168. tooluniverse/test/test_uspto_tool.py +72 -0
  169. tooluniverse/test/test_xml_tool.py +113 -0
  170. tooluniverse/tool_finder_embedding.py +267 -0
  171. tooluniverse/tool_finder_keyword.py +693 -0
  172. tooluniverse/tool_finder_llm.py +699 -0
  173. tooluniverse/tool_graph_web_ui.py +955 -0
  174. tooluniverse/tool_registry.py +416 -0
  175. tooluniverse/uniprot_tool.py +155 -0
  176. tooluniverse/url_tool.py +253 -0
  177. tooluniverse/uspto_tool.py +240 -0
  178. tooluniverse/utils.py +369 -41
  179. tooluniverse/xml_tool.py +369 -0
  180. tooluniverse-1.0.0.dist-info/METADATA +377 -0
  181. tooluniverse-1.0.0.dist-info/RECORD +186 -0
  182. {tooluniverse-0.1.4.dist-info → tooluniverse-1.0.0.dist-info}/WHEEL +1 -1
  183. tooluniverse-1.0.0.dist-info/entry_points.txt +9 -0
  184. tooluniverse-0.1.4.dist-info/METADATA +0 -141
  185. tooluniverse-0.1.4.dist-info/RECORD +0 -18
  186. {tooluniverse-0.1.4.dist-info → tooluniverse-1.0.0.dist-info}/licenses/LICENSE +0 -0
  187. {tooluniverse-0.1.4.dist-info → tooluniverse-1.0.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,448 @@
1
+ """
2
+ Simplified and fixed tool_graph_composer.py
3
+ This version includes better error handling and avoids the 'unhashable type' issue.
4
+ """
5
+
6
+ import json
7
+ import os
8
+ import pickle
9
+ from datetime import datetime
10
+
11
+
12
def compose(arguments, tooluniverse, call_tool):
    """Build a tool-compatibility graph and persist it to disk.

    Args:
        arguments: Dict of composition options: output_path, analysis_depth,
            min_compatibility_score, exclude_categories,
            max_tools_per_category, force_rebuild.
        tooluniverse: ToolUniverse instance providing the tool registry.
        call_tool: Callable used to invoke other tools by name.

    Returns:
        Dict describing the outcome: graph file paths and statistics on
        success, or an error payload with ``"status": "error"``.
    """
    try:
        out_base = arguments.get("output_path", "./tool_composition_graph")
        depth = arguments.get("analysis_depth", "detailed")
        score_floor = arguments.get("min_compatibility_score", 60)
        skip_categories = arguments.get(
            "exclude_categories", ["tool_finder", "special_tools"]
        )
        per_category_cap = arguments.get("max_tools_per_category", 50)
        force_rebuild = arguments.get("force_rebuild", False)

        print(f"Starting tool graph composition with {depth} analysis...")

        # Reuse a cached graph unless the caller explicitly asks for a rebuild.
        cache_path = f"{out_base}_cache.pkl"
        if os.path.exists(cache_path) and not force_rebuild:
            print("Loading cached graph...")
            return _load_cached_graph(cache_path, out_base)

        tools = _load_all_tools(tooluniverse, skip_categories, per_category_cap)
        print(f"Loaded {len(tools)} tools for analysis")

        if not tools:
            return {
                "status": "error",
                "message": "No tools available for analysis after filtering",
                "tools_analyzed": 0,
                "edges_created": 0,
            }

        graph = _build_compatibility_graph(tools, depth, score_floor, call_tool)
        written_files = _save_graph(graph, out_base)

        # Best-effort cache write; a failure here must not fail the run.
        payload = {
            "graph_data": graph,
            "output_files": written_files,
            "creation_time": datetime.now().isoformat(),
        }
        try:
            with open(cache_path, "wb") as fh:
                pickle.dump(payload, fh)
            print(f"Cached results to: {cache_path}")
        except Exception as e:
            print(f"Warning: Could not cache results: {e}")

        result = {
            "status": "success",
            "graph_files": written_files,
            "statistics": _generate_graph_stats(graph),
            "tools_analyzed": len(tools),
            "edges_created": len(graph.get("edges", [])),
            "timestamp": datetime.now().isoformat(),
        }
        print("Tool graph composition completed successfully!")
        return result

    except Exception as e:
        print(f"Error in tool graph composition: {e}")
        import traceback

        traceback.print_exc()

        return {
            "status": "error",
            "message": str(e),
            "tools_analyzed": 0,
            "edges_created": 0,
            "timestamp": datetime.now().isoformat(),
        }
108
+
109
+
110
+ def _load_all_tools(tooluniverse, exclude_categories, max_per_category):
111
+ """Load all available tools from ToolUniverse."""
112
+
113
+ all_tools = []
114
+ exclude_set = set(exclude_categories) # Convert to set for faster lookup
115
+
116
+ # Get all tools from ToolUniverse using all_tool_dict directly
117
+ # Group tools by category based on their configuration
118
+ tools_by_category = {}
119
+
120
+ for tool_name, tool_config in tooluniverse.all_tool_dict.items():
121
+ # Skip if tool_name is not a string (defensive programming)
122
+ if not isinstance(tool_name, str):
123
+ print(f"Skipping non-string tool name: {tool_name}")
124
+ continue
125
+
126
+ # Try to determine category from various sources
127
+ category = "unknown"
128
+
129
+ # Check if category is specified in tool config
130
+ if isinstance(tool_config, dict) and "category" in tool_config:
131
+ category = tool_config["category"]
132
+ else:
133
+ # Try to find category from tool_category_dicts
134
+ for cat_name, tools_in_cat in tooluniverse.tool_category_dicts.items():
135
+ if tool_name in tools_in_cat:
136
+ category = cat_name
137
+ break
138
+
139
+ # Initialize category if not exists
140
+ if category not in tools_by_category:
141
+ tools_by_category[category] = []
142
+
143
+ tools_by_category[category].append((tool_name, tool_config))
144
+
145
+ # Process each category
146
+ for category, category_tools in tools_by_category.items():
147
+ # Skip excluded categories
148
+ if category in exclude_set:
149
+ print(f"Skipping category: {category}")
150
+ continue
151
+
152
+ # Limit tools per category for performance
153
+ if len(category_tools) > max_per_category:
154
+ category_tools = category_tools[:max_per_category]
155
+ print(f"Limited {category} to {max_per_category} tools")
156
+
157
+ # Add category information and convert to list format
158
+ for tool_name, tool_config in category_tools:
159
+ # Create a copy to avoid modifying the original
160
+ if isinstance(tool_config, dict):
161
+ tool_config = dict(tool_config)
162
+ else:
163
+ tool_config = {
164
+ "name": tool_name,
165
+ "description": "No description available",
166
+ }
167
+
168
+ tool_config["category"] = category
169
+ tool_config["name"] = tool_name # Ensure name is set
170
+ all_tools.append(tool_config)
171
+
172
+ print(f"Loaded {len(category_tools)} tools from {category}")
173
+
174
+ return all_tools
175
+
176
+
177
def _build_compatibility_graph(tools, analysis_depth, min_score, call_tool):
    """Analyze tool pairs with the ToolCompatibilityAnalyzer and build a graph.

    Produces a dict with "nodes" (one per tool), "edges" (pairs whose
    compatibility score reaches ``min_score``) and "metadata".
    """
    graph = {
        "nodes": [],
        "edges": [],
        "metadata": {
            "analysis_depth": analysis_depth,
            "min_compatibility_score": min_score,
            "creation_time": datetime.now().isoformat(),
            "total_tools": len(tools),
        },
    }

    # One node per tool, with safe defaults for any missing fields.
    for idx, spec in enumerate(tools):
        graph["nodes"].append(
            {
                "id": spec.get("name", f"tool_{idx}"),
                "name": spec.get("name", f"tool_{idx}"),
                "type": spec.get("type", "unknown"),
                "description": spec.get("description", ""),
                "category": spec.get("category", "unknown"),
                "parameters": spec.get("parameter", {}),
            }
        )

    # Cap the number of pairwise analyses (demo-scale limit).
    pair_budget = min(len(tools) * (len(tools) - 1), 100)
    pair_count = 0

    print(f"Analyzing up to {pair_budget} tool pairs...")

    budget_exhausted = False
    for i, src in enumerate(tools):
        if budget_exhausted:
            break
        for j, dst in enumerate(tools):
            if i == j:
                continue  # no self-loops

            pair_count += 1
            if pair_count > pair_budget:
                print("Reached analysis limit for demo")
                budget_exhausted = True
                break

            if pair_count % 10 == 0:
                pct = (pair_count / pair_budget) * 100
                print(f"Progress: {pair_count}/{pair_budget} ({pct:.1f}%)")

            try:
                # Minimal, JSON-safe specifications for the analyzer.
                src_spec = {
                    "name": src.get("name", f"tool_{i}"),
                    "type": src.get("type", "unknown"),
                    "description": src.get("description", ""),
                    "parameter": src.get("parameter", {}),
                }
                dst_spec = {
                    "name": dst.get("name", f"tool_{j}"),
                    "type": dst.get("type", "unknown"),
                    "description": dst.get("description", ""),
                    "parameter": dst.get("parameter", {}),
                }

                analysis = call_tool(
                    "ToolCompatibilityAnalyzer",
                    {
                        "source_tool": json.dumps(src_spec),
                        "target_tool": json.dumps(dst_spec),
                        "analysis_depth": analysis_depth,
                    },
                )

                info = _extract_compatibility_info(analysis)
                score = info.get("compatibility_score", 0)

                # Only keep edges that meet the caller's threshold.
                if score >= min_score:
                    graph["edges"].append(
                        {
                            "source": src_spec["name"],
                            "target": dst_spec["name"],
                            "compatibility_score": score,
                            "confidence": info.get("confidence", score),
                            "is_compatible": info.get("is_compatible", False),
                            "automation_ready": info.get(
                                "automation_ready", False
                            ),
                            # Truncated so the stored graph stays small.
                            "analysis_summary": str(analysis)[:500],
                        }
                    )

            except Exception as e:
                print(
                    f"Error analyzing {src.get('name', 'unknown')} -> {dst.get('name', 'unknown')}: {e}"
                )
                continue

    print(
        f"Created {len(graph['edges'])} compatible edges from {pair_count} analyzed pairs"
    )
    return graph
284
+
285
+
286
+ def _extract_compatibility_info(analysis_result):
287
+ """Extract structured compatibility information from analysis result."""
288
+
289
+ # Handle different result formats
290
+ if isinstance(analysis_result, list) and len(analysis_result) > 0:
291
+ analysis_result = analysis_result[0]
292
+
293
+ # Convert result to string for analysis
294
+ if isinstance(analysis_result, dict):
295
+ if "content" in analysis_result:
296
+ analysis_text = analysis_result["content"]
297
+ elif "result" in analysis_result:
298
+ analysis_text = analysis_result["result"]
299
+ else:
300
+ analysis_text = str(analysis_result)
301
+ else:
302
+ analysis_text = str(analysis_result)
303
+
304
+ # Basic parsing to extract key information
305
+ compatibility_score = 50 # Default moderate score
306
+ is_compatible = False
307
+ confidence = 50
308
+
309
+ # Simple text analysis to determine compatibility
310
+ analysis_lower = analysis_text.lower()
311
+
312
+ # Look for compatibility indicators
313
+ if "highly compatible" in analysis_lower:
314
+ compatibility_score = 85
315
+ is_compatible = True
316
+ confidence = 90
317
+ elif "compatible" in analysis_lower and "incompatible" not in analysis_lower:
318
+ compatibility_score = 70
319
+ is_compatible = True
320
+ confidence = 75
321
+ elif "partially compatible" in analysis_lower:
322
+ compatibility_score = 60
323
+ is_compatible = True
324
+ confidence = 60
325
+ elif "incompatible" in analysis_lower:
326
+ compatibility_score = 20
327
+ is_compatible = False
328
+ confidence = 80
329
+
330
+ # Look for automation indicators
331
+ automation_ready = "automatic" in analysis_lower or "direct" in analysis_lower
332
+
333
+ return {
334
+ "compatibility_score": compatibility_score,
335
+ "is_compatible": is_compatible,
336
+ "confidence": confidence,
337
+ "automation_ready": automation_ready,
338
+ }
339
+
340
+
341
+ def _save_graph(graph_data, output_path):
342
+ """Save the graph in multiple formats."""
343
+
344
+ output_files = {}
345
+
346
+ try:
347
+ # Save as JSON
348
+ json_path = f"{output_path}.json"
349
+ with open(json_path, "w") as f:
350
+ json.dump(graph_data, f, indent=2)
351
+ output_files["json"] = json_path
352
+ print(f"Saved JSON graph: {json_path}")
353
+
354
+ # Save as pickle for Python use
355
+ pickle_path = f"{output_path}.pkl"
356
+ with open(pickle_path, "wb") as f:
357
+ pickle.dump(graph_data, f)
358
+ output_files["pickle"] = pickle_path
359
+ print(f"Saved pickle graph: {pickle_path}")
360
+
361
+ except Exception as e:
362
+ print(f"Error saving graph: {e}")
363
+ raise e
364
+
365
+ return output_files
366
+
367
+
368
+ def _generate_graph_stats(graph_data):
369
+ """Generate statistics about the graph."""
370
+
371
+ try:
372
+ nodes = graph_data.get("nodes", [])
373
+ edges = graph_data.get("edges", [])
374
+
375
+ total_nodes = len(nodes)
376
+ total_edges = len(edges)
377
+
378
+ # Calculate edge density
379
+ max_possible_edges = total_nodes * (total_nodes - 1) if total_nodes > 1 else 1
380
+ edge_density = total_edges / max_possible_edges if max_possible_edges > 0 else 0
381
+
382
+ # Calculate compatibility score statistics
383
+ scores = [edge.get("compatibility_score", 0) for edge in edges]
384
+ avg_score = sum(scores) / len(scores) if scores else 0
385
+ high_score_edges = len([s for s in scores if s >= 80])
386
+
387
+ # Calculate automation readiness
388
+ automation_ready_edges = len(
389
+ [e for e in edges if e.get("automation_ready", False)]
390
+ )
391
+ automation_percentage = (
392
+ (automation_ready_edges / total_edges * 100) if total_edges > 0 else 0
393
+ )
394
+
395
+ # Category distribution
396
+ categories = {}
397
+ for node in nodes:
398
+ cat = node.get("category", "unknown")
399
+ categories[cat] = categories.get(cat, 0) + 1
400
+
401
+ return {
402
+ "total_nodes": total_nodes,
403
+ "total_edges": total_edges,
404
+ "edge_density": edge_density,
405
+ "compatibility_scores": {
406
+ "average": avg_score,
407
+ "count_high": high_score_edges,
408
+ },
409
+ "automation_ready_percentage": automation_percentage,
410
+ "categories": categories,
411
+ }
412
+
413
+ except Exception as e:
414
+ print(f"Error generating stats: {e}")
415
+ return {
416
+ "total_nodes": len(graph_data.get("nodes", [])),
417
+ "total_edges": len(graph_data.get("edges", [])),
418
+ "error": str(e),
419
+ }
420
+
421
+
422
def _load_cached_graph(cache_path, output_path):
    """Reload a previously cached graph and build a result payload from it.

    Re-raises on any failure so the caller's error handling takes over.
    """
    try:
        with open(cache_path, "rb") as handle:
            cached = pickle.load(handle)

        graph = cached["graph_data"]
        files = cached["output_files"]

        return {
            "status": "loaded_from_cache",
            "graph_files": files,
            # Stats are recomputed rather than trusted from the cache.
            "statistics": _generate_graph_stats(graph),
            "tools_analyzed": graph["metadata"]["total_tools"],
            "edges_created": len(graph["edges"]),
            "timestamp": cached["creation_time"],
            "cache_loaded": True,
        }

    except Exception as e:
        print(f"Error loading cached graph: {e}")
        # Propagate the failure to the caller instead of returning a payload.
        raise e