tooluniverse 1.0.7__py3-none-any.whl → 1.0.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tooluniverse/__init__.py +37 -14
- tooluniverse/admetai_tool.py +16 -5
- tooluniverse/base_tool.py +36 -0
- tooluniverse/biogrid_tool.py +118 -0
- tooluniverse/build_optimizer.py +87 -0
- tooluniverse/cache/__init__.py +3 -0
- tooluniverse/cache/memory_cache.py +99 -0
- tooluniverse/cache/result_cache_manager.py +235 -0
- tooluniverse/cache/sqlite_backend.py +257 -0
- tooluniverse/clinvar_tool.py +90 -0
- tooluniverse/compose_scripts/output_summarizer.py +87 -33
- tooluniverse/compose_tool.py +2 -2
- tooluniverse/custom_tool.py +28 -0
- tooluniverse/data/adverse_event_tools.json +97 -98
- tooluniverse/data/agentic_tools.json +81 -162
- tooluniverse/data/arxiv_tools.json +1 -4
- tooluniverse/data/compose_tools.json +0 -54
- tooluniverse/data/core_tools.json +1 -4
- tooluniverse/data/dataset_tools.json +7 -7
- tooluniverse/data/doaj_tools.json +1 -3
- tooluniverse/data/drug_discovery_agents.json +282 -0
- tooluniverse/data/europe_pmc_tools.json +1 -2
- tooluniverse/data/genomics_tools.json +174 -0
- tooluniverse/data/geo_tools.json +86 -0
- tooluniverse/data/literature_search_tools.json +15 -35
- tooluniverse/data/markitdown_tools.json +51 -0
- tooluniverse/data/monarch_tools.json +1 -2
- tooluniverse/data/openalex_tools.json +1 -5
- tooluniverse/data/opentarget_tools.json +8 -16
- tooluniverse/data/output_summarization_tools.json +23 -20
- tooluniverse/data/packages/bioinformatics_core_tools.json +2 -2
- tooluniverse/data/packages/cheminformatics_tools.json +1 -1
- tooluniverse/data/packages/genomics_tools.json +1 -1
- tooluniverse/data/packages/single_cell_tools.json +1 -1
- tooluniverse/data/packages/structural_biology_tools.json +1 -1
- tooluniverse/data/pmc_tools.json +1 -4
- tooluniverse/data/ppi_tools.json +139 -0
- tooluniverse/data/pubmed_tools.json +1 -3
- tooluniverse/data/semantic_scholar_tools.json +1 -2
- tooluniverse/data/tool_composition_tools.json +2 -4
- tooluniverse/data/unified_guideline_tools.json +206 -4
- tooluniverse/data/xml_tools.json +15 -15
- tooluniverse/data/zenodo_tools.json +1 -2
- tooluniverse/dbsnp_tool.py +71 -0
- tooluniverse/default_config.py +6 -0
- tooluniverse/ensembl_tool.py +61 -0
- tooluniverse/execute_function.py +235 -76
- tooluniverse/generate_tools.py +303 -20
- tooluniverse/genomics_gene_search_tool.py +56 -0
- tooluniverse/geo_tool.py +116 -0
- tooluniverse/gnomad_tool.py +63 -0
- tooluniverse/logging_config.py +64 -2
- tooluniverse/markitdown_tool.py +159 -0
- tooluniverse/mcp_client_tool.py +10 -5
- tooluniverse/molecule_2d_tool.py +9 -3
- tooluniverse/molecule_3d_tool.py +9 -3
- tooluniverse/output_hook.py +217 -150
- tooluniverse/smcp.py +18 -10
- tooluniverse/smcp_server.py +89 -199
- tooluniverse/string_tool.py +112 -0
- tooluniverse/tools/{MultiAgentLiteratureSearch.py → ADMETAnalyzerAgent.py} +18 -18
- tooluniverse/tools/ArXiv_search_papers.py +3 -3
- tooluniverse/tools/CMA_Guidelines_Search.py +52 -0
- tooluniverse/tools/CORE_search_papers.py +3 -3
- tooluniverse/tools/ClinVar_search_variants.py +52 -0
- tooluniverse/tools/ClinicalTrialDesignAgent.py +63 -0
- tooluniverse/tools/CompoundDiscoveryAgent.py +59 -0
- tooluniverse/tools/DOAJ_search_articles.py +2 -2
- tooluniverse/tools/DiseaseAnalyzerAgent.py +52 -0
- tooluniverse/tools/DrugInteractionAnalyzerAgent.py +52 -0
- tooluniverse/tools/DrugOptimizationAgent.py +63 -0
- tooluniverse/tools/Ensembl_lookup_gene_by_symbol.py +52 -0
- tooluniverse/tools/EuropePMC_search_articles.py +1 -1
- tooluniverse/tools/GIN_Guidelines_Search.py +52 -0
- tooluniverse/tools/GWAS_search_associations_by_gene.py +52 -0
- tooluniverse/tools/LiteratureSynthesisAgent.py +59 -0
- tooluniverse/tools/PMC_search_papers.py +3 -3
- tooluniverse/tools/PubMed_search_articles.py +2 -2
- tooluniverse/tools/SemanticScholar_search_papers.py +1 -1
- tooluniverse/tools/UCSC_get_genes_by_region.py +67 -0
- tooluniverse/tools/Zenodo_search_records.py +1 -1
- tooluniverse/tools/__init__.py +33 -3
- tooluniverse/tools/convert_to_markdown.py +59 -0
- tooluniverse/tools/dbSNP_get_variant_by_rsid.py +46 -0
- tooluniverse/tools/gnomAD_query_variant.py +52 -0
- tooluniverse/tools/openalex_literature_search.py +4 -4
- tooluniverse/ucsc_tool.py +60 -0
- tooluniverse/unified_guideline_tools.py +1175 -57
- tooluniverse/utils.py +51 -4
- tooluniverse/zenodo_tool.py +2 -1
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/METADATA +10 -3
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/RECORD +96 -61
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/entry_points.txt +0 -3
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/WHEEL +0 -0
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/licenses/LICENSE +0 -0
- {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.9.dist-info}/top_level.txt +0 -0
tooluniverse/smcp_server.py
CHANGED

@@ -7,6 +7,7 @@ It creates a minimal SMCP server that exposes all ToolUniverse tools as MCP tool
 """
 
 import argparse
+import os
 import sys
 from .smcp import SMCP
 
@@ -134,6 +135,22 @@ def run_stdio_server():
     This function provides compatibility with the original MCP server's run_claude_desktop function.
     It accepts the same arguments as run_smcp_server but forces transport='stdio'.
     """
+    # Set environment variable and reconfigure logging for stdio mode
+    os.environ["TOOLUNIVERSE_STDIO_MODE"] = "1"
+
+    # Import and reconfigure logging to stderr
+    from .logging_config import reconfigure_for_stdio
+
+    reconfigure_for_stdio()
+    # Ensure stdout is line-buffered for immediate JSON-RPC flushing in stdio mode
+    try:
+        import sys as _sys
+
+        if hasattr(_sys.stdout, "reconfigure"):
+            _sys.stdout.reconfigure(line_buffering=True)
+    except Exception:
+        pass
+
     parser = argparse.ArgumentParser(
         description="Start SMCP (Scientific Model Context Protocol) Server with stdio transport",
         formatter_class=argparse.RawDescriptionHelpFormatter,
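The stdio changes above route logging to stderr and line-buffer stdout so each JSON-RPC frame is flushed as soon as it is printed. A minimal standalone sketch of that pattern, where the `handle` function and the request loop are illustrative and not ToolUniverse code:

```python
import json
import sys

# Keep stdout reserved for protocol frames; send diagnostics to stderr.
if hasattr(sys.stdout, "reconfigure"):
    sys.stdout.reconfigure(line_buffering=True)  # flush after every newline


def handle(request: dict) -> dict:
    # Hypothetical echo handler standing in for real tool dispatch.
    return {"jsonrpc": "2.0", "id": request.get("id"), "result": {"echo": request}}


for line in sys.stdin:
    if not line.strip():
        continue
    response = handle(json.loads(line))
    print(json.dumps(response))                      # newline-delimited JSON-RPC on stdout
    print("handled one request", file=sys.stderr)    # logging stays off stdout
```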
@@ -297,107 +314,35 @@ Examples:
         from .execute_function import ToolUniverse
 
         tu = ToolUniverse()
-
+        # Use ToolUniverse API to list categories consistently
+        stats = tu.list_built_in_tools(mode="config", scan_all=False)
 
         print("Available tool categories:", file=sys.stderr)
         print("=" * 50, file=sys.stderr)
 
-
-
-
-
-
-
-
-
-
-            if category in [
-                "uniprot",
-                "ChEMBL",
-                "opentarget",
-                "pubchem",
-                "hpa",
-                "rcsb_pdb",
-                "reactome",
-                "go",
-            ]:
-                scientific_db.append(category)
-            elif category in [
-                "EuropePMC",
-                "semantic_scholar",
-                "pubtator",
-                "OpenAlex",
-            ]:
-                literature.append(category)
-            elif category.startswith("software_"):
-                software.append(category)
-            elif category in [
-                "special_tools",
-                "tool_finder",
-                "tool_composition",
-                "agents",
-            ]:
-                special.append(category)
-            elif category in [
-                "clinical_trials",
-                "fda_drug_label",
-                "fda_drug_adverse_event",
-                "dailymed",
-                "medlineplus",
-            ]:
-                clinical.append(category)
-            else:
-                other.append(category)
-
-        if scientific_db:
-            print("\n🔬 Scientific Databases:", file=sys.stderr)
-            for cat in scientific_db:
-                print(f" {cat}", file=sys.stderr)
-
-        if literature:
-            print("\n📚 Literature & Knowledge:", file=sys.stderr)
-            for cat in literature:
-                print(f" {cat}", file=sys.stderr)
-
-        if clinical:
-            print("\n🏥 Clinical & Drug Information:", file=sys.stderr)
-            for cat in clinical:
-                print(f" {cat}", file=sys.stderr)
-
-        if software:
-            print("\n💻 Software Tools:", file=sys.stderr)
-            for cat in software[:5]:  # Show first 5
-                print(f" {cat}", file=sys.stderr)
-            if len(software) > 5:
-                print(
-                    f" ... and {len(software, file=sys.stderr) - 5} more software categories"
-                )
-
-        if special:
-            print("\n🛠 Special & Meta Tools:", file=sys.stderr)
-            for cat in special:
-                print(f" {cat}", file=sys.stderr)
-
-        if other:
-            print("\n📂 Other Categories:", file=sys.stderr)
-            for cat in other:
-                print(f" {cat}", file=sys.stderr)
+        categories = stats.get("categories", {})
+        # Sort by count desc, then name asc
+        sorted_items = sorted(
+            categories.items(), key=lambda kv: (-kv[1].get("count", 0), kv[0])
+        )
+        for key, info in sorted_items:
+            display = key.replace("_", " ").title()
+            count = info.get("count", 0)
+            print(f" {display}: {count}", file=sys.stderr)
 
-        print(f"\nTotal: {len(tool_types, file=sys.stderr)} categories available")
-        print("\nCommon combinations:", file=sys.stderr)
         print(
-            "
+            f"\nTotal categories: {stats.get('total_categories', 0)}",
             file=sys.stderr,
         )
         print(
-            "
+            f"Total unique tools: {stats.get('total_tools', 0)}",
             file=sys.stderr,
         )
+
         print(
-            "
+            "\nTip: Use --exclude-categories or --include-tools to customize loading",
             file=sys.stderr,
         )
-        print("   Minimal setup: special_tools tool_finder", file=sys.stderr)
 
     except Exception as e:
         print(f"❌ Error listing categories: {e}", file=sys.stderr)
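Both listing paths now take whatever `list_built_in_tools(mode="config")` reports and sort it by tool count, then by name. A quick illustration of that sort key on made-up category data (the category names and counts below are hypothetical):

```python
categories = {
    "genomics_tools": {"count": 42},
    "ppi_tools": {"count": 7},
    "adverse_event_tools": {"count": 42},
}

# Largest count first; ties broken alphabetically by category key.
sorted_items = sorted(
    categories.items(), key=lambda kv: (-kv[1].get("count", 0), kv[0])
)
for key, info in sorted_items:
    print(f"{key.replace('_', ' ').title()}: {info['count']}")
# Adverse Event Tools: 42
# Genomics Tools: 42
# Ppi Tools: 7
```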
@@ -415,13 +360,20 @@ Examples:
         print("Available tools:", file=sys.stderr)
         print("=" * 50, file=sys.stderr)
 
-        # Group tools by category
+        # Group tools by category (use ToolUniverse's canonical 'type' field)
         tools_by_category = {}
         for tool in tu.all_tools:
-
+            # Handle both dict and object tool formats
+            if isinstance(tool, dict):
+                tool_type = tool.get("type", "unknown")
+                tool_name = tool.get("name", "unknown")
+            else:
+                tool_type = getattr(tool, "type", "unknown")
+                tool_name = getattr(tool, "name", "unknown")
+
             if tool_type not in tools_by_category:
                 tools_by_category[tool_type] = []
-            tools_by_category[tool_type].append(
+            tools_by_category[tool_type].append(tool_name)
 
         total_tools = 0
         for category in sorted(tools_by_category.keys()):
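The grouping loop now accepts tools stored either as plain config dicts or as objects with `type` and `name` attributes. A small self-contained illustration with invented tool entries:

```python
class _ToolObj:
    # Illustrative attribute values only.
    type = "genomics"
    name = "Ensembl_lookup_gene_by_symbol"


tools = [{"type": "ppi", "name": "STRING_get_interactions"}, _ToolObj()]

tools_by_category = {}
for tool in tools:
    if isinstance(tool, dict):
        tool_type = tool.get("type", "unknown")
        tool_name = tool.get("name", "unknown")
    else:
        tool_type = getattr(tool, "type", "unknown")
        tool_name = getattr(tool, "name", "unknown")
    tools_by_category.setdefault(tool_type, []).append(tool_name)

print(tools_by_category)
# {'ppi': ['STRING_get_interactions'], 'genomics': ['Ensembl_lookup_gene_by_symbol']}
```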
@@ -554,7 +506,7 @@ Examples:
             print("🔗 Hooks disabled", file=sys.stderr)
 
     print(f"⚡ Max workers: {args.max_workers}", file=sys.stderr)
-    print()
+    print(file=sys.stderr)
 
     # Create SMCP server with hook support
     server = SMCP(
@@ -769,96 +721,27 @@ Examples:
         from .execute_function import ToolUniverse
 
         tu = ToolUniverse()
-
+        # Use ToolUniverse API to list categories consistently
+        stats = tu.list_built_in_tools(mode="config", scan_all=False)
 
         print("Available tool categories:")
         print("=" * 50)
 
-
-
-
-
-
-
-
-
-
-            if category in [
-                "uniprot",
-                "ChEMBL",
-                "opentarget",
-                "pubchem",
-                "hpa",
-                "rcsb_pdb",
-                "reactome",
-                "go",
-            ]:
-                scientific_db.append(category)
-            elif category in [
-                "EuropePMC",
-                "semantic_scholar",
-                "pubtator",
-                "OpenAlex",
-            ]:
-                literature.append(category)
-            elif category.startswith("software_"):
-                software.append(category)
-            elif category in [
-                "special_tools",
-                "tool_finder",
-                "tool_composition",
-                "agents",
-            ]:
-                special.append(category)
-            elif category in [
-                "clinical_trials",
-                "fda_drug_label",
-                "fda_drug_adverse_event",
-                "dailymed",
-                "medlineplus",
-            ]:
-                clinical.append(category)
-            else:
-                other.append(category)
-
-        if scientific_db:
-            print("\n🔬 Scientific Databases:")
-            for cat in scientific_db:
-                print(f" {cat}")
-
-        if literature:
-            print("\n📚 Literature & Knowledge:")
-            for cat in literature:
-                print(f" {cat}")
-
-        if clinical:
-            print("\n🏥 Clinical & Drug Information:")
-            for cat in clinical:
-                print(f" {cat}")
-
-        if software:
-            print("\n💻 Software Tools:")
-            for cat in software[:5]:  # Show first 5
-                print(f" {cat}")
-            if len(software) > 5:
-                print(f" ... and {len(software) - 5} more software categories")
-
-        if special:
-            print("\n🛠 Special & Meta Tools:")
-            for cat in special:
-                print(f" {cat}")
-
-        if other:
-            print("\n📂 Other Categories:")
-            for cat in other:
-                print(f" {cat}")
-
-        print(f"\nTotal: {len(tool_types)} categories available")
-        print("\nCommon combinations:")
-        print("   Scientific research: uniprot ChEMBL opentarget pubchem hpa")
-        print("   Drug discovery: ChEMBL fda_drug_label clinical_trials pubchem")
-        print("   Literature analysis: EuropePMC semantic_scholar pubtator")
-        print("   Minimal setup: special_tools tool_finder")
+        categories = stats.get("categories", {})
+        # Sort by count desc, then name asc
+        sorted_items = sorted(
+            categories.items(), key=lambda kv: (-kv[1].get("count", 0), kv[0])
+        )
+        for key, info in sorted_items:
+            display = key.replace("_", " ").title()
+            count = info.get("count", 0)
+            print(f" {display}: {count}")
+
+        print(f"\nTotal categories: {stats.get('total_categories', 0)}")
+        print(f"Total unique tools: {stats.get('total_tools', 0)}")
+        print(
+            "\nTip: Use --exclude-categories or --include-tools to customize loading"
+        )
 
     except Exception as e:
         print(f"❌ Error listing categories: {e}")
@@ -871,30 +754,37 @@ Examples:
         from .execute_function import ToolUniverse
 
         tu = ToolUniverse()
-
-
-
-
-
-
-
-
-
-
-
-
+        # Reuse ToolUniverse selection logic directly (applies API key skipping internally)
+        tool_config_files = {}
+        if args.tool_config_files:
+            for config_spec in args.tool_config_files:
+                if ":" in config_spec:
+                    category, path = config_spec.split(":", 1)
+                    tool_config_files[category] = path
+
+        tu.load_tools(
+            tool_type=(
+                args.categories
+                if args.categories and len(args.categories) > 0
+                else None
+            ),
+            exclude_tools=args.exclude_tools,
+            exclude_categories=args.exclude_categories,
+            include_tools=args.include_tools,
+            tool_config_files=(tool_config_files or None),
+            tools_file=args.tools_file,
+            include_tool_types=args.include_tool_types,
+            exclude_tool_types=args.exclude_tool_types,
+        )
 
-
-
-            tools = sorted(tools_by_category[category])
-            print(f"\n📁 {category} ({len(tools)} tools):")
-            for tool in tools[:10]:  # Show first 10 tools per category
-                print(f" {tool}")
-            if len(tools) > 10:
-                print(f" ... and {len(tools) - 10} more tools")
-            total_tools += len(tools)
+        # Names of tools that are actually available under current configuration
+        tool_names = tu.get_available_tools(name_only=True)
 
-        print(
+        print("Available tools (for this server configuration):")
+        print("=" * 50)
+        for name in sorted(tool_names)[:200]:  # cap output for readability
+            print(f" {name}")
+        print(f"\nTotal: {len(tool_names)} tools available")
         print("\nNote: Use --exclude-tools to exclude specific tools by name")
         print("      Use --exclude-categories to exclude entire categories")
 
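`--list-tools` now delegates selection to `ToolUniverse.load_tools` and reads the result back with `get_available_tools(name_only=True)`, so the listing matches what the server would actually expose. A rough standalone sketch of the same flow; the category names are illustrative and only keyword arguments visible in the diff are used:

```python
from tooluniverse.execute_function import ToolUniverse

tu = ToolUniverse()

# Load a couple of categories, mirroring what the CLI does with --categories;
# "uniprot" and "ChEMBL" are illustrative category names.
tu.load_tools(tool_type=["uniprot", "ChEMBL"], exclude_tools=None, include_tools=None)

# Names of tools actually available under this configuration.
for name in sorted(tu.get_available_tools(name_only=True)):
    print(name)
```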
tooluniverse/string_tool.py
ADDED

@@ -0,0 +1,112 @@
+"""
+STRING Database REST API Tool
+
+This tool provides access to protein-protein interaction data from the STRING
+database. STRING is a database of known and predicted protein-protein
+interactions.
+"""
+
+import requests
+from typing import Dict, Any, List
+from .base_tool import BaseTool
+from .tool_registry import register_tool
+
+STRING_BASE_URL = "https://string-db.org/api"
+
+
+@register_tool("STRINGRESTTool")
+class STRINGRESTTool(BaseTool):
+    """
+    STRING Database REST API tool.
+    Generic wrapper for STRING API endpoints defined in ppi_tools.json.
+    """
+
+    def __init__(self, tool_config):
+        super().__init__(tool_config)
+        fields = tool_config.get("fields", {})
+        parameter = tool_config.get("parameter", {})
+
+        self.endpoint_template: str = fields.get("endpoint", "/tsv/network")
+        self.required: List[str] = parameter.get("required", [])
+        self.output_format: str = fields.get("return_format", "TSV")
+
+    def _build_url(self, arguments: Dict[str, Any]) -> str | Dict[str, Any]:
+        """Build URL for STRING API request."""
+        url_path = self.endpoint_template
+        return STRING_BASE_URL + url_path
+
+    def _build_params(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
+        """Build parameters for STRING API request."""
+        params = {}
+
+        # Map protein IDs to STRING format
+        if "protein_ids" in arguments:
+            protein_ids = arguments["protein_ids"]
+            if isinstance(protein_ids, list):
+                params["identifiers"] = "\r".join(protein_ids)
+            else:
+                params["identifiers"] = str(protein_ids)
+
+        # Add other parameters
+        if "species" in arguments:
+            params["species"] = arguments["species"]
+        if "confidence_score" in arguments:
+            params["required_score"] = int(arguments["confidence_score"] * 1000)
+        if "limit" in arguments:
+            params["limit"] = arguments["limit"]
+        if "network_type" in arguments:
+            params["network_type"] = arguments["network_type"]
+
+        return params
+
+    def _make_request(self, url: str, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Perform a GET request and handle common errors."""
+        try:
+            response = requests.get(url, params=params, timeout=30)
+            response.raise_for_status()
+
+            if self.output_format == "TSV":
+                return self._parse_tsv_response(response.text)
+            else:
+                return response.json()
+
+        except requests.exceptions.RequestException as e:
+            return {"error": f"Request failed: {str(e)}"}
+        except Exception as e:
+            return {"error": f"Unexpected error: {str(e)}"}
+
+    def _parse_tsv_response(self, text: str) -> Dict[str, Any]:
+        """Parse TSV response from STRING API."""
+        lines = text.strip().split("\n")
+        if len(lines) < 2:
+            return {"data": [], "error": "No data returned"}
+
+        # Parse header
+        header = lines[0].split("\t")
+
+        # Parse data rows
+        data = []
+        for line in lines[1:]:
+            if line.strip():
+                values = line.split("\t")
+                row = {}
+                for i, value in enumerate(values):
+                    if i < len(header):
+                        row[header[i]] = value
+                data.append(row)
+
+        return {"data": data, "header": header}
+
+    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute the tool with given arguments."""
+        # Validate required parameters
+        for param in self.required:
+            if param not in arguments:
+                return {"error": f"Missing required parameter: {param}"}
+
+        url = self._build_url(arguments)
+        if isinstance(url, dict) and "error" in url:
+            return url
+
+        params = self._build_params(arguments)
+        return self._make_request(url, params)
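`STRINGRESTTool` takes its endpoint, required parameters, and return format from a tool config, normally one of the entries in `ppi_tools.json`. A sketch of driving the class directly with a hand-written config; the config values and tool name below are illustrative rather than copied from `ppi_tools.json`, and it assumes `BaseTool` needs nothing beyond this dict:

```python
from tooluniverse.string_tool import STRINGRESTTool

# Hypothetical config mirroring the fields the class reads in __init__.
tool_config = {
    "name": "STRING_get_interactions",  # illustrative name
    "fields": {"endpoint": "/tsv/network", "return_format": "TSV"},
    "parameter": {"required": ["protein_ids"]},
}

tool = STRINGRESTTool(tool_config)
result = tool.run(
    {
        "protein_ids": ["TP53", "MDM2"],  # joined with "\r" as STRING expects
        "species": 9606,                  # human NCBI taxonomy ID
        "confidence_score": 0.7,          # becomes required_score=700
    }
)
print(result.get("header"))
print(result.get("data", [])[:3])
```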
tooluniverse/tools/{MultiAgentLiteratureSearch.py → ADMETAnalyzerAgent.py}
RENAMED

@@ -1,33 +1,33 @@
 """
-
+ADMETAnalyzerAgent
 
-
+AI agent that analyzes ADMET data and provides insights on drug-likeness and safety profiles
 """
 
 from typing import Any, Optional, Callable
 from ._shared_client import get_shared_client
 
 
-def
-
-
-
+def ADMETAnalyzerAgent(
+    compounds: str,
+    admet_data: str,
+    disease_context: Optional[str] = "",
     *,
     stream_callback: Optional[Callable[[str], None]] = None,
     use_cache: bool = False,
     validate: bool = True,
 ) -> Any:
     """
-
+    AI agent that analyzes ADMET data and provides insights on drug-likeness and safety profiles
 
     Parameters
     ----------
-
-
-
-
-
-
+    compounds : str
+        List of compounds to analyze (comma-separated)
+    admet_data : str
+        ADMET data from computational tools to analyze
+    disease_context : str
+        Disease context for ADMET evaluation
     stream_callback : Callable, optional
         Callback for streaming output
     use_cache : bool, default False

@@ -43,11 +43,11 @@ def MultiAgentLiteratureSearch(
 
     return get_shared_client().run_one_function(
         {
-            "name": "
+            "name": "ADMETAnalyzerAgent",
             "arguments": {
-                "
-                "
-                "
+                "compounds": compounds,
+                "admet_data": admet_data,
+                "disease_context": disease_context,
             },
         },
         stream_callback=stream_callback,

@@ -56,4 +56,4 @@ def MultiAgentLiteratureSearch(
     )
 
 
-__all__ = ["
+__all__ = ["ADMETAnalyzerAgent"]
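The renamed wrapper stays a thin pass-through to `run_one_function`, so callers just invoke the module-level function. A usage sketch with made-up inputs, assuming the function is re-exported from `tooluniverse.tools` as the updated `tools/__init__.py` suggests; the call goes through the shared client, so it needs a working agent backend:

```python
from tooluniverse.tools import ADMETAnalyzerAgent

# Illustrative inputs only; the strings are free-form per the docstring.
report = ADMETAnalyzerAgent(
    compounds="aspirin, ibuprofen",
    admet_data="logP=1.2; hERG inhibition low; CYP3A4 substrate",
    disease_context="chronic inflammatory disease",
    use_cache=False,
)
print(report)
```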
tooluniverse/tools/ArXiv_search_papers.py
CHANGED

@@ -10,9 +10,9 @@ from ._shared_client import get_shared_client
 
 def ArXiv_search_papers(
     query: str,
-    limit: int,
-    sort_by: str,
-    sort_order: str,
+    limit: Optional[int] = 10,
+    sort_by: Optional[str] = "relevance",
+    sort_order: Optional[str] = "descending",
     *,
     stream_callback: Optional[Callable[[str], None]] = None,
     use_cache: bool = False,
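With `limit`, `sort_by`, and `sort_order` now optional, the wrapper can be called with only a query and falls back to 10 results sorted by relevance in descending order. An illustrative call, again assuming the re-export from `tooluniverse.tools`:

```python
from tooluniverse.tools import ArXiv_search_papers

# Only the query is required now; limit/sort_by/sort_order default to
# 10 / "relevance" / "descending" per the new signature.
papers = ArXiv_search_papers("protein-protein interaction prediction")
print(papers)
```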
tooluniverse/tools/CMA_Guidelines_Search.py
ADDED

@@ -0,0 +1,52 @@
+"""
+CMA_Guidelines_Search
+
+Search Canadian Medical Association (CMA) Infobase guidelines. Contains over 1200 evidence-based ...
+"""
+
+from typing import Any, Optional, Callable
+from ._shared_client import get_shared_client
+
+
+def CMA_Guidelines_Search(
+    query: str,
+    limit: int,
+    *,
+    stream_callback: Optional[Callable[[str], None]] = None,
+    use_cache: bool = False,
+    validate: bool = True,
+) -> list[Any]:
+    """
+    Search Canadian Medical Association (CMA) Infobase guidelines. Contains over 1200 evidence-based ...
+
+    Parameters
+    ----------
+    query : str
+        Medical condition, treatment, or clinical topic to search for in CMA guidelin...
+    limit : int
+        Maximum number of guidelines to return (default: 10)
+    stream_callback : Callable, optional
+        Callback for streaming output
+    use_cache : bool, default False
+        Enable caching
+    validate : bool, default True
+        Validate parameters
+
+    Returns
+    -------
+    list[Any]
+    """
+    # Handle mutable defaults to avoid B006 linting error
+
+    return get_shared_client().run_one_function(
+        {
+            "name": "CMA_Guidelines_Search",
+            "arguments": {"query": query, "limit": limit},
+        },
+        stream_callback=stream_callback,
+        use_cache=use_cache,
+        validate=validate,
+    )
+
+
+__all__ = ["CMA_Guidelines_Search"]
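A usage sketch for the new guideline wrapper; `limit` has no default in the signature itself, so it is passed explicitly here, and the import path is assumed as above:

```python
from tooluniverse.tools import CMA_Guidelines_Search

# Illustrative query; limit must be supplied by the caller.
guidelines = CMA_Guidelines_Search(query="type 2 diabetes management", limit=10)
for guideline in guidelines:
    print(guideline)
```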
tooluniverse/tools/CORE_search_papers.py
CHANGED

@@ -11,9 +11,9 @@ from ._shared_client import get_shared_client
 def CORE_search_papers(
     query: str,
     limit: int,
-    year_from: int,
-    year_to: int,
-    language: str,
+    year_from: Optional[int] = None,
+    year_to: Optional[int] = None,
+    language: Optional[str] = None,
     *,
     stream_callback: Optional[Callable[[str], None]] = None,
     use_cache: bool = False,
tooluniverse/tools/ClinVar_search_variants.py
ADDED

@@ -0,0 +1,52 @@
+"""
+ClinVar_search_variants
+
+Search ClinVar via NCBI E-utilities (esearch→esummary) and return concise variant records for a q...
+"""
+
+from typing import Any, Optional, Callable
+from ._shared_client import get_shared_client
+
+
+def ClinVar_search_variants(
+    query: str,
+    retmax: Optional[int] = 5,
+    *,
+    stream_callback: Optional[Callable[[str], None]] = None,
+    use_cache: bool = False,
+    validate: bool = True,
+) -> list[Any]:
+    """
+    Search ClinVar via NCBI E-utilities (esearch→esummary) and return concise variant records for a q...
+
+    Parameters
+    ----------
+    query : str
+        ClinVar search term (e.g., BRCA1).
+    retmax : int
+        Max records.
+    stream_callback : Callable, optional
+        Callback for streaming output
+    use_cache : bool, default False
+        Enable caching
+    validate : bool, default True
+        Validate parameters
+
+    Returns
+    -------
+    list[Any]
+    """
+    # Handle mutable defaults to avoid B006 linting error
+
+    return get_shared_client().run_one_function(
+        {
+            "name": "ClinVar_search_variants",
+            "arguments": {"query": query, "retmax": retmax},
+        },
+        stream_callback=stream_callback,
+        use_cache=use_cache,
+        validate=validate,
+    )
+
+
+__all__ = ["ClinVar_search_variants"]
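And a matching sketch for the ClinVar wrapper, whose `retmax` defaults to 5 concise variant records; the import path is assumed as above:

```python
from tooluniverse.tools import ClinVar_search_variants

# Illustrative gene symbol; retmax=5 matches the wrapper's default.
variants = ClinVar_search_variants(query="BRCA1", retmax=5)
for variant in variants:
    print(variant)
```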