tooluniverse 1.0.9.1__py3-none-any.whl → 1.0.11__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of tooluniverse might be problematic.
- tooluniverse/__init__.py +57 -1
- tooluniverse/admetai_tool.py +1 -1
- tooluniverse/agentic_tool.py +65 -17
- tooluniverse/base_tool.py +19 -8
- tooluniverse/blast_tool.py +132 -0
- tooluniverse/boltz_tool.py +3 -3
- tooluniverse/cache/result_cache_manager.py +167 -12
- tooluniverse/cbioportal_tool.py +42 -0
- tooluniverse/clinvar_tool.py +268 -74
- tooluniverse/compose_scripts/drug_safety_analyzer.py +1 -1
- tooluniverse/compose_scripts/multi_agent_literature_search.py +1 -1
- tooluniverse/compose_scripts/output_summarizer.py +4 -4
- tooluniverse/compose_scripts/tool_discover.py +1941 -443
- tooluniverse/compose_scripts/tool_graph_composer.py +1 -1
- tooluniverse/compose_scripts/tool_metadata_generator.py +1 -1
- tooluniverse/compose_tool.py +9 -9
- tooluniverse/core_tool.py +2 -2
- tooluniverse/ctg_tool.py +4 -4
- tooluniverse/custom_tool.py +1 -1
- tooluniverse/data/agentic_tools.json +0 -370
- tooluniverse/data/alphafold_tools.json +6 -6
- tooluniverse/data/blast_tools.json +112 -0
- tooluniverse/data/cbioportal_tools.json +87 -0
- tooluniverse/data/clinvar_tools.json +235 -0
- tooluniverse/data/compose_tools.json +0 -89
- tooluniverse/data/dbsnp_tools.json +275 -0
- tooluniverse/data/emdb_tools.json +61 -0
- tooluniverse/data/ensembl_tools.json +259 -0
- tooluniverse/data/file_download_tools.json +275 -0
- tooluniverse/data/geo_tools.json +200 -48
- tooluniverse/data/gnomad_tools.json +109 -0
- tooluniverse/data/gtopdb_tools.json +68 -0
- tooluniverse/data/gwas_tools.json +32 -0
- tooluniverse/data/interpro_tools.json +199 -0
- tooluniverse/data/jaspar_tools.json +70 -0
- tooluniverse/data/kegg_tools.json +356 -0
- tooluniverse/data/mpd_tools.json +87 -0
- tooluniverse/data/ols_tools.json +314 -0
- tooluniverse/data/package_discovery_tools.json +64 -0
- tooluniverse/data/packages/categorized_tools.txt +0 -1
- tooluniverse/data/packages/machine_learning_tools.json +0 -47
- tooluniverse/data/paleobiology_tools.json +91 -0
- tooluniverse/data/pride_tools.json +62 -0
- tooluniverse/data/pypi_package_inspector_tools.json +158 -0
- tooluniverse/data/python_executor_tools.json +341 -0
- tooluniverse/data/regulomedb_tools.json +50 -0
- tooluniverse/data/remap_tools.json +89 -0
- tooluniverse/data/screen_tools.json +89 -0
- tooluniverse/data/tool_discovery_agents.json +428 -0
- tooluniverse/data/tool_discovery_agents.json.backup +1343 -0
- tooluniverse/data/uniprot_tools.json +77 -0
- tooluniverse/data/web_search_tools.json +250 -0
- tooluniverse/data/worms_tools.json +55 -0
- tooluniverse/dataset_tool.py +2 -2
- tooluniverse/dbsnp_tool.py +196 -58
- tooluniverse/default_config.py +36 -3
- tooluniverse/emdb_tool.py +30 -0
- tooluniverse/enrichr_tool.py +14 -14
- tooluniverse/ensembl_tool.py +140 -47
- tooluniverse/execute_function.py +594 -29
- tooluniverse/extended_hooks.py +4 -4
- tooluniverse/file_download_tool.py +269 -0
- tooluniverse/gene_ontology_tool.py +1 -1
- tooluniverse/generate_tools.py +3 -3
- tooluniverse/geo_tool.py +81 -28
- tooluniverse/gnomad_tool.py +100 -52
- tooluniverse/gtopdb_tool.py +41 -0
- tooluniverse/humanbase_tool.py +10 -10
- tooluniverse/interpro_tool.py +72 -0
- tooluniverse/jaspar_tool.py +30 -0
- tooluniverse/kegg_tool.py +230 -0
- tooluniverse/logging_config.py +2 -2
- tooluniverse/mcp_client_tool.py +57 -129
- tooluniverse/mcp_integration.py +52 -49
- tooluniverse/mcp_tool_registry.py +147 -528
- tooluniverse/mpd_tool.py +42 -0
- tooluniverse/ncbi_eutils_tool.py +96 -0
- tooluniverse/ols_tool.py +435 -0
- tooluniverse/openalex_tool.py +8 -8
- tooluniverse/openfda_tool.py +2 -2
- tooluniverse/output_hook.py +15 -15
- tooluniverse/package_discovery_tool.py +217 -0
- tooluniverse/package_tool.py +1 -1
- tooluniverse/paleobiology_tool.py +30 -0
- tooluniverse/pmc_tool.py +2 -2
- tooluniverse/pride_tool.py +30 -0
- tooluniverse/pypi_package_inspector_tool.py +593 -0
- tooluniverse/python_executor_tool.py +711 -0
- tooluniverse/regulomedb_tool.py +30 -0
- tooluniverse/remap_tool.py +44 -0
- tooluniverse/remote/boltz/boltz_mcp_server.py +1 -1
- tooluniverse/remote/depmap_24q2/depmap_24q2_mcp_tool.py +3 -3
- tooluniverse/remote/immune_compass/compass_tool.py +3 -3
- tooluniverse/remote/pinnacle/pinnacle_tool.py +2 -2
- tooluniverse/remote/transcriptformer/transcriptformer_tool.py +3 -3
- tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +3 -3
- tooluniverse/remote_tool.py +4 -4
- tooluniverse/screen_tool.py +44 -0
- tooluniverse/scripts/filter_tool_files.py +2 -2
- tooluniverse/smcp.py +93 -12
- tooluniverse/smcp_server.py +100 -21
- tooluniverse/space/__init__.py +46 -0
- tooluniverse/space/loader.py +133 -0
- tooluniverse/space/validator.py +353 -0
- tooluniverse/tool_finder_embedding.py +5 -3
- tooluniverse/tool_finder_keyword.py +12 -10
- tooluniverse/tool_finder_llm.py +12 -8
- tooluniverse/tools/{UCSC_get_genes_by_region.py → BLAST_nucleotide_search.py} +22 -26
- tooluniverse/tools/BLAST_protein_search.py +63 -0
- tooluniverse/tools/ClinVar_search_variants.py +26 -15
- tooluniverse/tools/CodeQualityAnalyzer.py +3 -3
- tooluniverse/tools/EMDB_get_structure.py +46 -0
- tooluniverse/tools/GtoPdb_get_targets.py +52 -0
- tooluniverse/tools/InterPro_get_domain_details.py +46 -0
- tooluniverse/tools/InterPro_get_protein_domains.py +49 -0
- tooluniverse/tools/InterPro_search_domains.py +52 -0
- tooluniverse/tools/JASPAR_get_transcription_factors.py +52 -0
- tooluniverse/tools/MPD_get_phenotype_data.py +59 -0
- tooluniverse/tools/PRIDE_search_proteomics.py +52 -0
- tooluniverse/tools/PackageAnalyzer.py +55 -0
- tooluniverse/tools/Paleobiology_get_fossils.py +52 -0
- tooluniverse/tools/PyPIPackageInspector.py +59 -0
- tooluniverse/tools/ReMap_get_transcription_factor_binding.py +59 -0
- tooluniverse/tools/ReferenceInfoAnalyzer.py +55 -0
- tooluniverse/tools/RegulomeDB_query_variant.py +46 -0
- tooluniverse/tools/SCREEN_get_regulatory_elements.py +59 -0
- tooluniverse/tools/{ArgumentDescriptionOptimizer.py → TestResultsAnalyzer.py} +13 -13
- tooluniverse/tools/ToolDiscover.py +11 -11
- tooluniverse/tools/UniProt_id_mapping.py +63 -0
- tooluniverse/tools/UniProt_search.py +63 -0
- tooluniverse/tools/UnifiedToolGenerator.py +59 -0
- tooluniverse/tools/WoRMS_search_species.py +49 -0
- tooluniverse/tools/XMLToolOptimizer.py +55 -0
- tooluniverse/tools/__init__.py +119 -29
- tooluniverse/tools/_shared_client.py +3 -3
- tooluniverse/tools/alphafold_get_annotations.py +3 -3
- tooluniverse/tools/alphafold_get_prediction.py +3 -3
- tooluniverse/tools/alphafold_get_summary.py +3 -3
- tooluniverse/tools/cBioPortal_get_cancer_studies.py +46 -0
- tooluniverse/tools/cBioPortal_get_mutations.py +52 -0
- tooluniverse/tools/{gnomAD_query_variant.py → clinvar_get_clinical_significance.py} +8 -11
- tooluniverse/tools/clinvar_get_variant_details.py +49 -0
- tooluniverse/tools/dbSNP_get_variant_by_rsid.py +7 -7
- tooluniverse/tools/dbsnp_get_frequencies.py +46 -0
- tooluniverse/tools/dbsnp_search_by_gene.py +52 -0
- tooluniverse/tools/download_binary_file.py +66 -0
- tooluniverse/tools/download_file.py +71 -0
- tooluniverse/tools/download_text_content.py +55 -0
- tooluniverse/tools/dynamic_package_discovery.py +59 -0
- tooluniverse/tools/ensembl_get_sequence.py +52 -0
- tooluniverse/tools/{Ensembl_lookup_gene_by_symbol.py → ensembl_get_variants.py} +11 -11
- tooluniverse/tools/ensembl_lookup_gene.py +46 -0
- tooluniverse/tools/geo_get_dataset_info.py +46 -0
- tooluniverse/tools/geo_get_sample_info.py +46 -0
- tooluniverse/tools/geo_search_datasets.py +67 -0
- tooluniverse/tools/gnomad_get_gene_constraints.py +49 -0
- tooluniverse/tools/kegg_find_genes.py +52 -0
- tooluniverse/tools/kegg_get_gene_info.py +46 -0
- tooluniverse/tools/kegg_get_pathway_info.py +46 -0
- tooluniverse/tools/kegg_list_organisms.py +44 -0
- tooluniverse/tools/kegg_search_pathway.py +46 -0
- tooluniverse/tools/ols_find_similar_terms.py +63 -0
- tooluniverse/tools/{get_hyperopt_info.py → ols_get_ontology_info.py} +13 -10
- tooluniverse/tools/ols_get_term_ancestors.py +67 -0
- tooluniverse/tools/ols_get_term_children.py +67 -0
- tooluniverse/tools/{TestCaseGenerator.py → ols_get_term_info.py} +12 -9
- tooluniverse/tools/{CodeOptimizer.py → ols_search_ontologies.py} +22 -14
- tooluniverse/tools/ols_search_terms.py +71 -0
- tooluniverse/tools/python_code_executor.py +79 -0
- tooluniverse/tools/python_script_runner.py +79 -0
- tooluniverse/tools/web_api_documentation_search.py +63 -0
- tooluniverse/tools/web_search.py +71 -0
- tooluniverse/uniprot_tool.py +219 -16
- tooluniverse/url_tool.py +19 -1
- tooluniverse/uspto_tool.py +1 -1
- tooluniverse/utils.py +12 -12
- tooluniverse/web_search_tool.py +229 -0
- tooluniverse/worms_tool.py +64 -0
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/METADATA +8 -3
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/RECORD +184 -92
- tooluniverse/data/genomics_tools.json +0 -174
- tooluniverse/tools/ToolDescriptionOptimizer.py +0 -67
- tooluniverse/tools/ToolImplementationGenerator.py +0 -67
- tooluniverse/tools/ToolOptimizer.py +0 -59
- tooluniverse/tools/ToolSpecificationGenerator.py +0 -67
- tooluniverse/tools/ToolSpecificationOptimizer.py +0 -63
- tooluniverse/ucsc_tool.py +0 -60
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/WHEEL +0 -0
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/entry_points.txt +0 -0
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/licenses/LICENSE +0 -0
- {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.11.dist-info}/top_level.txt +0 -0
tooluniverse/__init__.py
CHANGED
@@ -174,9 +174,26 @@ MCPAutoLoaderTool: Any
 ADMETAITool: Any
 AlphaFoldRESTTool: Any
 ComposeTool: Any
+PythonCodeExecutor: Any
+PythonScriptRunner: Any
 CellosaurusSearchTool: Any
 CellosaurusQueryConverterTool: Any
 CellosaurusGetCellLineInfoTool: Any
+# New database tools
+InterProRESTTool: Any
+NCBIBlastTool: Any
+CBioPortalRESTTool: Any
+RegulomeDBRESTTool: Any
+JASPARRESTTool: Any
+ReMapRESTTool: Any
+SCREENRESTTool: Any
+PRIDERESTTool: Any
+EMDBRESTTool: Any
+GtoPdbRESTTool: Any
+MPDRESTTool: Any
+WoRMSRESTTool: Any
+PaleobiologyRESTTool: Any
+OLSTool: Any
 if not _LIGHT_IMPORT and not LAZY_LOADING_ENABLED:
     # Import all tool classes immediately (old behavior) with warning suppression  # noqa: E501
     with warnings.catch_warnings():
@@ -185,7 +202,6 @@ if not _LIGHT_IMPORT and not LAZY_LOADING_ENABLED:
         warnings.filterwarnings("ignore", category=UserWarning)
         warnings.filterwarnings("ignore", category=FutureWarning)
         # Suppress specific third-party warnings
-        warnings.filterwarnings("ignore", category=UserWarning, module="hyperopt")
         warnings.filterwarnings(
             "ignore", category=DeprecationWarning, module="pkg_resources"
         )
@@ -213,6 +229,10 @@ if not _LIGHT_IMPORT and not LAZY_LOADING_ENABLED:
         )
         from .chem_tool import ChEMBLTool
         from .compose_tool import ComposeTool
+        from .python_executor_tool import (
+            PythonCodeExecutor,
+            PythonScriptRunner,
+        )
         from .europe_pmc_tool import EuropePMCTool
         from .semantic_scholar_tool import SemanticScholarTool
         from .pubtator_tool import PubTatorTool
@@ -235,6 +255,12 @@ if not _LIGHT_IMPORT and not LAZY_LOADING_ENABLED:
         from .embedding_database import EmbeddingDatabase
         from .embedding_sync import EmbeddingSync
         from .rcsb_pdb_tool import RCSBTool
+        from .web_search_tool import (
+            WebSearchTool,
+            WebAPIDocumentationSearchTool,
+        )
+        from .package_discovery_tool import DynamicPackageDiscovery
+        from .pypi_package_inspector_tool import PyPIPackageInspector
         from .gwas_tool import (
             GWASAssociationSearch,
             GWASStudySearch,
@@ -264,6 +290,14 @@ if not _LIGHT_IMPORT and not LAZY_LOADING_ENABLED:
             CellosaurusQueryConverterTool,
             CellosaurusGetCellLineInfoTool,
         )
+        from .ols_tool import OLSTool
+
+        # New database tools
+        from .clinvar_tool import (
+            ClinVarSearchVariants,
+            ClinVarGetVariantDetails,
+            ClinVarGetClinicalSignificance,
+        )
 
         # Literature search tools
         from .arxiv_tool import ArXivTool
@@ -310,6 +344,8 @@ else:
     )
     ChEMBLTool = _LazyImportProxy("chem_tool", "ChEMBLTool")
     ComposeTool = _LazyImportProxy("compose_tool", "ComposeTool")
+    PythonCodeExecutor = _LazyImportProxy("python_executor_tool", "PythonCodeExecutor")
+    PythonScriptRunner = _LazyImportProxy("python_executor_tool", "PythonScriptRunner")
    EuropePMCTool = _LazyImportProxy("europe_pmc_tool", "EuropePMCTool")
    SemanticScholarTool = _LazyImportProxy(
        "semantic_scholar_tool", "SemanticScholarTool"
@@ -368,6 +404,7 @@ else:
    CellosaurusGetCellLineInfoTool = _LazyImportProxy(
        "cellosaurus_tool", "CellosaurusGetCellLineInfoTool"
    )
+    OLSTool = _LazyImportProxy("ols_tool", "OLSTool")
    # Literature search tools
    ArXivTool = _LazyImportProxy("arxiv_tool", "ArXivTool")
    CrossrefTool = _LazyImportProxy("crossref_tool", "CrossrefTool")
@@ -381,6 +418,16 @@ else:
    CoreTool = _LazyImportProxy("core_tool", "CoreTool")
    PMCTool = _LazyImportProxy("pmc_tool", "PMCTool")
    ZenodoTool = _LazyImportProxy("zenodo_tool", "ZenodoTool")
+    WebSearchTool = _LazyImportProxy("web_search_tool", "WebSearchTool")
+    WebAPIDocumentationSearchTool = _LazyImportProxy(
+        "web_search_tool", "WebAPIDocumentationSearchTool"
+    )
+    DynamicPackageDiscovery = _LazyImportProxy(
+        "package_discovery_tool", "DynamicPackageDiscovery"
+    )
+    PyPIPackageInspector = _LazyImportProxy(
+        "pypi_package_inspector_tool", "PyPIPackageInspector"
+    )
 
 __all__ = [
     "__version__",
@@ -456,6 +503,7 @@ __all__ = [
     "CellosaurusSearchTool",
     "CellosaurusQueryConverterTool",
     "CellosaurusGetCellLineInfoTool",
+    "OLSTool",
     # Literature search tools
     "ArXivTool",
     "CrossrefTool",
@@ -469,4 +517,12 @@ __all__ = [
     "CoreTool",
     "PMCTool",
     "ZenodoTool",
+    "WebSearchTool",
+    "WebAPIDocumentationSearchTool",
+    "DynamicPackageDiscovery",
+    "PyPIPackageInspector",
+    # ClinVar tools
+    "ClinVarSearchVariants",
+    "ClinVarGetVariantDetails",
+    "ClinVarGetClinicalSignificance",
 ]
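The new classes above are added to `__all__` and wired through `_LazyImportProxy`, so they resolve from the package root under both eager and lazy loading. A minimal, illustrative sketch (whether the import is eager or proxied depends on `_LIGHT_IMPORT` and `LAZY_LOADING_ENABLED` at runtime):

    from tooluniverse import (
        OLSTool,
        PythonCodeExecutor,
        WebSearchTool,
        PyPIPackageInspector,
        ClinVarSearchVariants,
    )

    # Under lazy loading these names are proxies; the real module is imported on first use.
    print(OLSTool)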
tooluniverse/admetai_tool.py
CHANGED
tooluniverse/agentic_tool.py
CHANGED
@@ -38,7 +38,7 @@ class AgenticTool(BaseTool):
         """
         Check if any API keys are available across all supported API types.
 
-        Returns
+        Returns
             bool: True if at least one API type has all required keys, False otherwise
         """
         for _api_type, required_vars in API_KEY_ENV_VARS.items():
@@ -74,16 +74,44 @@
         # Get configuration from nested 'configs' dict or fallback to top-level
         configs = tool_config.get("configs", {})
 
-        # Helper function to get config values with
+        # Helper function to get config values with Space support
         def get_config(key: str, default: Any) -> Any:
-
+            tool_value = configs.get(key, tool_config.get(key))
+
+            # Get environment value directly (avoid calling self method during init)
+            env_value = None
+            if key == "api_type":
+                # Direct use of AgenticTool api_type values from Space
+                env_value = os.getenv("TOOLUNIVERSE_LLM_DEFAULT_PROVIDER")
+            elif key == "model_id":
+                task = tool_config.get("llm_task", "default").upper()
+                env_value = os.getenv(f"TOOLUNIVERSE_LLM_MODEL_{task}") or os.getenv(
+                    "TOOLUNIVERSE_LLM_MODEL_DEFAULT"
+                )
+            elif key == "temperature":
+                temp_str = os.getenv("TOOLUNIVERSE_LLM_TEMPERATURE")
+                env_value = float(temp_str) if temp_str else None
+
+            mode = os.getenv("TOOLUNIVERSE_LLM_CONFIG_MODE", "default")
+
+            if mode == "default":
+                # Space as default: tool config > env > built-in default
+                if tool_value is not None:
+                    return tool_value
+                if env_value is not None:
+                    return env_value
+                return default
+            else:  # mode == "fallback"
+                # Space as fallback: tool config > built-in default (env as fallback later)
+                if tool_value is not None:
+                    return tool_value
+                return default
 
         # LLM configuration
         self._api_type: str = get_config("api_type", "CHATGPT")
         self._model_id: str = get_config("model_id", "o1-mini")
         self._temperature: Optional[float] = get_config("temperature", 0.1)
-        #
-        self._max_new_tokens: Optional[int] = None
+        # max_new_tokens is handled by LLM client automatically
         self._return_json: bool = get_config("return_json", False)
         self._max_retries: int = get_config("max_retries", 5)
         self._retry_delay: int = get_config("retry_delay", 5)
@@ -96,9 +124,8 @@
 
         # Global fallback configuration
         self._use_global_fallback: bool = get_config("use_global_fallback", True)
-
-
-        )
+        # Initialize fallback chain later after environment config is set
+        self._global_fallback_chain: List[Dict[str, str]] = []
 
         # Gemini model configuration (optional; env override)
         self._gemini_model_id: str = get_config(
@@ -133,11 +160,40 @@
         self._current_api_type = None
         self._current_model_id = None
 
+        # Store environment config for fallback mode
+        # Direct use of AgenticTool api_type values from Space
+        self._env_api_type = os.getenv("TOOLUNIVERSE_LLM_DEFAULT_PROVIDER")
+
+        task = tool_config.get("llm_task", "default").upper()
+        self._env_model_id = os.getenv(f"TOOLUNIVERSE_LLM_MODEL_{task}") or os.getenv(
+            "TOOLUNIVERSE_LLM_MODEL_DEFAULT"
+        )
+
+        # Initialize global fallback chain now that environment config is set
+        self._global_fallback_chain = self._get_global_fallback_chain()
+
         # Try primary API first, then fallback if configured
         self._try_initialize_api()
 
     def _get_global_fallback_chain(self) -> List[Dict[str, str]]:
         """Get the global fallback chain from environment or use default."""
+        mode = os.getenv("TOOLUNIVERSE_LLM_CONFIG_MODE", "default")
+
+        # In fallback mode, prepend environment config to fallback chain
+        if mode == "fallback" and self._env_api_type and self._env_model_id:
+            env_fallback = {
+                "api_type": self._env_api_type,
+                "model_id": self._env_model_id,
+            }
+
+            # Check if env fallback is different from primary config
+            if (
+                env_fallback["api_type"] != self._api_type
+                or env_fallback["model_id"] != self._model_id
+            ):
+                # Add environment config as first fallback
+                return [env_fallback] + DEFAULT_FALLBACK_CHAIN.copy()
+
         # Check environment variable for custom fallback chain
         env_chain = os.getenv("AGENTIC_TOOL_FALLBACK_CHAIN")
         if env_chain:
@@ -257,8 +313,6 @@
             raise ValueError(
                 f"Unsupported API type: {self._api_type}. Supported types: {supported_api_types}"
             )
-        if self._max_new_tokens is not None and self._max_new_tokens <= 0:
-            raise ValueError("max_new_tokens must be positive or None")
 
     # ------------------------------------------------------------------ public API --------------
     def run(
@@ -386,7 +440,6 @@
                 "api_type": self._api_type,
                 "model_id": self._model_id,
                 "temperature": self._temperature,
-                "max_new_tokens": self._max_new_tokens,
             },
             "execution_time_seconds": execution_time,
             "timestamp": start_time.isoformat(),
@@ -417,7 +470,6 @@
                 "api_type": self._api_type,
                 "model_id": self._model_id,
                 "temperature": self._temperature,
-                "max_new_tokens": self._max_new_tokens,
             },
             "execution_time_seconds": execution_time,
             "timestamp": start_time.isoformat(),
@@ -442,7 +494,6 @@
                 "api_type": self._api_type,
                 "model_id": self._model_id,
                 "temperature": self._temperature,
-                "max_new_tokens": self._max_new_tokens,
             },
             "execution_time_seconds": execution_time,
         },
@@ -511,7 +562,6 @@
             "api_type": self._api_type,
             "model_id": self._model_id,
             "temperature": self._temperature,
-            "max_new_tokens": self._max_new_tokens,
             "return_json": self._return_json,
             "max_retries": self._max_retries,
             "retry_delay": self._retry_delay,
@@ -595,9 +645,7 @@
     def estimate_token_usage(self, arguments: Dict[str, Any]) -> Dict[str, int]:
         prompt = self._format_prompt(arguments)
         estimated_input_tokens = len(prompt) // 4
-        estimated_max_output_tokens = (
-            self._max_new_tokens if self._max_new_tokens is not None else 2048
-        )
+        estimated_max_output_tokens = 2048  # Default estimation
         estimated_total_tokens = estimated_input_tokens + estimated_max_output_tokens
         return {
             "estimated_input_tokens": estimated_input_tokens,
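Taken together, these hunks let AgenticTool resolve its LLM settings from the environment. A rough sketch of the precedence, based on the variables read above (the provider and model values are placeholders, and the "summarization" task name is only an assumption to illustrate the per-task override):

    import os

    os.environ["TOOLUNIVERSE_LLM_CONFIG_MODE"] = "default"         # or "fallback"
    os.environ["TOOLUNIVERSE_LLM_DEFAULT_PROVIDER"] = "CHATGPT"     # maps to AgenticTool api_type
    os.environ["TOOLUNIVERSE_LLM_MODEL_DEFAULT"] = "o1-mini"        # used when no per-task model is set
    os.environ["TOOLUNIVERSE_LLM_MODEL_SUMMARIZATION"] = "gpt-4o"   # picked up when llm_task == "summarization"
    os.environ["TOOLUNIVERSE_LLM_TEMPERATURE"] = "0.2"

    # "default" mode: explicit tool config wins, then these variables, then the built-in defaults.
    # "fallback" mode: tool config wins, and the env provider/model pair is prepended to the
    # global fallback chain whenever it differs from the primary configuration.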
tooluniverse/base_tool.py
CHANGED
@@ -38,7 +38,7 @@ class BaseTool:
 
         Override this method in subclasses to specify a custom defaults file.
 
-        Returns
+        Returns
             Path or resource object pointing to the defaults file
         """
         tool_type = cls.__name__
@@ -154,7 +154,7 @@
     def get_required_parameters(self):
         """
         Retrieve required parameters from the endpoint definition.
-        Returns
+        Returns
             list: List of required parameters for the given endpoint.
         """
         schema = self.tool_config.get("parameter", {})
@@ -172,7 +172,7 @@
         Args:
             arguments: Dictionary of arguments to validate
 
-        Returns
+        Returns
             ToolError if validation fails, None if validation passes
         """
         schema = self.tool_config.get("parameter", {})
@@ -207,7 +207,7 @@
         Args:
             exception: The raw exception to classify
 
-        Returns
+        Returns
             Structured ToolError instance
         """
         error_str = str(exception).lower()
@@ -261,7 +261,7 @@
         Args:
             arguments: Dictionary of arguments for the tool call
 
-        Returns
+        Returns
             String cache key
         """
         # Include tool name and arguments in cache key
@@ -276,7 +276,7 @@
         """
         Check if this tool supports streaming responses.
 
-        Returns
+        Returns
             True if tool supports streaming, False otherwise
         """
         return self.tool_config.get("supports_streaming", False)
@@ -285,11 +285,22 @@
         """
         Check if this tool's results can be cached.
 
-        Returns
+        Returns
             True if tool results can be cached, False otherwise
         """
         return self.tool_config.get("cacheable", True)
 
+    def get_batch_concurrency_limit(self) -> int:
+        """Return maximum concurrent executions allowed during batch runs (0 = unlimited)."""
+        limit = self.tool_config.get("batch_max_concurrency")
+        if limit is None:
+            return 0
+        try:
+            parsed = int(limit)
+        except (TypeError, ValueError):
+            return 0
+        return max(0, parsed)
+
     def get_cache_namespace(self) -> str:
         """Return cache namespace identifier for this tool."""
         return self.tool_config.get("name", self.__class__.__name__)
@@ -326,7 +337,7 @@
         """
         Get comprehensive information about this tool.
 
-        Returns
+        Returns
             Dictionary containing tool metadata
         """
         return {
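The new get_batch_concurrency_limit() reads an optional batch_max_concurrency field from the tool config and treats missing, negative, or non-numeric values as 0 (unlimited). A small sketch, assuming BaseTool can be instantiated directly with a config dict (the tool name here is made up):

    from tooluniverse.base_tool import BaseTool

    tool = BaseTool({"name": "example_tool", "batch_max_concurrency": "4"})
    print(tool.get_batch_concurrency_limit())          # 4 (string coerced via int())

    default_tool = BaseTool({"name": "example_tool"})
    print(default_tool.get_batch_concurrency_limit())  # 0 -> no batch concurrency cap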
tooluniverse/blast_tool.py
ADDED
@@ -0,0 +1,132 @@
+from typing import Any, Dict
+from Bio.Blast import NCBIWWW, NCBIXML
+from Bio.Seq import Seq
+from .base_tool import BaseTool
+from .tool_registry import register_tool
+
+
+@register_tool("NCBIBlastTool")
+class NCBIBlastTool(BaseTool):
+    def __init__(self, tool_config: Dict):
+        super().__init__(tool_config)
+        self.timeout = 300  # BLAST can take a long time
+        self.max_wait_time = 600  # Maximum wait time for results
+
+    def _parse_blast_results(self, blast_xml: str) -> Dict[str, Any]:
+        """Parse BLAST XML results into structured data"""
+        try:
+            from io import StringIO
+
+            blast_record = NCBIXML.read(StringIO(blast_xml))
+
+            results = {
+                "query_id": blast_record.query_id,
+                "query_length": blast_record.query_length,
+                "database": blast_record.database,
+                "algorithm": blast_record.application,
+                "alignments": [],
+            }
+
+            for alignment in blast_record.alignments:
+                alignment_data = {
+                    "hit_id": getattr(alignment, "hit_id", "unknown"),
+                    "hit_def": getattr(alignment, "hit_def", "unknown"),
+                    "hit_length": getattr(alignment, "hit_length", 0),
+                    "hsps": [],
+                }
+
+                for hsp in alignment.hsps:
+                    hsp_data = {
+                        "score": getattr(hsp, "score", 0),
+                        "bits": getattr(hsp, "bits", 0),
+                        "expect": getattr(hsp, "expect", 0),
+                        "identities": getattr(hsp, "identities", 0),
+                        "positives": getattr(hsp, "positives", 0),
+                        "gaps": getattr(hsp, "gaps", 0),
+                        "align_length": getattr(hsp, "align_length", 0),
+                        "query_start": getattr(hsp, "query_start", 0),
+                        "query_end": getattr(hsp, "query_end", 0),
+                        "hit_start": getattr(hsp, "hit_start", 0),
+                        "hit_end": getattr(hsp, "hit_end", 0),
+                        "query": getattr(hsp, "query", ""),
+                        "match": getattr(hsp, "match", ""),
+                        "sbjct": getattr(hsp, "sbjct", ""),
+                    }
+                    alignment_data["hsps"].append(hsp_data)
+
+                results["alignments"].append(alignment_data)
+
+            return results
+
+        except Exception as e:
+            return {
+                "error": f"Failed to parse BLAST results: {str(e)}",
+                "raw_xml": (
+                    blast_xml[:1000] + "..." if len(blast_xml) > 1000 else blast_xml
+                ),
+            }
+
+    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute BLAST search using NCBI Web service"""
+        try:
+            sequence = arguments.get("sequence", "")
+            blast_type = arguments.get("blast_type", "blastn")
+            database = arguments.get("database", "nt")
+            expect = arguments.get("expect", 10.0)
+            hitlist_size = arguments.get("hitlist_size", 50)
+
+            if not sequence:
+                return {
+                    "status": "error",
+                    "error": "Missing required parameter: sequence",
+                }
+
+            # Validate sequence
+            try:
+                seq_obj = Seq(sequence)
+                if len(seq_obj) < 10:
+                    return {
+                        "status": "error",
+                        "error": "Sequence too short (minimum 10 nucleotides)",
+                    }
+            except Exception as e:
+                return {
+                    "status": "error",
+                    "error": f"Invalid sequence format: {str(e)}",
+                }
+
+            # Perform BLAST search
+            result_handle = NCBIWWW.qblast(
+                blast_type,
+                database,
+                sequence,
+                expect=expect,
+                hitlist_size=hitlist_size,
+                format_type="XML",
+            )
+
+            # Read results
+            blast_xml = result_handle.read()
+            result_handle.close()
+
+            # Parse results
+            parsed_results = self._parse_blast_results(blast_xml)
+
+            if "error" in parsed_results:
+                return {
+                    "status": "error",
+                    "error": parsed_results["error"],
+                    "raw_data": parsed_results.get("raw_xml", ""),
+                }
+
+            return {
+                "status": "success",
+                "data": parsed_results,
+                "query_sequence": sequence,
+                "blast_type": blast_type,
+                "database": database,
+                "hit_count": len(parsed_results["alignments"]),
+            }
+
+        except Exception as e:
+            return {"status": "error", "error": f"BLAST search failed: {str(e)}"}
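Since run() only reads sequence, blast_type, database, expect, and hitlist_size from its arguments, invoking the new tool directly might look like the sketch below. The tool_config dict is minimal and its values are placeholders (the name matches the generated tools/BLAST_nucleotide_search.py wrapper, but any config accepted by BaseTool should work); the call hits NCBI's live qblast service and can take minutes.

    from tooluniverse.blast_tool import NCBIBlastTool

    blast = NCBIBlastTool({"name": "BLAST_nucleotide_search"})
    result = blast.run({
        "sequence": "ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG",  # placeholder query, >= 10 nt
        "blast_type": "blastn",
        "database": "nt",
        "expect": 1e-5,
        "hitlist_size": 10,
    })
    print(result["status"], result.get("hit_count"))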
tooluniverse/boltz_tool.py
CHANGED
@@ -91,7 +91,7 @@ class Boltz2DockingTool(BaseTool):
                 - other optional boltz CLI flags (e.g., 'recycling_steps').
             timeout (int): The maximum time in seconds to wait for the Boltz command to complete.
 
-        Returns
+        Returns
             dict: A dictionary containing the path to the predicted structure and affinity data, or an error.
         """
         arguments = arguments or {}
@@ -170,7 +170,7 @@
                     prediction_folder, f"{input_filename}_model_0.cif"
                 )
                 if os.path.exists(structure_file):
-                    with open(structure_file, "r") as f:
+                    with open(structure_file, "r", encoding="utf-8") as f:
                        results["predicted_structure"] = f.read()
                    results["structure_format"] = "cif"
                else:
@@ -183,7 +183,7 @@
                    prediction_folder, f"affinity_{input_filename}.json"
                )
                if os.path.exists(affinity_file):
-                    with open(affinity_file, "r") as f:
+                    with open(affinity_file, "r", encoding="utf-8") as f:
                        results["affinity_prediction"] = json.load(f)
                else:
                    results["affinity_error"] = f"Missing {os.path.basename(affinity_file)}"