tooluniverse 1.0.7__py3-none-any.whl → 1.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tooluniverse might be problematic. Click here for more details.

Files changed (76) hide show
  1. tooluniverse/__init__.py +29 -14
  2. tooluniverse/admetai_tool.py +8 -4
  3. tooluniverse/base_tool.py +36 -0
  4. tooluniverse/biogrid_tool.py +118 -0
  5. tooluniverse/build_optimizer.py +87 -0
  6. tooluniverse/cache/__init__.py +3 -0
  7. tooluniverse/cache/memory_cache.py +99 -0
  8. tooluniverse/cache/result_cache_manager.py +235 -0
  9. tooluniverse/cache/sqlite_backend.py +257 -0
  10. tooluniverse/clinvar_tool.py +90 -0
  11. tooluniverse/custom_tool.py +28 -0
  12. tooluniverse/data/arxiv_tools.json +1 -4
  13. tooluniverse/data/core_tools.json +1 -4
  14. tooluniverse/data/dataset_tools.json +7 -7
  15. tooluniverse/data/doaj_tools.json +1 -3
  16. tooluniverse/data/drug_discovery_agents.json +292 -0
  17. tooluniverse/data/europe_pmc_tools.json +1 -2
  18. tooluniverse/data/genomics_tools.json +174 -0
  19. tooluniverse/data/geo_tools.json +86 -0
  20. tooluniverse/data/markitdown_tools.json +51 -0
  21. tooluniverse/data/openalex_tools.json +1 -5
  22. tooluniverse/data/pmc_tools.json +1 -4
  23. tooluniverse/data/ppi_tools.json +139 -0
  24. tooluniverse/data/pubmed_tools.json +1 -3
  25. tooluniverse/data/semantic_scholar_tools.json +1 -2
  26. tooluniverse/data/unified_guideline_tools.json +206 -4
  27. tooluniverse/data/xml_tools.json +15 -15
  28. tooluniverse/data/zenodo_tools.json +1 -2
  29. tooluniverse/dbsnp_tool.py +71 -0
  30. tooluniverse/default_config.py +6 -0
  31. tooluniverse/ensembl_tool.py +61 -0
  32. tooluniverse/execute_function.py +196 -75
  33. tooluniverse/generate_tools.py +303 -20
  34. tooluniverse/genomics_gene_search_tool.py +56 -0
  35. tooluniverse/geo_tool.py +116 -0
  36. tooluniverse/gnomad_tool.py +63 -0
  37. tooluniverse/markitdown_tool.py +159 -0
  38. tooluniverse/mcp_client_tool.py +10 -5
  39. tooluniverse/smcp.py +10 -9
  40. tooluniverse/string_tool.py +112 -0
  41. tooluniverse/tools/ADMETAnalyzerAgent.py +59 -0
  42. tooluniverse/tools/ArXiv_search_papers.py +3 -3
  43. tooluniverse/tools/CMA_Guidelines_Search.py +52 -0
  44. tooluniverse/tools/CORE_search_papers.py +3 -3
  45. tooluniverse/tools/ClinVar_search_variants.py +52 -0
  46. tooluniverse/tools/ClinicalTrialDesignAgent.py +63 -0
  47. tooluniverse/tools/CompoundDiscoveryAgent.py +59 -0
  48. tooluniverse/tools/DOAJ_search_articles.py +2 -2
  49. tooluniverse/tools/DiseaseAnalyzerAgent.py +52 -0
  50. tooluniverse/tools/DrugInteractionAnalyzerAgent.py +52 -0
  51. tooluniverse/tools/DrugOptimizationAgent.py +63 -0
  52. tooluniverse/tools/Ensembl_lookup_gene_by_symbol.py +52 -0
  53. tooluniverse/tools/EuropePMC_search_articles.py +1 -1
  54. tooluniverse/tools/GIN_Guidelines_Search.py +52 -0
  55. tooluniverse/tools/GWAS_search_associations_by_gene.py +52 -0
  56. tooluniverse/tools/LiteratureSynthesisAgent.py +59 -0
  57. tooluniverse/tools/PMC_search_papers.py +3 -3
  58. tooluniverse/tools/PubMed_search_articles.py +2 -2
  59. tooluniverse/tools/SemanticScholar_search_papers.py +1 -1
  60. tooluniverse/tools/UCSC_get_genes_by_region.py +67 -0
  61. tooluniverse/tools/Zenodo_search_records.py +1 -1
  62. tooluniverse/tools/__init__.py +33 -1
  63. tooluniverse/tools/convert_to_markdown.py +59 -0
  64. tooluniverse/tools/dbSNP_get_variant_by_rsid.py +46 -0
  65. tooluniverse/tools/gnomAD_query_variant.py +52 -0
  66. tooluniverse/tools/openalex_literature_search.py +4 -4
  67. tooluniverse/ucsc_tool.py +60 -0
  68. tooluniverse/unified_guideline_tools.py +1175 -57
  69. tooluniverse/utils.py +51 -4
  70. tooluniverse/zenodo_tool.py +2 -1
  71. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/METADATA +9 -3
  72. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/RECORD +76 -40
  73. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/WHEEL +0 -0
  74. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/entry_points.txt +0 -0
  75. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/licenses/LICENSE +0 -0
  76. {tooluniverse-1.0.7.dist-info → tooluniverse-1.0.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,159 @@
1
+ """
2
+ MarkItDown Tool for ToolUniverse
3
+
4
+ Simple implementation following Microsoft's official MCP pattern.
5
+ Supports http:, https:, file:, data: URIs.
6
+ """
7
+
8
+ import os
9
+ import subprocess
10
+ import sys
11
+ import urllib.parse
12
+ import urllib.request
13
+ import tempfile
14
+ from typing import Dict, Any
15
+ from .base_tool import BaseTool
16
+ from .tool_registry import register_tool
17
+
18
+
19
@register_tool("MarkItDownTool")
class MarkItDownTool(BaseTool):
    """MarkItDown tool for converting files to Markdown.

    Wraps the ``markitdown`` CLI (Microsoft's converter) following the
    official MCP server pattern. Supports http:, https:, file: and data:
    URIs. All public entry points return a dict; failures are reported as
    ``{"error": ...}`` rather than raised.
    """

    def __init__(self, tool_config):
        super().__init__(tool_config)
        # Tool name is configurable via the JSON config; fall back to the
        # class default.
        self.tool_name = tool_config.get("name", "MarkItDownTool")

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute MarkItDown tool.

        Returns a dict with ``markdown_content`` on success, or a dict
        with a single ``error`` key on failure.
        """
        try:
            return self._convert_to_markdown(arguments)
        except Exception as e:
            return {"error": str(e)}

    def _convert_to_markdown(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Convert a resource described by URI to Markdown using markitdown CLI."""
        uri = arguments.get("uri")
        output_path = arguments.get("output_path")
        enable_plugins = arguments.get("enable_plugins", False)

        if not uri:
            return {"error": "URI is required"}

        try:
            # Parse URI and dispatch on its scheme.
            parsed_uri = urllib.parse.urlparse(uri)
            scheme = parsed_uri.scheme.lower()

            # Resolve the URI to a local path. Remote/data content is
            # materialized in a temp file that must be cleaned up later.
            if scheme in ["http", "https"]:
                temp_file = self._download_from_url(uri)
                if not temp_file:
                    return {"error": f"Failed to download from URL: {uri}"}
                input_path = temp_file
                cleanup_temp = True

            elif scheme == "file":
                file_path = urllib.request.url2pathname(parsed_uri.path)
                if not os.path.exists(file_path):
                    return {"error": f"File not found: {file_path}"}
                input_path = file_path
                cleanup_temp = False

            elif scheme == "data":
                temp_file = self._handle_data_uri(uri)
                if not temp_file:
                    return {"error": f"Failed to process data URI: {uri}"}
                input_path = temp_file
                cleanup_temp = True

            else:
                return {
                    "error": f"Unsupported URI scheme: {scheme}. Supported schemes: http, https, file, data"
                }

            # BUGFIX: run the CLI inside try/finally so the temporary file
            # is removed even when subprocess.run raises (e.g.
            # subprocess.TimeoutExpired after the 60s timeout). The
            # original only unlinked on the non-raising paths, leaking the
            # temp file whenever an exception propagated to run()'s
            # catch-all handler.
            try:
                # Build markitdown command; -m keeps it tied to the same
                # interpreter/environment as this process.
                cmd = [sys.executable, "-m", "markitdown", input_path]
                if enable_plugins:
                    cmd.append("--use-plugins")
                if output_path:
                    cmd.extend(["-o", output_path])

                result = subprocess.run(
                    cmd, capture_output=True, text=True, timeout=60
                )

                if result.returncode != 0:
                    return {"error": f"MarkItDown failed: {result.stderr}"}

                # Prefer the written file (when requested) over stdout.
                if output_path and os.path.exists(output_path):
                    with open(output_path, "r", encoding="utf-8") as f:
                        markdown_content = f.read()
                else:
                    markdown_content = result.stdout
            finally:
                if cleanup_temp and os.path.exists(input_path):
                    os.unlink(input_path)

            # Prepare response
            response = {
                "markdown_content": markdown_content,
                "file_info": {
                    "original_uri": uri,
                    "uri_scheme": scheme,
                    "output_file": output_path if output_path else None,
                },
            }

            # If no output_path specified, also return the content as a string for convenience
            if not output_path:
                response["content"] = markdown_content

            return response

        except Exception as e:
            return {"error": f"URI processing failed: {str(e)}"}

    def _download_from_url(self, url: str) -> "str | None":
        """Download content from URL to a temporary file.

        Returns the temp file path, or None on any failure (annotation
        fixed: the original claimed ``str`` but returned None on error).
        """
        try:
            with urllib.request.urlopen(url, timeout=30) as response:
                content = response.read()

            # Create temporary file; delete=False because the caller
            # consumes and removes it.
            with tempfile.NamedTemporaryFile(delete=False) as temp_file:
                temp_file.write(content)
                return temp_file.name
        except Exception:
            return None

    def _handle_data_uri(self, data_uri: str) -> "str | None":
        """Decode a data: URI and save it to a temporary file.

        Returns the temp file path, or None if the URI is malformed or
        decoding fails (annotation fixed as in _download_from_url).
        """
        try:
            # Parse data URI: data:[<mediatype>][;base64],<data>
            if "," not in data_uri:
                return None

            header, data = data_uri.split(",", 1)

            # Check if base64 encoded
            if ";base64" in header:
                import base64

                content = base64.b64decode(data)
            else:
                content = data.encode("utf-8")

            # Create temporary file
            with tempfile.NamedTemporaryFile(delete=False) as temp_file:
                temp_file.write(content)
                return temp_file.name
        except Exception:
            return None
@@ -26,18 +26,23 @@ class BaseMCPClient:
26
26
 
27
27
  def __init__(self, server_url: str, transport: str = "http", timeout: int = 30):
28
28
  self.server_url = os.path.expandvars(server_url)
29
- self.transport = transport
29
+ # Normalize transport for backward compatibility: treat 'stdio' as HTTP
30
+ normalized_transport = (
31
+ transport.lower() if isinstance(transport, str) else "http"
32
+ )
33
+ if normalized_transport == "stdio":
34
+ normalized_transport = "http"
35
+ self.transport = normalized_transport
30
36
  self.timeout = timeout
31
37
  self.session = None
32
38
  self.mcp_session_id = None
33
39
  self._initialized = False
34
40
 
35
- # Validate transport
41
+ # Validate transport (accept 'stdio' via normalization above)
36
42
  supported_transports = ["http", "websocket"]
37
43
  if self.transport not in supported_transports:
38
- raise ValueError(
39
- f"Invalid transport '{self.transport}'. Supported: {supported_transports}"
40
- )
44
+ # Keep message concise to satisfy line length rules
45
+ raise ValueError("Invalid transport")
41
46
 
42
47
  async def _ensure_session(self):
43
48
  """Ensure HTTP session is available for HTTP transport"""
tooluniverse/smcp.py CHANGED
@@ -2265,22 +2265,17 @@ class SMCP(FastMCP):
2265
2265
 
2266
2266
  # Filter out None values for optional parameters (preserve streaming flag)
2267
2267
  args_dict = {k: v for k, v in kwargs.items() if v is not None}
2268
- filtered_args = {
2269
- k: v
2270
- for k, v in args_dict.items()
2271
- if k != "_tooluniverse_stream"
2272
- }
2273
2268
 
2274
- # Validate required parameters
2269
+ # Validate required parameters (check against args_dict, not filtered_args)
2275
2270
  missing_required = [
2276
- param for param in required_params if param not in filtered_args
2271
+ param for param in required_params if param not in args_dict
2277
2272
  ]
2278
2273
  if missing_required:
2279
2274
  return json.dumps(
2280
2275
  {
2281
2276
  "error": f"Missing required parameters: {missing_required}",
2282
2277
  "required": required_params,
2283
- "provided": list(filtered_args.keys()),
2278
+ "provided": list(args_dict.keys()),
2284
2279
  },
2285
2280
  indent=2,
2286
2281
  )
@@ -2289,9 +2284,12 @@ class SMCP(FastMCP):
2289
2284
 
2290
2285
  loop = asyncio.get_event_loop()
2291
2286
 
2287
+ # Initialize stream_callback to None by default
2288
+ stream_callback = None
2289
+
2292
2290
  if stream_flag and ctx is not None:
2293
2291
 
2294
- def stream_callback(chunk: str) -> None:
2292
+ def _stream_callback(chunk: str) -> None:
2295
2293
  if not chunk:
2296
2294
  return
2297
2295
  try:
@@ -2312,6 +2310,9 @@ class SMCP(FastMCP):
2312
2310
  f"Failed to dispatch stream chunk for {tool_name}: {cb_error}"
2313
2311
  )
2314
2312
 
2313
+ # Assign the function to stream_callback
2314
+ stream_callback = _stream_callback
2315
+
2315
2316
  # Ensure downstream tools see the streaming flag
2316
2317
  if "_tooluniverse_stream" not in args_dict:
2317
2318
  args_dict["_tooluniverse_stream"] = True
@@ -0,0 +1,112 @@
1
+ """
2
+ STRING Database REST API Tool
3
+
4
+ This tool provides access to protein-protein interaction data from the STRING
5
+ database. STRING is a database of known and predicted protein-protein
6
+ interactions.
7
+ """
8
+
9
+ import requests
10
+ from typing import Dict, Any, List
11
+ from .base_tool import BaseTool
12
+ from .tool_registry import register_tool
13
+
14
+ STRING_BASE_URL = "https://string-db.org/api"
15
+
16
+
17
@register_tool("STRINGRESTTool")
class STRINGRESTTool(BaseTool):
    """
    STRING Database REST API tool.
    Generic wrapper for STRING API endpoints defined in ppi_tools.json.
    """

    def __init__(self, tool_config):
        super().__init__(tool_config)
        fields = tool_config.get("fields", {})
        parameter = tool_config.get("parameter", {})

        # Endpoint path appended to STRING_BASE_URL, e.g. "/tsv/network".
        self.endpoint_template: str = fields.get("endpoint", "/tsv/network")
        # Argument names that must be present when run() is called.
        self.required: List[str] = parameter.get("required", [])
        # "TSV" responses are parsed into row dicts; anything else is JSON.
        self.output_format: str = fields.get("return_format", "TSV")

    def _build_url(self, arguments: Dict[str, Any]) -> str:
        """Build the full URL for a STRING API request.

        BUGFIX: previously annotated ``str | Dict[str, Any]``, but this
        method never returns a dict; the annotation and the dead
        error-dict check in ``run`` have been removed.
        """
        return STRING_BASE_URL + self.endpoint_template

    def _build_params(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Translate tool arguments into STRING API query parameters."""
        params = {}

        # STRING expects multiple identifiers joined by carriage returns.
        if "protein_ids" in arguments:
            protein_ids = arguments["protein_ids"]
            if isinstance(protein_ids, list):
                params["identifiers"] = "\r".join(protein_ids)
            else:
                params["identifiers"] = str(protein_ids)

        # Pass through the remaining optional parameters.
        if "species" in arguments:
            params["species"] = arguments["species"]
        if "confidence_score" in arguments:
            # Tool takes 0.0-1.0; STRING's required_score is 0-1000.
            params["required_score"] = int(arguments["confidence_score"] * 1000)
        if "limit" in arguments:
            params["limit"] = arguments["limit"]
        if "network_type" in arguments:
            params["network_type"] = arguments["network_type"]

        return params

    def _make_request(self, url: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """Perform a GET request and handle common errors."""
        try:
            response = requests.get(url, params=params, timeout=30)
            response.raise_for_status()

            if self.output_format == "TSV":
                return self._parse_tsv_response(response.text)
            # Non-TSV endpoints return JSON directly.
            return response.json()

        except requests.exceptions.RequestException as e:
            return {"error": f"Request failed: {str(e)}"}
        except Exception as e:
            return {"error": f"Unexpected error: {str(e)}"}

    def _parse_tsv_response(self, text: str) -> Dict[str, Any]:
        """Parse a TSV response body into {"data": rows, "header": cols}.

        BUGFIX: splits with splitlines() instead of split("\\n") so CRLF
        line endings from the server no longer leave a trailing "\\r" on
        the last column of every row.
        """
        lines = text.strip().splitlines()
        if len(lines) < 2:
            # Header-only or empty body: no data rows to report.
            return {"data": [], "error": "No data returned"}

        # First line is the column header.
        header = lines[0].split("\t")

        # Remaining non-blank lines become one dict per row; zip truncates
        # to the shorter side, so surplus values beyond the header are
        # ignored (same behavior as the original index-bounded loop).
        data = []
        for line in lines[1:]:
            if line.strip():
                values = line.split("\t")
                data.append(dict(zip(header, values)))

        return {"data": data, "header": header}

    def run(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """Execute the tool with given arguments."""
        # Validate required parameters before touching the network.
        for param in self.required:
            if param not in arguments:
                return {"error": f"Missing required parameter: {param}"}

        url = self._build_url(arguments)
        params = self._build_params(arguments)
        return self._make_request(url, params)
@@ -0,0 +1,59 @@
1
+ """
2
+ ADMETAnalyzerAgent
3
+
4
+ AI agent that analyzes ADMET data and provides insights on drug-likeness and safety profiles
5
+ """
6
+
7
+ from typing import Any, Optional, Callable
8
+ from ._shared_client import get_shared_client
9
+
10
+
11
def ADMETAnalyzerAgent(
    compounds: str,
    admet_data: str,
    disease_context: Optional[str] = "",
    *,
    stream_callback: Optional[Callable[[str], None]] = None,
    use_cache: bool = False,
    validate: bool = True,
) -> Any:
    """AI agent that analyzes ADMET data and provides insights on drug-likeness and safety profiles

    Parameters
    ----------
    compounds : str
        List of compounds to analyze (comma-separated)
    admet_data : str
        ADMET data from computational tools to analyze
    disease_context : str
        Disease context for ADMET evaluation
    stream_callback : Callable, optional
        Callback for streaming output
    use_cache : bool, default False
        Enable caching
    validate : bool, default True
        Validate parameters

    Returns
    -------
    Any
    """
    # Build the tool-call payload first, then hand it to the shared
    # ToolUniverse client for execution.
    call_spec = {
        "name": "ADMETAnalyzerAgent",
        "arguments": {
            "compounds": compounds,
            "admet_data": admet_data,
            "disease_context": disease_context,
        },
    }
    client = get_shared_client()
    return client.run_one_function(
        call_spec,
        stream_callback=stream_callback,
        use_cache=use_cache,
        validate=validate,
    )


__all__ = ["ADMETAnalyzerAgent"]
@@ -10,9 +10,9 @@ from ._shared_client import get_shared_client
10
10
 
11
11
  def ArXiv_search_papers(
12
12
  query: str,
13
- limit: int,
14
- sort_by: str,
15
- sort_order: str,
13
+ limit: Optional[int] = 10,
14
+ sort_by: Optional[str] = "relevance",
15
+ sort_order: Optional[str] = "descending",
16
16
  *,
17
17
  stream_callback: Optional[Callable[[str], None]] = None,
18
18
  use_cache: bool = False,
@@ -0,0 +1,52 @@
1
+ """
2
+ CMA_Guidelines_Search
3
+
4
+ Search Canadian Medical Association (CMA) Infobase guidelines. Contains over 1200 evidence-based ...
5
+ """
6
+
7
+ from typing import Any, Optional, Callable
8
+ from ._shared_client import get_shared_client
9
+
10
+
11
def CMA_Guidelines_Search(
    query: str,
    limit: Optional[int] = 10,
    *,
    stream_callback: Optional[Callable[[str], None]] = None,
    use_cache: bool = False,
    validate: bool = True,
) -> list[Any]:
    """
    Search Canadian Medical Association (CMA) Infobase guidelines. Contains over 1200 evidence-based ...

    Parameters
    ----------
    query : str
        Medical condition, treatment, or clinical topic to search for in CMA guidelin...
    limit : int, optional
        Maximum number of guidelines to return (default: 10)
    stream_callback : Callable, optional
        Callback for streaming output
    use_cache : bool, default False
        Enable caching
    validate : bool, default True
        Validate parameters

    Returns
    -------
    list[Any]
    """
    # BUGFIX: `limit` was a required positional parameter even though its
    # docstring documents "default: 10"; it now defaults to 10, matching
    # the other generated search wrappers updated in this release (e.g.
    # ArXiv_search_papers). Backward compatible: existing callers that
    # pass limit explicitly are unaffected.
    return get_shared_client().run_one_function(
        {
            "name": "CMA_Guidelines_Search",
            "arguments": {"query": query, "limit": limit},
        },
        stream_callback=stream_callback,
        use_cache=use_cache,
        validate=validate,
    )


__all__ = ["CMA_Guidelines_Search"]
@@ -11,9 +11,9 @@ from ._shared_client import get_shared_client
11
11
  def CORE_search_papers(
12
12
  query: str,
13
13
  limit: int,
14
- year_from: int,
15
- year_to: int,
16
- language: str,
14
+ year_from: Optional[int] = None,
15
+ year_to: Optional[int] = None,
16
+ language: Optional[str] = None,
17
17
  *,
18
18
  stream_callback: Optional[Callable[[str], None]] = None,
19
19
  use_cache: bool = False,
@@ -0,0 +1,52 @@
1
+ """
2
+ ClinVar_search_variants
3
+
4
+ Search ClinVar via NCBI E-utilities (esearch→esummary) and return concise variant records for a q...
5
+ """
6
+
7
+ from typing import Any, Optional, Callable
8
+ from ._shared_client import get_shared_client
9
+
10
+
11
def ClinVar_search_variants(
    query: str,
    retmax: Optional[int] = 5,
    *,
    stream_callback: Optional[Callable[[str], None]] = None,
    use_cache: bool = False,
    validate: bool = True,
) -> list[Any]:
    """Search ClinVar via NCBI E-utilities (esearch→esummary) and return concise variant records for a q...

    Parameters
    ----------
    query : str
        ClinVar search term (e.g., BRCA1).
    retmax : int
        Max records.
    stream_callback : Callable, optional
        Callback for streaming output
    use_cache : bool, default False
        Enable caching
    validate : bool, default True
        Validate parameters

    Returns
    -------
    list[Any]
    """
    # Delegate to the shared ToolUniverse client with a prebuilt payload.
    payload = {
        "name": "ClinVar_search_variants",
        "arguments": {"query": query, "retmax": retmax},
    }
    return get_shared_client().run_one_function(
        payload,
        stream_callback=stream_callback,
        use_cache=use_cache,
        validate=validate,
    )


__all__ = ["ClinVar_search_variants"]
@@ -0,0 +1,63 @@
1
+ """
2
+ ClinicalTrialDesignAgent
3
+
4
+ AI agent that designs clinical trial protocols based on preclinical data and regulatory requirements
5
+ """
6
+
7
+ from typing import Any, Optional, Callable
8
+ from ._shared_client import get_shared_client
9
+
10
+
11
def ClinicalTrialDesignAgent(
    drug_name: str,
    indication: str,
    preclinical_data: Optional[str] = "",
    target_population: Optional[str] = "General adult population",
    *,
    stream_callback: Optional[Callable[[str], None]] = None,
    use_cache: bool = False,
    validate: bool = True,
) -> Any:
    """AI agent that designs clinical trial protocols based on preclinical data and regulatory requirements

    Parameters
    ----------
    drug_name : str
        Name of the drug candidate
    indication : str
        Disease indication
    preclinical_data : str
        Preclinical efficacy and safety data
    target_population : str
        Target patient population
    stream_callback : Callable, optional
        Callback for streaming output
    use_cache : bool, default False
        Enable caching
    validate : bool, default True
        Validate parameters

    Returns
    -------
    Any
    """
    # Collect the agent arguments, then run through the shared client.
    agent_args = {
        "drug_name": drug_name,
        "indication": indication,
        "preclinical_data": preclinical_data,
        "target_population": target_population,
    }
    return get_shared_client().run_one_function(
        {"name": "ClinicalTrialDesignAgent", "arguments": agent_args},
        stream_callback=stream_callback,
        use_cache=use_cache,
        validate=validate,
    )


__all__ = ["ClinicalTrialDesignAgent"]
@@ -0,0 +1,59 @@
1
+ """
2
+ CompoundDiscoveryAgent
3
+
4
+ AI agent that analyzes potential drug compounds using multiple strategies and LLM reasoning
5
+ """
6
+
7
+ from typing import Any, Optional, Callable
8
+ from ._shared_client import get_shared_client
9
+
10
+
11
def CompoundDiscoveryAgent(
    disease_name: str,
    targets: str,
    context: Optional[str] = "",
    *,
    stream_callback: Optional[Callable[[str], None]] = None,
    use_cache: bool = False,
    validate: bool = True,
) -> Any:
    """AI agent that analyzes potential drug compounds using multiple strategies and LLM reasoning

    Parameters
    ----------
    disease_name : str
        Name of the disease
    targets : str
        List of therapeutic targets (comma-separated)
    context : str
        Additional context or specific requirements
    stream_callback : Callable, optional
        Callback for streaming output
    use_cache : bool, default False
        Enable caching
    validate : bool, default True
        Validate parameters

    Returns
    -------
    Any
    """
    # Forward the call to the shared ToolUniverse client.
    client = get_shared_client()
    return client.run_one_function(
        {
            "name": "CompoundDiscoveryAgent",
            "arguments": {
                "disease_name": disease_name,
                "targets": targets,
                "context": context,
            },
        },
        stream_callback=stream_callback,
        use_cache=use_cache,
        validate=validate,
    )


__all__ = ["CompoundDiscoveryAgent"]
@@ -10,8 +10,8 @@ from ._shared_client import get_shared_client
10
10
 
11
11
  def DOAJ_search_articles(
12
12
  query: str,
13
- max_results: int,
14
- type: str,
13
+ max_results: Optional[int] = 10,
14
+ type: Optional[str] = "articles",
15
15
  *,
16
16
  stream_callback: Optional[Callable[[str], None]] = None,
17
17
  use_cache: bool = False,