tooluniverse 1.0.9.1__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tooluniverse might be problematic; see the registry's advisory page for more details.

Files changed (57)
  1. tooluniverse/admetai_tool.py +1 -1
  2. tooluniverse/agentic_tool.py +65 -17
  3. tooluniverse/base_tool.py +19 -8
  4. tooluniverse/boltz_tool.py +1 -1
  5. tooluniverse/cache/result_cache_manager.py +167 -12
  6. tooluniverse/compose_scripts/drug_safety_analyzer.py +1 -1
  7. tooluniverse/compose_scripts/multi_agent_literature_search.py +1 -1
  8. tooluniverse/compose_scripts/output_summarizer.py +4 -4
  9. tooluniverse/compose_scripts/tool_graph_composer.py +1 -1
  10. tooluniverse/compose_scripts/tool_metadata_generator.py +1 -1
  11. tooluniverse/compose_tool.py +9 -9
  12. tooluniverse/core_tool.py +2 -2
  13. tooluniverse/ctg_tool.py +4 -4
  14. tooluniverse/custom_tool.py +1 -1
  15. tooluniverse/dataset_tool.py +2 -2
  16. tooluniverse/default_config.py +1 -1
  17. tooluniverse/enrichr_tool.py +14 -14
  18. tooluniverse/execute_function.py +520 -15
  19. tooluniverse/extended_hooks.py +4 -4
  20. tooluniverse/gene_ontology_tool.py +1 -1
  21. tooluniverse/generate_tools.py +3 -3
  22. tooluniverse/humanbase_tool.py +10 -10
  23. tooluniverse/logging_config.py +2 -2
  24. tooluniverse/mcp_client_tool.py +57 -129
  25. tooluniverse/mcp_integration.py +52 -49
  26. tooluniverse/mcp_tool_registry.py +147 -528
  27. tooluniverse/openalex_tool.py +8 -8
  28. tooluniverse/openfda_tool.py +2 -2
  29. tooluniverse/output_hook.py +15 -15
  30. tooluniverse/package_tool.py +1 -1
  31. tooluniverse/pmc_tool.py +2 -2
  32. tooluniverse/remote/boltz/boltz_mcp_server.py +1 -1
  33. tooluniverse/remote/depmap_24q2/depmap_24q2_mcp_tool.py +2 -2
  34. tooluniverse/remote/immune_compass/compass_tool.py +3 -3
  35. tooluniverse/remote/pinnacle/pinnacle_tool.py +2 -2
  36. tooluniverse/remote/transcriptformer/transcriptformer_tool.py +3 -3
  37. tooluniverse/remote/uspto_downloader/uspto_downloader_mcp_server.py +3 -3
  38. tooluniverse/remote_tool.py +4 -4
  39. tooluniverse/scripts/filter_tool_files.py +2 -2
  40. tooluniverse/smcp.py +93 -12
  41. tooluniverse/smcp_server.py +97 -18
  42. tooluniverse/space/__init__.py +46 -0
  43. tooluniverse/space/loader.py +133 -0
  44. tooluniverse/space/validator.py +353 -0
  45. tooluniverse/tool_finder_embedding.py +2 -2
  46. tooluniverse/tool_finder_keyword.py +9 -9
  47. tooluniverse/tool_finder_llm.py +6 -6
  48. tooluniverse/tools/_shared_client.py +3 -3
  49. tooluniverse/url_tool.py +1 -1
  50. tooluniverse/uspto_tool.py +1 -1
  51. tooluniverse/utils.py +10 -10
  52. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/METADATA +6 -2
  53. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/RECORD +57 -54
  54. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/WHEEL +0 -0
  55. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/entry_points.txt +0 -0
  56. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/licenses/LICENSE +0 -0
  57. {tooluniverse-1.0.9.1.dist-info → tooluniverse-1.0.10.dist-info}/top_level.txt +0 -0
@@ -55,7 +55,7 @@ class ADMETAITool(BaseTool):
55
55
  Args:
56
56
  smiles: The SMILES string(s) of the molecule(s).
57
57
 
58
- Returns:
58
+ Returns
59
59
  A dictionary mapping each SMILES string to a subdictionary of
60
60
  selected ADMET properties and their predicted values.
61
61
  """
@@ -38,7 +38,7 @@ class AgenticTool(BaseTool):
38
38
  """
39
39
  Check if any API keys are available across all supported API types.
40
40
 
41
- Returns:
41
+ Returns
42
42
  bool: True if at least one API type has all required keys, False otherwise
43
43
  """
44
44
  for _api_type, required_vars in API_KEY_ENV_VARS.items():
@@ -74,16 +74,44 @@ class AgenticTool(BaseTool):
74
74
  # Get configuration from nested 'configs' dict or fallback to top-level
75
75
  configs = tool_config.get("configs", {})
76
76
 
77
- # Helper function to get config values with fallback
77
+ # Helper function to get config values with Space support
78
78
  def get_config(key: str, default: Any) -> Any:
79
- return configs.get(key, tool_config.get(key, default))
79
+ tool_value = configs.get(key, tool_config.get(key))
80
+
81
+ # Get environment value directly (avoid calling self method during init)
82
+ env_value = None
83
+ if key == "api_type":
84
+ # Direct use of AgenticTool api_type values from Space
85
+ env_value = os.getenv("TOOLUNIVERSE_LLM_DEFAULT_PROVIDER")
86
+ elif key == "model_id":
87
+ task = tool_config.get("llm_task", "default").upper()
88
+ env_value = os.getenv(f"TOOLUNIVERSE_LLM_MODEL_{task}") or os.getenv(
89
+ "TOOLUNIVERSE_LLM_MODEL_DEFAULT"
90
+ )
91
+ elif key == "temperature":
92
+ temp_str = os.getenv("TOOLUNIVERSE_LLM_TEMPERATURE")
93
+ env_value = float(temp_str) if temp_str else None
94
+
95
+ mode = os.getenv("TOOLUNIVERSE_LLM_CONFIG_MODE", "default")
96
+
97
+ if mode == "default":
98
+ # Space as default: tool config > env > built-in default
99
+ if tool_value is not None:
100
+ return tool_value
101
+ if env_value is not None:
102
+ return env_value
103
+ return default
104
+ else: # mode == "fallback"
105
+ # Space as fallback: tool config > built-in default (env as fallback later)
106
+ if tool_value is not None:
107
+ return tool_value
108
+ return default
80
109
 
81
110
  # LLM configuration
82
111
  self._api_type: str = get_config("api_type", "CHATGPT")
83
112
  self._model_id: str = get_config("model_id", "o1-mini")
84
113
  self._temperature: Optional[float] = get_config("temperature", 0.1)
85
- # Ignore configured max_new_tokens; client will resolve per model/env
86
- self._max_new_tokens: Optional[int] = None
114
+ # max_new_tokens is handled by LLM client automatically
87
115
  self._return_json: bool = get_config("return_json", False)
88
116
  self._max_retries: int = get_config("max_retries", 5)
89
117
  self._retry_delay: int = get_config("retry_delay", 5)
@@ -96,9 +124,8 @@ class AgenticTool(BaseTool):
96
124
 
97
125
  # Global fallback configuration
98
126
  self._use_global_fallback: bool = get_config("use_global_fallback", True)
99
- self._global_fallback_chain: List[Dict[str, str]] = (
100
- self._get_global_fallback_chain()
101
- )
127
+ # Initialize fallback chain later after environment config is set
128
+ self._global_fallback_chain: List[Dict[str, str]] = []
102
129
 
103
130
  # Gemini model configuration (optional; env override)
104
131
  self._gemini_model_id: str = get_config(
@@ -133,11 +160,40 @@ class AgenticTool(BaseTool):
133
160
  self._current_api_type = None
134
161
  self._current_model_id = None
135
162
 
163
+ # Store environment config for fallback mode
164
+ # Direct use of AgenticTool api_type values from Space
165
+ self._env_api_type = os.getenv("TOOLUNIVERSE_LLM_DEFAULT_PROVIDER")
166
+
167
+ task = tool_config.get("llm_task", "default").upper()
168
+ self._env_model_id = os.getenv(f"TOOLUNIVERSE_LLM_MODEL_{task}") or os.getenv(
169
+ "TOOLUNIVERSE_LLM_MODEL_DEFAULT"
170
+ )
171
+
172
+ # Initialize global fallback chain now that environment config is set
173
+ self._global_fallback_chain = self._get_global_fallback_chain()
174
+
136
175
  # Try primary API first, then fallback if configured
137
176
  self._try_initialize_api()
138
177
 
139
178
  def _get_global_fallback_chain(self) -> List[Dict[str, str]]:
140
179
  """Get the global fallback chain from environment or use default."""
180
+ mode = os.getenv("TOOLUNIVERSE_LLM_CONFIG_MODE", "default")
181
+
182
+ # In fallback mode, prepend environment config to fallback chain
183
+ if mode == "fallback" and self._env_api_type and self._env_model_id:
184
+ env_fallback = {
185
+ "api_type": self._env_api_type,
186
+ "model_id": self._env_model_id,
187
+ }
188
+
189
+ # Check if env fallback is different from primary config
190
+ if (
191
+ env_fallback["api_type"] != self._api_type
192
+ or env_fallback["model_id"] != self._model_id
193
+ ):
194
+ # Add environment config as first fallback
195
+ return [env_fallback] + DEFAULT_FALLBACK_CHAIN.copy()
196
+
141
197
  # Check environment variable for custom fallback chain
142
198
  env_chain = os.getenv("AGENTIC_TOOL_FALLBACK_CHAIN")
143
199
  if env_chain:
@@ -257,8 +313,6 @@ class AgenticTool(BaseTool):
257
313
  raise ValueError(
258
314
  f"Unsupported API type: {self._api_type}. Supported types: {supported_api_types}"
259
315
  )
260
- if self._max_new_tokens is not None and self._max_new_tokens <= 0:
261
- raise ValueError("max_new_tokens must be positive or None")
262
316
 
263
317
  # ------------------------------------------------------------------ public API --------------
264
318
  def run(
@@ -386,7 +440,6 @@ class AgenticTool(BaseTool):
386
440
  "api_type": self._api_type,
387
441
  "model_id": self._model_id,
388
442
  "temperature": self._temperature,
389
- "max_new_tokens": self._max_new_tokens,
390
443
  },
391
444
  "execution_time_seconds": execution_time,
392
445
  "timestamp": start_time.isoformat(),
@@ -417,7 +470,6 @@ class AgenticTool(BaseTool):
417
470
  "api_type": self._api_type,
418
471
  "model_id": self._model_id,
419
472
  "temperature": self._temperature,
420
- "max_new_tokens": self._max_new_tokens,
421
473
  },
422
474
  "execution_time_seconds": execution_time,
423
475
  "timestamp": start_time.isoformat(),
@@ -442,7 +494,6 @@ class AgenticTool(BaseTool):
442
494
  "api_type": self._api_type,
443
495
  "model_id": self._model_id,
444
496
  "temperature": self._temperature,
445
- "max_new_tokens": self._max_new_tokens,
446
497
  },
447
498
  "execution_time_seconds": execution_time,
448
499
  },
@@ -511,7 +562,6 @@ class AgenticTool(BaseTool):
511
562
  "api_type": self._api_type,
512
563
  "model_id": self._model_id,
513
564
  "temperature": self._temperature,
514
- "max_new_tokens": self._max_new_tokens,
515
565
  "return_json": self._return_json,
516
566
  "max_retries": self._max_retries,
517
567
  "retry_delay": self._retry_delay,
@@ -595,9 +645,7 @@ class AgenticTool(BaseTool):
595
645
  def estimate_token_usage(self, arguments: Dict[str, Any]) -> Dict[str, int]:
596
646
  prompt = self._format_prompt(arguments)
597
647
  estimated_input_tokens = len(prompt) // 4
598
- estimated_max_output_tokens = (
599
- self._max_new_tokens if self._max_new_tokens is not None else 2048
600
- )
648
+ estimated_max_output_tokens = 2048 # Default estimation
601
649
  estimated_total_tokens = estimated_input_tokens + estimated_max_output_tokens
602
650
  return {
603
651
  "estimated_input_tokens": estimated_input_tokens,
tooluniverse/base_tool.py CHANGED
@@ -38,7 +38,7 @@ class BaseTool:
38
38
 
39
39
  Override this method in subclasses to specify a custom defaults file.
40
40
 
41
- Returns:
41
+ Returns
42
42
  Path or resource object pointing to the defaults file
43
43
  """
44
44
  tool_type = cls.__name__
@@ -154,7 +154,7 @@ class BaseTool:
154
154
  def get_required_parameters(self):
155
155
  """
156
156
  Retrieve required parameters from the endpoint definition.
157
- Returns:
157
+ Returns
158
158
  list: List of required parameters for the given endpoint.
159
159
  """
160
160
  schema = self.tool_config.get("parameter", {})
@@ -172,7 +172,7 @@ class BaseTool:
172
172
  Args:
173
173
  arguments: Dictionary of arguments to validate
174
174
 
175
- Returns:
175
+ Returns
176
176
  ToolError if validation fails, None if validation passes
177
177
  """
178
178
  schema = self.tool_config.get("parameter", {})
@@ -207,7 +207,7 @@ class BaseTool:
207
207
  Args:
208
208
  exception: The raw exception to classify
209
209
 
210
- Returns:
210
+ Returns
211
211
  Structured ToolError instance
212
212
  """
213
213
  error_str = str(exception).lower()
@@ -261,7 +261,7 @@ class BaseTool:
261
261
  Args:
262
262
  arguments: Dictionary of arguments for the tool call
263
263
 
264
- Returns:
264
+ Returns
265
265
  String cache key
266
266
  """
267
267
  # Include tool name and arguments in cache key
@@ -276,7 +276,7 @@ class BaseTool:
276
276
  """
277
277
  Check if this tool supports streaming responses.
278
278
 
279
- Returns:
279
+ Returns
280
280
  True if tool supports streaming, False otherwise
281
281
  """
282
282
  return self.tool_config.get("supports_streaming", False)
@@ -285,11 +285,22 @@ class BaseTool:
285
285
  """
286
286
  Check if this tool's results can be cached.
287
287
 
288
- Returns:
288
+ Returns
289
289
  True if tool results can be cached, False otherwise
290
290
  """
291
291
  return self.tool_config.get("cacheable", True)
292
292
 
293
+ def get_batch_concurrency_limit(self) -> int:
294
+ """Return maximum concurrent executions allowed during batch runs (0 = unlimited)."""
295
+ limit = self.tool_config.get("batch_max_concurrency")
296
+ if limit is None:
297
+ return 0
298
+ try:
299
+ parsed = int(limit)
300
+ except (TypeError, ValueError):
301
+ return 0
302
+ return max(0, parsed)
303
+
293
304
  def get_cache_namespace(self) -> str:
294
305
  """Return cache namespace identifier for this tool."""
295
306
  return self.tool_config.get("name", self.__class__.__name__)
@@ -326,7 +337,7 @@ class BaseTool:
326
337
  """
327
338
  Get comprehensive information about this tool.
328
339
 
329
- Returns:
340
+ Returns
330
341
  Dictionary containing tool metadata
331
342
  """
332
343
  return {
@@ -91,7 +91,7 @@ class Boltz2DockingTool(BaseTool):
91
91
  - other optional boltz CLI flags (e.g., 'recycling_steps').
92
92
  timeout (int): The maximum time in seconds to wait for the Boltz command to complete.
93
93
 
94
- Returns:
94
+ Returns
95
95
  dict: A dictionary containing the path to the predicted structure and affinity data, or an error.
96
96
  """
97
97
  arguments = arguments or {}
@@ -6,9 +6,11 @@ from __future__ import annotations
6
6
 
7
7
  import logging
8
8
  import os
9
+ import queue
10
+ import threading
9
11
  import time
10
12
  from dataclasses import dataclass
11
- from typing import Any, Dict, Iterator, Optional
13
+ from typing import Any, Dict, Iterator, Optional, Sequence
12
14
 
13
15
  from .memory_cache import LRUCache, SingleFlight
14
16
  from .sqlite_backend import CacheEntry, PersistentCache
@@ -36,6 +38,8 @@ class ResultCacheManager:
36
38
  persistence_enabled: bool = True,
37
39
  singleflight: bool = True,
38
40
  default_ttl: Optional[int] = None,
41
+ async_persist: Optional[bool] = None,
42
+ async_queue_size: int = 10000,
39
43
  ):
40
44
  self.enabled = enabled
41
45
  self.default_ttl = default_ttl
@@ -55,6 +59,7 @@ class ResultCacheManager:
55
59
  self.persistent = None
56
60
 
57
61
  self.singleflight = SingleFlight() if singleflight else None
62
+ self._init_async_persistence(async_persist, async_queue_size)
58
63
 
59
64
  # ------------------------------------------------------------------
60
65
  # Helper methods
@@ -69,6 +74,33 @@ class ResultCacheManager:
69
74
  def _ttl_or_default(self, ttl: Optional[int]) -> Optional[int]:
70
75
  return ttl if ttl is not None else self.default_ttl
71
76
 
77
+ def _init_async_persistence(
78
+ self, async_persist: Optional[bool], async_queue_size: int
79
+ ) -> None:
80
+ if async_persist is None:
81
+ async_persist = os.getenv(
82
+ "TOOLUNIVERSE_CACHE_ASYNC_PERSIST", "true"
83
+ ).lower() in ("true", "1", "yes")
84
+
85
+ self.async_persist = (
86
+ async_persist and self.persistent is not None and self.enabled
87
+ )
88
+
89
+ self._persist_queue: Optional["queue.Queue[tuple[str, Dict[str, Any]]]"] = None
90
+ self._worker_thread: Optional[threading.Thread] = None
91
+
92
+ if not self.async_persist:
93
+ return
94
+
95
+ queue_size = max(1, async_queue_size)
96
+ self._persist_queue = queue.Queue(maxsize=queue_size)
97
+ self._worker_thread = threading.Thread(
98
+ target=self._async_worker,
99
+ name="ResultCacheWriter",
100
+ daemon=True,
101
+ )
102
+ self._worker_thread.start()
103
+
72
104
  # ------------------------------------------------------------------
73
105
  # Public API
74
106
  # ------------------------------------------------------------------
@@ -126,17 +158,15 @@ class ResultCacheManager:
126
158
  )
127
159
 
128
160
  if self.persistent:
129
- try:
130
- self.persistent.set(
131
- composed,
132
- value,
133
- namespace=namespace,
134
- version=version,
135
- ttl=effective_ttl,
136
- )
137
- except Exception as exc:
138
- logger.warning("Persistent cache write failed: %s", exc)
139
- self.persistent = None
161
+ payload = {
162
+ "composed": composed,
163
+ "value": value,
164
+ "namespace": namespace,
165
+ "version": version,
166
+ "ttl": effective_ttl,
167
+ }
168
+ if not self._schedule_persist("set", payload):
169
+ self._perform_persist_set(**payload)
140
170
 
141
171
  def delete(self, *, namespace: str, version: str, cache_key: str):
142
172
  composed = self.compose_key(namespace, version, cache_key)
@@ -162,10 +192,40 @@ class ResultCacheManager:
162
192
 
163
193
  if self.persistent:
164
194
  try:
195
+ self.flush()
165
196
  self.persistent.clear(namespace=namespace)
166
197
  except Exception as exc:
167
198
  logger.warning("Persistent cache clear failed: %s", exc)
168
199
 
200
+ def bulk_get(self, requests: Sequence[Dict[str, str]]) -> Dict[str, Any]:
201
+ """Fetch multiple cache entries at once.
202
+
203
+ Args:
204
+ requests: Iterable of dicts containing ``namespace``, ``version`` and ``cache_key``.
205
+
206
+ Returns
207
+ Mapping of composed cache keys to cached values.
208
+ """
209
+
210
+ if not self.enabled:
211
+ return {}
212
+
213
+ hits: Dict[str, Any] = {}
214
+ for request in requests:
215
+ namespace = request["namespace"]
216
+ version = request["version"]
217
+ cache_key = request["cache_key"]
218
+ value = self.get(
219
+ namespace=namespace,
220
+ version=version,
221
+ cache_key=cache_key,
222
+ )
223
+ if value is not None:
224
+ composed = self.compose_key(namespace, version, cache_key)
225
+ hits[composed] = value
226
+
227
+ return hits
228
+
169
229
  def stats(self) -> Dict[str, Any]:
170
230
  return {
171
231
  "enabled": self.enabled,
@@ -173,11 +233,18 @@ class ResultCacheManager:
173
233
  "persistent": (
174
234
  self.persistent.stats() if self.persistent else {"enabled": False}
175
235
  ),
236
+ "async_persist": self.async_persist,
237
+ "pending_writes": (
238
+ self._persist_queue.qsize()
239
+ if self.async_persist and self._persist_queue is not None
240
+ else 0
241
+ ),
176
242
  }
177
243
 
178
244
  def dump(self, namespace: Optional[str] = None) -> Iterator[Dict[str, Any]]:
179
245
  if not self.persistent:
180
246
  return iter([])
247
+ self.flush()
181
248
  return (
182
249
  {
183
250
  "cache_key": entry.key,
@@ -220,12 +287,100 @@ class ResultCacheManager:
220
287
  return _DummyContext()
221
288
 
222
289
  def close(self):
290
+ self.flush()
291
+ self._shutdown_async_worker()
223
292
  if self.persistent:
224
293
  try:
225
294
  self.persistent.close()
226
295
  except Exception as exc:
227
296
  logger.warning("Persistent cache close failed: %s", exc)
228
297
 
298
+ # ------------------------------------------------------------------
299
+ # Async persistence helpers
300
+ # ------------------------------------------------------------------
301
+
302
+ def flush(self):
303
+ if self.async_persist and self._persist_queue is not None:
304
+ self._persist_queue.join()
305
+
306
+ def _schedule_persist(self, op: str, payload: Dict[str, Any]) -> bool:
307
+ if not self.async_persist or self._persist_queue is None:
308
+ return False
309
+ try:
310
+ self._persist_queue.put_nowait((op, payload))
311
+ return True
312
+ except queue.Full:
313
+ logger.warning(
314
+ "Async cache queue full; falling back to synchronous persistence"
315
+ )
316
+ return False
317
+
318
+ def _async_worker(self):
319
+ queue_ref = self._persist_queue
320
+ if queue_ref is None:
321
+ return
322
+
323
+ while True:
324
+ try:
325
+ op, payload = queue_ref.get()
326
+ except Exception:
327
+ continue
328
+
329
+ if op == "__STOP__":
330
+ queue_ref.task_done()
331
+ break
332
+
333
+ try:
334
+ if op == "set":
335
+ self._perform_persist_set(**payload)
336
+ else:
337
+ logger.warning("Unknown async cache operation: %s", op)
338
+ except Exception as exc:
339
+ logger.warning("Async cache write failed: %s", exc)
340
+ # Disable async persistence to avoid repeated failures
341
+ self.async_persist = False
342
+ finally:
343
+ queue_ref.task_done()
344
+
345
+ def _perform_persist_set(
346
+ self,
347
+ *,
348
+ composed: str,
349
+ value: Any,
350
+ namespace: str,
351
+ version: str,
352
+ ttl: Optional[int],
353
+ ):
354
+ if not self.persistent:
355
+ return
356
+ try:
357
+ self.persistent.set(
358
+ composed,
359
+ value,
360
+ namespace=namespace,
361
+ version=version,
362
+ ttl=ttl,
363
+ )
364
+ except Exception as exc:
365
+ logger.warning("Persistent cache write failed: %s", exc)
366
+ self.persistent = None
367
+ raise
368
+
369
+ def _shutdown_async_worker(self) -> None:
370
+ if not self.async_persist or self._persist_queue is None:
371
+ return
372
+
373
+ try:
374
+ self._persist_queue.put_nowait(("__STOP__", {}))
375
+ except queue.Full:
376
+ self._persist_queue.put(("__STOP__", {}))
377
+
378
+ if self._worker_thread is not None:
379
+ self._worker_thread.join(timeout=5)
380
+
381
+ self._worker_thread = None
382
+ self._persist_queue = None
383
+
229
384
 
230
385
  class _DummyContext:
231
386
  def __enter__(self):
@@ -13,7 +13,7 @@ def compose(arguments, tooluniverse, call_tool):
13
13
  tooluniverse: ToolUniverse instance
14
14
  call_tool: Function to call other tools
15
15
 
16
- Returns:
16
+ Returns
17
17
  dict: Comprehensive drug safety analysis result
18
18
  """
19
19
  drug_name = arguments["drug_name"]
@@ -46,7 +46,7 @@ def compose(arguments, tooluniverse, call_tool, stream_callback=None):
46
46
  call_tool (function): Function to call other tools
47
47
  stream_callback (callable, optional): Callback function for streaming output
48
48
 
49
- Returns:
49
+ Returns
50
50
  dict: The result of the multi-agent search
51
51
  """
52
52
  query = arguments.get("query", "")
@@ -39,7 +39,7 @@ def compose(arguments: Dict[str, Any], tooluniverse, call_tool) -> Dict[str, Any
39
39
  tooluniverse: ToolUniverse instance for tool execution
40
40
  call_tool: Function to call other tools within the composition
41
41
 
42
- Returns:
42
+ Returns
43
43
  Dict[str, Any]: Dictionary containing:
44
44
  - success (bool): Whether summarization was successful
45
45
  - original_length (int): Length of original output
@@ -165,7 +165,7 @@ def _chunk_output(text: str, chunk_size: int) -> List[str]:
165
165
  text (str): The text to be chunked
166
166
  chunk_size (int): Maximum size of each chunk
167
167
 
168
- Returns:
168
+ Returns
169
169
  List[str]: List of text chunks
170
170
  """
171
171
  if len(text) <= chunk_size:
@@ -208,7 +208,7 @@ def _summarize_chunk(
208
208
  focus_areas (str): Areas to focus on during summarization
209
209
  call_tool: Function to call the summarizer tool
210
210
 
211
- Returns:
211
+ Returns
212
212
  str: Summarized chunk text, or empty string if summarization fails
213
213
  """
214
214
  try:
@@ -296,7 +296,7 @@ def _merge_summaries(
296
296
  max_length (int): Maximum length of final summary
297
297
  call_tool: Function to call the summarizer tool
298
298
 
299
- Returns:
299
+ Returns
300
300
  str: Final merged summary
301
301
  """
302
302
  if not chunk_summaries:
@@ -18,7 +18,7 @@ def compose(arguments, tooluniverse, call_tool):
18
18
  tooluniverse: ToolUniverse instance
19
19
  call_tool: Function to call other tools
20
20
 
21
- Returns:
21
+ Returns
22
22
  Dictionary with results and file paths
23
23
  """
24
24
  try:
@@ -13,7 +13,7 @@ def compose(arguments, tooluniverse, call_tool):
13
13
  tooluniverse: ToolUniverse instance
14
14
  call_tool: Function to call other tools
15
15
 
16
- Returns:
16
+ Returns
17
17
  list: List of tool metadata dictionaries (JSON-compatible)
18
18
  """
19
19
  import json
@@ -65,7 +65,7 @@ class ComposeTool(BaseTool):
65
65
  """
66
66
  Automatically discover tool dependencies from composition code.
67
67
 
68
- Returns:
68
+ Returns
69
69
  set: Set of tool names that this composition calls
70
70
  """
71
71
  dependencies = set()
@@ -89,7 +89,7 @@ class ComposeTool(BaseTool):
89
89
  """
90
90
  Create a mapping from tool names to their categories.
91
91
 
92
- Returns:
92
+ Returns
93
93
  dict: Mapping of tool names to category names
94
94
  """
95
95
  tool_to_category = {}
@@ -119,7 +119,7 @@ class ComposeTool(BaseTool):
119
119
  Args:
120
120
  missing_tools (set): Set of missing tool names
121
121
 
122
- Returns:
122
+ Returns
123
123
  tuple: (successfully_loaded, failed_to_load)
124
124
  """
125
125
  if not self.tooluniverse or not self.auto_load_dependencies:
@@ -173,7 +173,7 @@ class ComposeTool(BaseTool):
173
173
  """
174
174
  Load composition code from external Python file.
175
175
 
176
- Returns:
176
+ Returns
177
177
  str: The composition code as a string
178
178
  """
179
179
  if not self.composition_file:
@@ -213,7 +213,7 @@ class ComposeTool(BaseTool):
213
213
  arguments (dict): Input arguments for the composition
214
214
  stream_callback (callable, optional): Callback function for streaming output
215
215
 
216
- Returns:
216
+ Returns
217
217
  Any: Result from the composition execution
218
218
  """
219
219
  if not self.tooluniverse:
@@ -303,7 +303,7 @@ class ComposeTool(BaseTool):
303
303
  Args:
304
304
  stream_callback (callable, optional): Callback function for streaming output
305
305
 
306
- Returns:
306
+ Returns
307
307
  callable: Event emitter function
308
308
  """
309
309
 
@@ -327,7 +327,7 @@ class ComposeTool(BaseTool):
327
327
  arguments (dict): Input arguments
328
328
  stream_callback (callable, optional): Callback function for streaming output
329
329
 
330
- Returns:
330
+ Returns
331
331
  Any: Result from the composition execution
332
332
  """
333
333
  # Resolve file path
@@ -377,7 +377,7 @@ class ComposeTool(BaseTool):
377
377
  arguments (dict): Input arguments
378
378
  stream_callback (callable, optional): Callback function for streaming output
379
379
 
380
- Returns:
380
+ Returns
381
381
  Any: Result from the composition execution
382
382
  """
383
383
  # Initialize execution context
@@ -408,7 +408,7 @@ class ComposeTool(BaseTool):
408
408
  tool_name (str): Name of the tool to call
409
409
  arguments (dict): Arguments to pass to the tool
410
410
 
411
- Returns:
411
+ Returns
412
412
  Any: Result from the tool execution
413
413
  """
414
414
  # Check if tool is available (check both callable_functions and all_tool_dict)