celltype-cli 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- celltype_cli-0.1.0.dist-info/METADATA +267 -0
- celltype_cli-0.1.0.dist-info/RECORD +89 -0
- celltype_cli-0.1.0.dist-info/WHEEL +4 -0
- celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
- celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
- ct/__init__.py +3 -0
- ct/agent/__init__.py +0 -0
- ct/agent/case_studies.py +426 -0
- ct/agent/config.py +523 -0
- ct/agent/doctor.py +544 -0
- ct/agent/knowledge.py +523 -0
- ct/agent/loop.py +99 -0
- ct/agent/mcp_server.py +478 -0
- ct/agent/orchestrator.py +733 -0
- ct/agent/runner.py +656 -0
- ct/agent/sandbox.py +481 -0
- ct/agent/session.py +145 -0
- ct/agent/system_prompt.py +186 -0
- ct/agent/trace_store.py +228 -0
- ct/agent/trajectory.py +169 -0
- ct/agent/types.py +182 -0
- ct/agent/workflows.py +462 -0
- ct/api/__init__.py +1 -0
- ct/api/app.py +211 -0
- ct/api/config.py +120 -0
- ct/api/engine.py +124 -0
- ct/cli.py +1448 -0
- ct/data/__init__.py +0 -0
- ct/data/compute_providers.json +59 -0
- ct/data/cro_database.json +395 -0
- ct/data/downloader.py +238 -0
- ct/data/loaders.py +252 -0
- ct/kb/__init__.py +5 -0
- ct/kb/benchmarks.py +147 -0
- ct/kb/governance.py +106 -0
- ct/kb/ingest.py +415 -0
- ct/kb/reasoning.py +129 -0
- ct/kb/schema_monitor.py +162 -0
- ct/kb/substrate.py +387 -0
- ct/models/__init__.py +0 -0
- ct/models/llm.py +370 -0
- ct/tools/__init__.py +195 -0
- ct/tools/_compound_resolver.py +297 -0
- ct/tools/biomarker.py +368 -0
- ct/tools/cellxgene.py +282 -0
- ct/tools/chemistry.py +1371 -0
- ct/tools/claude.py +390 -0
- ct/tools/clinical.py +1153 -0
- ct/tools/clue.py +249 -0
- ct/tools/code.py +1069 -0
- ct/tools/combination.py +397 -0
- ct/tools/compute.py +402 -0
- ct/tools/cro.py +413 -0
- ct/tools/data_api.py +2114 -0
- ct/tools/design.py +295 -0
- ct/tools/dna.py +575 -0
- ct/tools/experiment.py +604 -0
- ct/tools/expression.py +655 -0
- ct/tools/files.py +957 -0
- ct/tools/genomics.py +1387 -0
- ct/tools/http_client.py +146 -0
- ct/tools/imaging.py +319 -0
- ct/tools/intel.py +223 -0
- ct/tools/literature.py +743 -0
- ct/tools/network.py +422 -0
- ct/tools/notification.py +111 -0
- ct/tools/omics.py +3330 -0
- ct/tools/ops.py +1230 -0
- ct/tools/parity.py +649 -0
- ct/tools/pk.py +245 -0
- ct/tools/protein.py +678 -0
- ct/tools/regulatory.py +643 -0
- ct/tools/remote_data.py +179 -0
- ct/tools/report.py +181 -0
- ct/tools/repurposing.py +376 -0
- ct/tools/safety.py +1280 -0
- ct/tools/shell.py +178 -0
- ct/tools/singlecell.py +533 -0
- ct/tools/statistics.py +552 -0
- ct/tools/structure.py +882 -0
- ct/tools/target.py +901 -0
- ct/tools/translational.py +123 -0
- ct/tools/viability.py +218 -0
- ct/ui/__init__.py +0 -0
- ct/ui/markdown.py +31 -0
- ct/ui/status.py +258 -0
- ct/ui/suggestions.py +567 -0
- ct/ui/terminal.py +1456 -0
- ct/ui/traces.py +112 -0
ct/agent/mcp_server.py
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MCP tool server for the Claude Agent SDK.
|
|
3
|
+
|
|
4
|
+
Wraps the existing ct ToolRegistry so every registered tool is exposed as an
|
|
5
|
+
MCP tool that the Agent SDK can invoke. Also provides a persistent ``run_python``
|
|
6
|
+
sandbox tool for multi-turn code execution.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import json
|
|
11
|
+
import logging
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
from claude_agent_sdk import SdkMcpTool, create_sdk_mcp_server
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger("ct.mcp_server")
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# ---------------------------------------------------------------------------
|
|
21
|
+
# Tool result formatting
|
|
22
|
+
# ---------------------------------------------------------------------------
|
|
23
|
+
|
|
24
|
+
def _format_tool_result(result: Any, max_chars: int = 8000) -> str:
|
|
25
|
+
"""Format a ct tool result dict into text for the Agent SDK."""
|
|
26
|
+
if not isinstance(result, dict):
|
|
27
|
+
text = str(result)
|
|
28
|
+
return text[:max_chars] if len(text) > max_chars else text
|
|
29
|
+
|
|
30
|
+
parts = []
|
|
31
|
+
|
|
32
|
+
# Summary first (most important)
|
|
33
|
+
summary = result.get("summary", "")
|
|
34
|
+
if summary:
|
|
35
|
+
parts.append(summary)
|
|
36
|
+
|
|
37
|
+
# Include key data fields
|
|
38
|
+
skip = {"summary"}
|
|
39
|
+
compact = {"top_hits", "top_terms"}
|
|
40
|
+
for key, val in result.items():
|
|
41
|
+
if key in skip:
|
|
42
|
+
continue
|
|
43
|
+
if key in compact and isinstance(val, (dict, list)):
|
|
44
|
+
count = len(val)
|
|
45
|
+
parts.append(f"{key}: {type(val).__name__} with {count} entries")
|
|
46
|
+
continue
|
|
47
|
+
val_str = str(val)
|
|
48
|
+
if len(val_str) > 1500:
|
|
49
|
+
val_str = val_str[:1500] + f"... [{len(val_str)} chars total]"
|
|
50
|
+
parts.append(f"{key}: {val_str}")
|
|
51
|
+
|
|
52
|
+
text = "\n".join(parts)
|
|
53
|
+
return text[:max_chars] if len(text) > max_chars else text
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# ---------------------------------------------------------------------------
|
|
57
|
+
# Convert ct Tool.parameters to JSON Schema
|
|
58
|
+
# ---------------------------------------------------------------------------
|
|
59
|
+
|
|
60
|
+
# Maps Python / informal type names to JSON Schema type strings.
# NOTE(review): not referenced by the visible code in this module —
# _params_to_json_schema types every property as "string" regardless.
# Presumably retained for external callers or future typed-parameter
# support; confirm before removing.
_PY_TYPE_MAP = {
    "str": "string",
    "string": "string",
    "int": "integer",
    "integer": "integer",
    "float": "number",
    "number": "number",
    "bool": "boolean",
    "boolean": "boolean",
    "list": "array",
    "dict": "object",
}
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _params_to_json_schema(parameters: dict) -> dict:
|
|
75
|
+
"""Convert a ct tool parameters dict to a JSON Schema object.
|
|
76
|
+
|
|
77
|
+
ct tools describe parameters as ``{param_name: description_string}``.
|
|
78
|
+
We map these to string-typed JSON Schema properties since the LLM
|
|
79
|
+
produces string values that tools coerce internally.
|
|
80
|
+
"""
|
|
81
|
+
if not parameters:
|
|
82
|
+
return {"type": "object", "properties": {}}
|
|
83
|
+
|
|
84
|
+
properties = {}
|
|
85
|
+
for name, desc in parameters.items():
|
|
86
|
+
# Extract type hint from description if present (e.g., "gene name (str)")
|
|
87
|
+
prop = {"type": "string", "description": str(desc)}
|
|
88
|
+
properties[name] = prop
|
|
89
|
+
|
|
90
|
+
return {
|
|
91
|
+
"type": "object",
|
|
92
|
+
"properties": properties,
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
# ---------------------------------------------------------------------------
|
|
97
|
+
# Create MCP tools from registry
|
|
98
|
+
# ---------------------------------------------------------------------------
|
|
99
|
+
|
|
100
|
+
def _make_tool_handler(tool_obj, session):
|
|
101
|
+
"""Create an async handler for a registry tool.
|
|
102
|
+
|
|
103
|
+
The handler runs the synchronous tool function in a thread to avoid
|
|
104
|
+
blocking the event loop.
|
|
105
|
+
"""
|
|
106
|
+
|
|
107
|
+
async def handler(args: dict[str, Any]) -> dict[str, Any]:
|
|
108
|
+
# Inject session and prior results (empty for SDK mode)
|
|
109
|
+
call_args = dict(args)
|
|
110
|
+
call_args["_session"] = session
|
|
111
|
+
call_args["_prior_results"] = {}
|
|
112
|
+
|
|
113
|
+
# Coerce string values to numeric types when they look like numbers.
|
|
114
|
+
# MCP sends all parameters as strings, but tools often expect int/float.
|
|
115
|
+
for key, val in list(call_args.items()):
|
|
116
|
+
if key.startswith("_"):
|
|
117
|
+
continue
|
|
118
|
+
if isinstance(val, str):
|
|
119
|
+
# Try int first, then float
|
|
120
|
+
try:
|
|
121
|
+
call_args[key] = int(val)
|
|
122
|
+
continue
|
|
123
|
+
except ValueError:
|
|
124
|
+
pass
|
|
125
|
+
try:
|
|
126
|
+
call_args[key] = float(val)
|
|
127
|
+
continue
|
|
128
|
+
except ValueError:
|
|
129
|
+
pass
|
|
130
|
+
# Boolean coercion
|
|
131
|
+
if val.lower() in ("true", "false"):
|
|
132
|
+
call_args[key] = val.lower() == "true"
|
|
133
|
+
|
|
134
|
+
try:
|
|
135
|
+
result = await asyncio.to_thread(tool_obj.run, **call_args)
|
|
136
|
+
text = _format_tool_result(result)
|
|
137
|
+
except Exception as e:
|
|
138
|
+
logger.warning("Tool %s raised: %s", tool_obj.name, e)
|
|
139
|
+
text = f"Error: {e}"
|
|
140
|
+
return {
|
|
141
|
+
"content": [{"type": "text", "text": text}],
|
|
142
|
+
"is_error": True,
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
return {"content": [{"type": "text", "text": text}]}
|
|
146
|
+
|
|
147
|
+
return handler
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
# ---------------------------------------------------------------------------
|
|
151
|
+
# run_python sandbox tool
|
|
152
|
+
# ---------------------------------------------------------------------------
|
|
153
|
+
|
|
154
|
+
def _make_run_python_handler(session, code_trace_buffer: list | None = None):
    """Create the run_python MCP tool handler with a persistent Sandbox.

    The sandbox is created lazily on first invocation and persists across
    tool calls within one query (variables carry over). A new MCP server
    (and thus a new sandbox) is created for each query, so state resets
    between queries automatically.

    Args:
        session: Active ct Session.
        code_trace_buffer: Optional shared list. When provided, the handler
            appends structured execution metadata after each call. This
            bypasses the SDK message stream (which may truncate tool results)
            so the trace collector gets full code, stdout, and plot data.

    Returns:
        A ``(handler, sandbox)`` tuple — the async MCP handler and the
        Sandbox instance it executes against (exposed for post-query
        inspection by the caller).
    """
    from ct.agent.sandbox import Sandbox

    # Sandbox settings come from the session config, with fallbacks.
    config = session.config
    timeout = int(config.get("sandbox.timeout", 300))
    output_dir = config.get("sandbox.output_dir")
    max_retries = int(config.get("sandbox.max_retries", 2))

    # Optional comma-separated list of extra readable directories;
    # non-existent paths are silently dropped.
    extra_read_dirs = []
    extra_read_str = config.get("sandbox.extra_read_dirs")
    if extra_read_str:
        for d in str(extra_read_str).split(","):
            d = d.strip()
            if d and Path(d).exists():
                extra_read_dirs.append(Path(d))

    sandbox = Sandbox(
        timeout=timeout,
        output_dir=output_dir,
        max_retries=max_retries,
        extra_read_dirs=extra_read_dirs or None,
    )
    sandbox.load_datasets()

    async def handler(args: dict[str, Any]) -> dict[str, Any]:
        code = args.get("code", "")
        if not code.strip():
            return {
                "content": [{"type": "text", "text": "Error: no code provided"}],
                "is_error": True,
            }

        # Sandbox execution is synchronous; run it off the event loop.
        exec_result = await asyncio.to_thread(sandbox.execute, code)

        # Build output text from the execution result fields.
        parts = []
        if exec_result.get("stdout"):
            parts.append(exec_result["stdout"])
        if exec_result.get("error"):
            parts.append(f"Error:\n{exec_result['error']}")
        if exec_result.get("plots"):
            parts.append(f"Plots saved: {exec_result['plots']}")
        if exec_result.get("exports"):
            parts.append(f"Exports saved: {exec_result['exports']}")

        # Check if the code set a `result` variable (the documented
        # convention: result = {'summary': ..., 'answer': ...}).
        result_var = sandbox.get_variable("result")
        if result_var and isinstance(result_var, dict):
            summary = result_var.get("summary", "")
            answer = result_var.get("answer", "")
            if summary:
                parts.append(f"\nResult summary: {summary}")
            if answer:
                parts.append(f"Result answer: {answer}")

        text = "\n".join(parts) if parts else "(no output)"
        # Cap output to keep context manageable
        text = text[:6000]

        # Buffer structured execution metadata for trace capture.
        # This bypasses the SDK stream which may truncate tool results.
        if code_trace_buffer is not None:
            code_trace_buffer.append({
                "tool": "run_python",
                "code": code,
                "stdout": exec_result.get("stdout", ""),
                "plots": exec_result.get("plots", []),
                "exports": exec_result.get("exports", []),
                "error": exec_result.get("error"),
            })

        is_error = bool(exec_result.get("error"))
        return {
            "content": [{"type": "text", "text": text}],
            "is_error": is_error,
        }

    return handler, sandbox
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
# ---------------------------------------------------------------------------
|
|
249
|
+
# run_r tool — first-class R execution via rpy2
|
|
250
|
+
# ---------------------------------------------------------------------------
|
|
251
|
+
|
|
252
|
+
def _make_run_r_handler(code_trace_buffer: list | None = None):
|
|
253
|
+
"""Create the run_r MCP tool handler for R code execution.
|
|
254
|
+
|
|
255
|
+
Uses rpy2 to execute R code. The global R session persists across calls
|
|
256
|
+
(packages stay loaded, variables carry over within a query).
|
|
257
|
+
"""
|
|
258
|
+
|
|
259
|
+
async def handler(args: dict[str, Any]) -> dict[str, Any]:
|
|
260
|
+
code = args.get("code", "")
|
|
261
|
+
if not code.strip():
|
|
262
|
+
return {
|
|
263
|
+
"content": [{"type": "text", "text": "Error: no R code provided"}],
|
|
264
|
+
"is_error": True,
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
def _exec_r(code: str) -> str:
|
|
268
|
+
try:
|
|
269
|
+
import rpy2.robjects as ro
|
|
270
|
+
from rpy2.robjects import numpy2ri, pandas2ri
|
|
271
|
+
|
|
272
|
+
# Use capture.output to grab all printed/cat output
|
|
273
|
+
# Wrap user code in braces so multi-line code works
|
|
274
|
+
escaped = code.replace("\\", "\\\\").replace("'", "\\'")
|
|
275
|
+
wrapper = f"paste(capture.output({{ {code} }}), collapse='\\n')"
|
|
276
|
+
|
|
277
|
+
try:
|
|
278
|
+
captured = ro.r(wrapper)
|
|
279
|
+
output_text = str(captured[0]) if captured else ""
|
|
280
|
+
except Exception:
|
|
281
|
+
# If capture.output fails (syntax error etc), run directly
|
|
282
|
+
# to get the actual R error message
|
|
283
|
+
try:
|
|
284
|
+
result = ro.r(code)
|
|
285
|
+
output_text = str(result)[:3000]
|
|
286
|
+
except Exception as e2:
|
|
287
|
+
return f"R Error: {e2}"
|
|
288
|
+
|
|
289
|
+
# Also get the return value of the last expression
|
|
290
|
+
# by running the code directly (capture.output eats return values)
|
|
291
|
+
result_text = ""
|
|
292
|
+
try:
|
|
293
|
+
result = ro.r(code)
|
|
294
|
+
if result is not None and result != ro.NULL:
|
|
295
|
+
numpy2ri.activate()
|
|
296
|
+
pandas2ri.activate()
|
|
297
|
+
try:
|
|
298
|
+
if hasattr(result, '__len__') and len(result) == 1:
|
|
299
|
+
result_text = f"\nReturn value: {float(result[0])}"
|
|
300
|
+
elif hasattr(result, '__len__') and len(result) <= 50:
|
|
301
|
+
vals = [str(x) for x in result]
|
|
302
|
+
result_text = f"\nReturn value: [{', '.join(vals)}]"
|
|
303
|
+
else:
|
|
304
|
+
result_text = f"\nReturn value: {str(result)[:2000]}"
|
|
305
|
+
except Exception:
|
|
306
|
+
result_text = f"\nReturn value: {str(result)[:2000]}"
|
|
307
|
+
finally:
|
|
308
|
+
numpy2ri.deactivate()
|
|
309
|
+
pandas2ri.deactivate()
|
|
310
|
+
except Exception:
|
|
311
|
+
pass # Already captured output above
|
|
312
|
+
|
|
313
|
+
return (output_text + result_text).strip() or "(no output)"
|
|
314
|
+
|
|
315
|
+
except Exception as e:
|
|
316
|
+
return f"R Error: {e}"
|
|
317
|
+
|
|
318
|
+
text = await asyncio.to_thread(_exec_r, code)
|
|
319
|
+
text = text[:6000]
|
|
320
|
+
is_error = text.startswith("R Error:")
|
|
321
|
+
|
|
322
|
+
# Buffer structured execution metadata for trace capture.
|
|
323
|
+
if code_trace_buffer is not None:
|
|
324
|
+
code_trace_buffer.append({
|
|
325
|
+
"tool": "run_r",
|
|
326
|
+
"code": code,
|
|
327
|
+
"stdout": text,
|
|
328
|
+
"error": text if is_error else None,
|
|
329
|
+
})
|
|
330
|
+
|
|
331
|
+
return {
|
|
332
|
+
"content": [{"type": "text", "text": text}],
|
|
333
|
+
"is_error": is_error,
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
return handler
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# ---------------------------------------------------------------------------
|
|
340
|
+
# Public API
|
|
341
|
+
# ---------------------------------------------------------------------------
|
|
342
|
+
|
|
343
|
+
def create_ct_mcp_server(
    session,
    *,
    exclude_categories: set[str] | None = None,
    exclude_tools: set[str] | None = None,
    include_run_python: bool = True,
):
    """Create an in-process MCP server exposing all ct tools.

    Args:
        session: Active ct Session (provides config, LLM client).
        exclude_categories: Tool categories to omit.
        exclude_tools: Specific tool names to omit.
        include_run_python: Whether to include the run_python sandbox tool
            (the run_r tool is gated by the same flag plus rpy2 availability).

    Returns:
        A tuple of ``(mcp_server, sandbox_or_none, tool_names, code_trace_buffer)``
        where sandbox is the Sandbox instance (if run_python is enabled) for
        post-query inspection, and code_trace_buffer is a shared list that
        MCP handlers append structured execution metadata to.
    """
    from ct.tools import registry, ensure_loaded, EXPERIMENTAL_CATEGORIES

    ensure_loaded()

    exclude_categories = exclude_categories or set()
    exclude_tools = exclude_tools or set()

    # Shared buffer: code tool handlers append structured metadata here.
    # The runner reads from this to enrich trace events — bypasses the SDK
    # stream which may truncate/omit tool result content.
    code_trace_buffer: list[dict] = []

    sdk_tools: list[SdkMcpTool] = []
    tool_names: list[str] = []

    # Wrap every registry tool as an SDK MCP tool, applying the filters.
    for tool_obj in registry.list_tools():
        if tool_obj.category in exclude_categories:
            continue
        if tool_obj.name in exclude_tools:
            continue
        # Skip experimental categories by default
        if tool_obj.category in EXPERIMENTAL_CATEGORIES:
            continue

        handler = _make_tool_handler(tool_obj, session)
        schema = _params_to_json_schema(tool_obj.parameters)

        sdk_tool = SdkMcpTool(
            name=tool_obj.name,
            description=tool_obj.description,
            input_schema=schema,
            handler=handler,
        )
        sdk_tools.append(sdk_tool)
        tool_names.append(tool_obj.name)

    # Add run_python tool
    sandbox = None
    if include_run_python:
        # The handler factory also returns the Sandbox so the caller can
        # inspect its state after the query.
        rp_handler, sandbox = _make_run_python_handler(session, code_trace_buffer)
        rp_tool = SdkMcpTool(
            name="run_python",
            description=(
                "Execute Python code in a sandboxed environment. Variables persist "
                "between calls. Pre-imported: pd, np, plt, sns, scipy_stats, sklearn, "
                "json, re, math, collections, itertools, os, glob, gzip, csv, zipfile, "
                "io, tempfile, struct, datetime, Path, safe_subprocess_run, "
                "compute_pi_percentage, run_r (R via rpy2). "
                "Save plots to OUTPUT_DIR. When done, assign "
                "result = {'summary': '...', 'answer': '...'}"
            ),
            input_schema={
                "type": "object",
                "properties": {
                    "code": {
                        "type": "string",
                        "description": "Python code to execute",
                    }
                },
                "required": ["code"],
            },
            handler=rp_handler,
        )
        sdk_tools.append(rp_tool)
        tool_names.append("run_python")

    # Add run_r tool (R code execution via rpy2)
    if include_run_python:  # R tool follows same gating as Python
        try:
            import rpy2.robjects  # noqa: F401 — check availability
            rr_handler = _make_run_r_handler(code_trace_buffer)
            rr_tool = SdkMcpTool(
                name="run_r",
                description=(
                    "Execute R code via rpy2. Use for: natural splines (ns()), "
                    "wilcox.test(), p.adjust(), fisher.test(), lm(), predict(), "
                    "survival analysis, KEGG pathway analysis (KEGGREST), and any "
                    "analysis where R is the reference implementation. "
                    "Available packages: stats, splines, survival, MASS, KEGGREST. "
                    "Print results with cat() or print(). "
                    "Use this instead of run_python when the question asks for R, or when "
                    "R's implementation is the reference (splines, multiple testing correction, "
                    "nonparametric tests, organism-specific KEGG ORA)."
                ),
                input_schema={
                    "type": "object",
                    "properties": {
                        "code": {
                            "type": "string",
                            "description": "R code to execute",
                        }
                    },
                    "required": ["code"],
                },
                handler=rr_handler,
            )
            sdk_tools.append(rr_tool)
            tool_names.append("run_r")
        except ImportError:
            logger.info("rpy2 not available — run_r tool disabled")

    server = create_sdk_mcp_server(
        name="ct-tools",
        version="1.0.0",
        tools=sdk_tools,
    )

    # Domain-tool count = total minus whichever sandbox tools were added.
    logger.info(
        "Created MCP server with %d tools (%d domain + %s)",
        len(sdk_tools),
        len(sdk_tools) - (1 if include_run_python else 0) - (1 if "run_r" in tool_names else 0),
        ", ".join(t for t in ["run_python", "run_r"] if t in tool_names) or "no sandbox",
    )

    return server, sandbox, tool_names, code_trace_buffer
|