celltype-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. celltype_cli-0.1.0.dist-info/METADATA +267 -0
  2. celltype_cli-0.1.0.dist-info/RECORD +89 -0
  3. celltype_cli-0.1.0.dist-info/WHEEL +4 -0
  4. celltype_cli-0.1.0.dist-info/entry_points.txt +2 -0
  5. celltype_cli-0.1.0.dist-info/licenses/LICENSE +21 -0
  6. ct/__init__.py +3 -0
  7. ct/agent/__init__.py +0 -0
  8. ct/agent/case_studies.py +426 -0
  9. ct/agent/config.py +523 -0
  10. ct/agent/doctor.py +544 -0
  11. ct/agent/knowledge.py +523 -0
  12. ct/agent/loop.py +99 -0
  13. ct/agent/mcp_server.py +478 -0
  14. ct/agent/orchestrator.py +733 -0
  15. ct/agent/runner.py +656 -0
  16. ct/agent/sandbox.py +481 -0
  17. ct/agent/session.py +145 -0
  18. ct/agent/system_prompt.py +186 -0
  19. ct/agent/trace_store.py +228 -0
  20. ct/agent/trajectory.py +169 -0
  21. ct/agent/types.py +182 -0
  22. ct/agent/workflows.py +462 -0
  23. ct/api/__init__.py +1 -0
  24. ct/api/app.py +211 -0
  25. ct/api/config.py +120 -0
  26. ct/api/engine.py +124 -0
  27. ct/cli.py +1448 -0
  28. ct/data/__init__.py +0 -0
  29. ct/data/compute_providers.json +59 -0
  30. ct/data/cro_database.json +395 -0
  31. ct/data/downloader.py +238 -0
  32. ct/data/loaders.py +252 -0
  33. ct/kb/__init__.py +5 -0
  34. ct/kb/benchmarks.py +147 -0
  35. ct/kb/governance.py +106 -0
  36. ct/kb/ingest.py +415 -0
  37. ct/kb/reasoning.py +129 -0
  38. ct/kb/schema_monitor.py +162 -0
  39. ct/kb/substrate.py +387 -0
  40. ct/models/__init__.py +0 -0
  41. ct/models/llm.py +370 -0
  42. ct/tools/__init__.py +195 -0
  43. ct/tools/_compound_resolver.py +297 -0
  44. ct/tools/biomarker.py +368 -0
  45. ct/tools/cellxgene.py +282 -0
  46. ct/tools/chemistry.py +1371 -0
  47. ct/tools/claude.py +390 -0
  48. ct/tools/clinical.py +1153 -0
  49. ct/tools/clue.py +249 -0
  50. ct/tools/code.py +1069 -0
  51. ct/tools/combination.py +397 -0
  52. ct/tools/compute.py +402 -0
  53. ct/tools/cro.py +413 -0
  54. ct/tools/data_api.py +2114 -0
  55. ct/tools/design.py +295 -0
  56. ct/tools/dna.py +575 -0
  57. ct/tools/experiment.py +604 -0
  58. ct/tools/expression.py +655 -0
  59. ct/tools/files.py +957 -0
  60. ct/tools/genomics.py +1387 -0
  61. ct/tools/http_client.py +146 -0
  62. ct/tools/imaging.py +319 -0
  63. ct/tools/intel.py +223 -0
  64. ct/tools/literature.py +743 -0
  65. ct/tools/network.py +422 -0
  66. ct/tools/notification.py +111 -0
  67. ct/tools/omics.py +3330 -0
  68. ct/tools/ops.py +1230 -0
  69. ct/tools/parity.py +649 -0
  70. ct/tools/pk.py +245 -0
  71. ct/tools/protein.py +678 -0
  72. ct/tools/regulatory.py +643 -0
  73. ct/tools/remote_data.py +179 -0
  74. ct/tools/report.py +181 -0
  75. ct/tools/repurposing.py +376 -0
  76. ct/tools/safety.py +1280 -0
  77. ct/tools/shell.py +178 -0
  78. ct/tools/singlecell.py +533 -0
  79. ct/tools/statistics.py +552 -0
  80. ct/tools/structure.py +882 -0
  81. ct/tools/target.py +901 -0
  82. ct/tools/translational.py +123 -0
  83. ct/tools/viability.py +218 -0
  84. ct/ui/__init__.py +0 -0
  85. ct/ui/markdown.py +31 -0
  86. ct/ui/status.py +258 -0
  87. ct/ui/suggestions.py +567 -0
  88. ct/ui/terminal.py +1456 -0
  89. ct/ui/traces.py +112 -0
ct/tools/ops.py ADDED
@@ -0,0 +1,1230 @@
1
+ """
2
+ Research-ops productivity tools: notebook entries, todos, and workflow templates.
3
+
4
+ These tools provide lightweight project memory in ~/.ct/ops (or config override)
5
+ without depending on external services.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from datetime import date, datetime, timezone
11
+ import json
12
+ from pathlib import Path
13
+ import re
14
+ from typing import Any
15
+ from uuid import uuid4
16
+
17
+ from ct.tools import registry
18
+
19
+
20
+ def _ops_root(_session=None) -> Path:
21
+ """Return base directory for ops storage and ensure it exists."""
22
+ base = None
23
+ if _session is not None and getattr(_session, "config", None) is not None:
24
+ base = _session.config.get("ops.base_dir")
25
+
26
+ root = Path(base).expanduser() if base else (Path.home() / ".ct" / "ops")
27
+ root.mkdir(parents=True, exist_ok=True)
28
+ return root
29
+
30
+
31
def _notebook_path(_session=None) -> Path:
    """Location of the JSONL notebook file inside the ops root."""
    root = _ops_root(_session)
    return root / "notebook.jsonl"
33
+
34
+
35
def _todos_path(_session=None) -> Path:
    """Location of the JSON todo database inside the ops root."""
    root = _ops_root(_session)
    return root / "todos.json"
37
+
38
+
39
def _workflow_dir(_session=None) -> Path:
    """Return the workflow template directory, creating it if needed."""
    workflows = _ops_root(_session) / "workflows"
    workflows.mkdir(parents=True, exist_ok=True)
    return workflows
43
+
44
+
45
+ def _now_iso() -> str:
46
+ return datetime.now(timezone.utc).isoformat()
47
+
48
+
49
+ def _parse_int(value: Any, default: int) -> int:
50
+ try:
51
+ return int(value)
52
+ except Exception:
53
+ return default
54
+
55
+
56
+ def _normalize_tags(tags: Any) -> list[str]:
57
+ """Normalize tags from string/list input to unique lowercase tokens."""
58
+ if tags is None:
59
+ return []
60
+
61
+ raw: list[str]
62
+ if isinstance(tags, str):
63
+ raw = [x.strip() for x in tags.split(",")]
64
+ elif isinstance(tags, list):
65
+ raw = [str(x).strip() for x in tags]
66
+ else:
67
+ raw = [str(tags).strip()]
68
+
69
+ cleaned: list[str] = []
70
+ seen = set()
71
+ for tag in raw:
72
+ if not tag:
73
+ continue
74
+ token = re.sub(r"\s+", "-", tag.lower())
75
+ token = re.sub(r"[^a-z0-9._:-]", "", token)
76
+ if not token or token in seen:
77
+ continue
78
+ seen.add(token)
79
+ cleaned.append(token)
80
+
81
+ return cleaned[:20]
82
+
83
+
84
def _load_todos(_session=None) -> tuple[list[dict], str | None]:
    """Read the todo database; returns (todos, error_message)."""
    db_path = _todos_path(_session)
    if not db_path.exists():
        return [], None

    try:
        raw = json.loads(db_path.read_text(encoding="utf-8"))
    except Exception as exc:
        return [], f"Failed to read todo database: {exc}"

    if not isinstance(raw, list):
        return [], "Todo database is corrupted (expected JSON list)."

    # Drop any non-dict junk instead of failing outright.
    return [item for item in raw if isinstance(item, dict)], None
100
+
101
+
102
def _save_todos(todos: list[dict], _session=None) -> tuple[Path, str | None]:
    """Write the todo list to disk; returns (path, error_message)."""
    db_path = _todos_path(_session)
    try:
        db_path.write_text(json.dumps(todos, indent=2), encoding="utf-8")
    except Exception as exc:
        return db_path, f"Failed to persist todo database: {exc}"
    return db_path, None
110
+
111
+
112
+ def _priority_rank(priority: str) -> int:
113
+ mapping = {"critical": 0, "high": 1, "medium": 2, "low": 3}
114
+ return mapping.get(priority, 2)
115
+
116
+
117
+ def _slugify_name(name: str) -> str:
118
+ slug = re.sub(r"[^a-zA-Z0-9._-]+", "-", name.strip().lower())
119
+ slug = re.sub(r"-+", "-", slug).strip("-._")
120
+ return slug or "workflow"
121
+
122
+
123
def _load_notebook_entries(_session=None) -> tuple[list[dict], int, str | None]:
    """Read notebook JSONL; returns (entries, skipped_count, error_message).

    Malformed or non-object lines are counted and skipped rather than
    aborting the whole read.
    """
    nb_path = _notebook_path(_session)
    if not nb_path.exists():
        return [], 0, None

    loaded: list[dict] = []
    malformed = 0
    try:
        with open(nb_path, "r", encoding="utf-8") as fh:
            for line in fh:
                text = line.strip()
                if not text:
                    continue
                try:
                    record = json.loads(text)
                except json.JSONDecodeError:
                    malformed += 1
                    continue
                if isinstance(record, dict):
                    loaded.append(record)
                else:
                    malformed += 1
    except Exception as exc:
        return [], 0, f"Failed to read notebook: {exc}"

    return loaded, malformed, None
150
+
151
+
152
def _save_notebook_entries(entries: list[dict], _session=None) -> tuple[Path, str | None]:
    """Rewrite the notebook JSONL file from *entries*."""
    nb_path = _notebook_path(_session)
    try:
        with open(nb_path, "w", encoding="utf-8") as fh:
            for record in entries:
                fh.write(json.dumps(record) + "\n")
    except Exception as exc:
        return nb_path, f"Failed to persist notebook: {exc}"
    return nb_path, None
162
+
163
+
164
def _list_workflow_files(_session=None) -> list[Path]:
    """All workflow template files, sorted by path."""
    folder = _workflow_dir(_session)
    files = (entry for entry in folder.glob("*.json") if entry.is_file())
    return sorted(files)
167
+
168
+
169
+ def _load_workflow_payload(path: Path) -> tuple[dict | None, str | None]:
170
+ try:
171
+ payload = json.loads(path.read_text(encoding="utf-8"))
172
+ except Exception as exc:
173
+ return None, f"Failed to read workflow file '{path.name}': {exc}"
174
+ if not isinstance(payload, dict):
175
+ return None, f"Invalid workflow file '{path.name}' (expected object)."
176
+ return payload, None
177
+
178
+
179
def _find_workflow_path(identifier: str, _session=None) -> tuple[Path | None, str | None]:
    """Resolve a workflow by file name, stem, slug, or display name.

    Returns (path, None) on a hit or (None, error_message) otherwise.
    """
    ident = (identifier or "").strip()
    if not ident:
        return None, "Workflow identifier is required."

    candidates = _list_workflow_files(_session)
    if not candidates:
        return None, "No saved workflows."

    # Pass 1: literal filename or stem match.
    for candidate in candidates:
        if ident in (candidate.name, candidate.stem):
            return candidate, None

    # Pass 2: slugified identifier matches a file stem.
    slug = _slugify_name(ident)
    for candidate in candidates:
        if candidate.stem == slug:
            return candidate, None

    # Pass 3: case-insensitive match on the stored display name.
    wanted = ident.lower()
    for candidate in candidates:
        payload, error = _load_workflow_payload(candidate)
        if error or not payload:
            continue
        display = str(payload.get("name", "")).strip().lower()
        if display == wanted:
            return candidate, None

    return None, f"Workflow '{identifier}' not found."
211
+
212
+
213
@registry.register(
    name="ops.notebook_add",
    description="Append a structured notebook entry for project memory",
    category="ops",
    parameters={
        "title": "Short notebook entry title",
        "content": "Entry body text (markdown/plain text)",
        "tags": "Optional list of tags or comma-separated tags",
        "linked_query": "Optional source query/command that produced the insight",
    },
    usage_guide=(
        "Use after an important finding, decision, or caveat so future runs can reuse context. "
        "Prefer concise entries with tags for retrieval."
    ),
)
def notebook_add(
    title: str,
    content: str,
    tags: list[str] | str | None = None,
    linked_query: str | None = None,
    _session=None,
    **kwargs,
) -> dict:
    """Validate and append one notebook entry to the local JSONL store."""
    clean_title = (title or "").strip()
    clean_content = (content or "").strip()
    if not clean_title:
        return {"summary": "Notebook title is required.", "error": "missing_title"}
    if not clean_content:
        return {"summary": "Notebook content is required.", "error": "missing_content"}

    record = {
        "id": uuid4().hex[:12],
        "created_at": _now_iso(),
        "title": clean_title,
        "content": clean_content,
        "tags": _normalize_tags(tags),
        "linked_query": (linked_query or "").strip() or None,
    }

    target = _notebook_path(_session)
    try:
        # Append-only write keeps existing entries untouched.
        with open(target, "a", encoding="utf-8") as fh:
            fh.write(json.dumps(record) + "\n")
    except Exception as exc:
        return {"summary": f"Failed to save notebook entry: {exc}", "error": "write_failed"}

    return {
        "summary": f"Notebook entry saved ({record['id']}) with {len(record['tags'])} tags.",
        "entry": record,
        "path": str(target),
    }
265
+
266
+
267
@registry.register(
    name="ops.notebook_search",
    description="Search notebook entries by keyword and/or tag",
    category="ops",
    parameters={
        "query": "Keyword query matched against title/content/linked_query",
        "tag": "Optional single tag filter",
        "limit": "Maximum entries to return (default 20, max 100)",
    },
    usage_guide=(
        "Use before planning to recover prior findings, assumptions, and unresolved risks. "
        "Combine with tags to narrow to specific projects."
    ),
)
def notebook_search(
    query: str = "",
    tag: str = "",
    limit: int = 20,
    _session=None,
    **kwargs,
) -> dict:
    """Search notebook JSONL entries.

    Matching is a case-insensitive substring test against title, content,
    and linked_query; an optional tag filter is applied first. Results
    are newest-first and truncated to *limit*.
    """
    # _parse_int keeps a non-numeric limit from raising (consistent with
    # notebook_list) instead of crashing the tool call.
    limit = max(1, min(_parse_int(limit, 20), 100))
    q = (query or "").strip().lower()
    tag_tokens = _normalize_tags([tag]) if tag else []
    tag_norm = tag_tokens[0] if tag_tokens else ""
    if tag and not tag_norm:
        return {
            "summary": "Invalid tag filter.",
            "error": "invalid_tag",
        }

    path = _notebook_path(_session)
    if not path.exists():
        return {
            "summary": f"Notebook is empty: {path}",
            "matches": [],
            "count": 0,
            "path": str(path),
        }

    # Reuse the shared loader so read errors come back as structured
    # error dicts (matching notebook_list/notebook_get) instead of
    # raising, and malformed-line accounting stays consistent.
    entries, bad_lines, error = _load_notebook_entries(_session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    matches = []
    for entry in entries:
        if tag_norm and tag_norm not in entry.get("tags", []):
            continue

        haystack = " ".join(
            [
                str(entry.get("title", "")),
                str(entry.get("content", "")),
                str(entry.get("linked_query", "")),
            ]
        ).lower()
        if q and q not in haystack:
            continue

        # Trim the body to a one-line preview for compact output.
        preview = str(entry.get("content", "")).replace("\n", " ").strip()
        if len(preview) > 180:
            preview = preview[:177] + "..."

        matches.append(
            {
                "id": entry.get("id"),
                "created_at": entry.get("created_at"),
                "title": entry.get("title"),
                "tags": entry.get("tags", []),
                "preview": preview,
                "linked_query": entry.get("linked_query"),
            }
        )

    matches.sort(key=lambda x: x.get("created_at") or "", reverse=True)
    matches = matches[:limit]

    qualifier = []
    if q:
        qualifier.append(f"query='{query}'")
    if tag_norm:
        qualifier.append(f"tag='{tag_norm}'")
    suffix = f" ({', '.join(qualifier)})" if qualifier else ""

    summary = f"Found {len(matches)} notebook entries{suffix}."
    if bad_lines:
        summary += f" Skipped {bad_lines} malformed lines."

    return {
        "summary": summary,
        "matches": matches,
        "count": len(matches),
        "path": str(path),
        "skipped_malformed_lines": bad_lines,
    }
373
+
374
+
375
@registry.register(
    name="ops.notebook_get",
    description="Fetch a notebook entry by ID",
    category="ops",
    parameters={"entry_id": "Notebook entry ID"},
    usage_guide="Use when you need the full text of one saved notebook entry.",
)
def notebook_get(entry_id: str, _session=None, **kwargs) -> dict:
    """Return the full stored record for one notebook entry."""
    wanted = (entry_id or "").strip()
    if not wanted:
        return {"summary": "entry_id is required.", "error": "missing_entry_id"}

    entries, skipped, error = _load_notebook_entries(_session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    match = next(
        (e for e in entries if str(e.get("id", "")).strip() == wanted), None
    )
    if match is None:
        return {"summary": f"Notebook entry not found: {wanted}", "error": "not_found"}

    result = dict(match)
    summary = f"Notebook entry {wanted} loaded."
    if skipped:
        summary += f" Skipped {skipped} malformed lines while loading."
    result["summary"] = summary
    result["path"] = str(_notebook_path(_session))
    return result
402
+
403
+
404
@registry.register(
    name="ops.notebook_list",
    description="List recent notebook entries",
    category="ops",
    parameters={
        "limit": "Maximum entries to return (default 20, max 200)",
        "tag": "Optional tag filter",
    },
    usage_guide="Use for a quick overview of recent project notes.",
)
def notebook_list(limit: int = 20, tag: str = "", _session=None, **kwargs) -> dict:
    """List notebook entries, newest first, optionally filtered by tag."""
    limit = max(1, min(_parse_int(limit, 20), 200))
    tag_tokens = _normalize_tags([tag]) if tag else []
    tag_norm = tag_tokens[0] if tag_tokens else ""
    if tag and not tag_norm:
        return {"summary": "Invalid tag filter.", "error": "invalid_tag"}

    entries, skipped, error = _load_notebook_entries(_session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    if tag_norm:
        entries = [e for e in entries if tag_norm in (e.get("tags") or [])]

    recent = sorted(
        entries, key=lambda e: str(e.get("created_at") or ""), reverse=True
    )[:limit]

    items = []
    for record in recent:
        snippet = str(record.get("content", "")).replace("\n", " ").strip()
        if len(snippet) > 140:
            snippet = snippet[:137] + "..."
        items.append(
            {
                "id": record.get("id"),
                "created_at": record.get("created_at"),
                "title": record.get("title"),
                "tags": record.get("tags", []),
                "preview": snippet,
            }
        )

    summary = f"Listed {len(items)} notebook entries."
    if tag_norm:
        summary += f" tag={tag_norm}."
    if skipped:
        summary += f" Skipped {skipped} malformed lines."

    return {
        "summary": summary,
        "entries": items,
        "count": len(items),
        "path": str(_notebook_path(_session)),
        "skipped_malformed_lines": skipped,
    }
460
+
461
+
462
@registry.register(
    name="ops.notebook_update",
    description="Update fields of an existing notebook entry",
    category="ops",
    parameters={
        "entry_id": "Notebook entry ID",
        "title": "Optional new title",
        "content": "Optional new content",
        "tags": "Optional replacement tags (list or comma-separated)",
        "linked_query": "Optional replacement linked query",
    },
    usage_guide="Use to correct or refine existing notes without creating duplicates.",
)
def notebook_update(
    entry_id: str,
    title: str | None = None,
    content: str | None = None,
    tags: list[str] | str | None = None,
    linked_query: str | None = None,
    _session=None,
    **kwargs,
) -> dict:
    """Apply partial field updates to one notebook entry and persist."""
    wanted = (entry_id or "").strip()
    if not wanted:
        return {"summary": "entry_id is required.", "error": "missing_entry_id"}

    entries, skipped, error = _load_notebook_entries(_session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    target = None
    for candidate in entries:
        if str(candidate.get("id", "")).strip() == wanted:
            target = candidate
            break

    if target is None:
        return {"summary": f"Notebook entry not found: {wanted}", "error": "not_found"}

    # Only non-None arguments replace stored fields.
    if title is not None:
        target["title"] = str(title).strip()
    if content is not None:
        target["content"] = str(content).strip()
    if tags is not None:
        target["tags"] = _normalize_tags(tags)
    if linked_query is not None:
        target["linked_query"] = str(linked_query).strip() or None
    target["updated_at"] = _now_iso()

    # Reject updates that would blank out required fields; nothing is
    # written to disk in that case.
    if not str(target.get("title", "")).strip():
        return {"summary": "Notebook title cannot be empty.", "error": "invalid_title"}
    if not str(target.get("content", "")).strip():
        return {"summary": "Notebook content cannot be empty.", "error": "invalid_content"}

    path, error = _save_notebook_entries(entries, _session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    summary = f"Notebook entry {wanted} updated."
    if skipped:
        summary += f" Skipped {skipped} malformed lines while loading."
    return {"summary": summary, "entry": target, "path": str(path)}
524
+
525
+
526
@registry.register(
    name="ops.notebook_delete",
    description="Delete a notebook entry by ID",
    category="ops",
    parameters={"entry_id": "Notebook entry ID"},
    usage_guide="Use to remove stale or incorrect notebook entries.",
)
def notebook_delete(entry_id: str, _session=None, **kwargs) -> dict:
    """Remove one notebook entry by ID and rewrite the store."""
    wanted = (entry_id or "").strip()
    if not wanted:
        return {"summary": "entry_id is required.", "error": "missing_entry_id"}

    entries, skipped, error = _load_notebook_entries(_session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    remaining = [e for e in entries if str(e.get("id", "")).strip() != wanted]
    if len(remaining) == len(entries):
        return {"summary": f"Notebook entry not found: {wanted}", "error": "not_found"}

    path, error = _save_notebook_entries(remaining, _session)
    if error:
        return {"summary": error, "error": "notebook_error"}

    summary = f"Notebook entry deleted: {wanted}."
    if skipped:
        summary += f" Skipped {skipped} malformed lines while loading."
    return {"summary": summary, "path": str(path), "count": len(remaining)}
556
+
557
+
558
@registry.register(
    name="ops.todo_add",
    description="Create a tracked todo item for research follow-ups",
    category="ops",
    parameters={
        "task": "Todo description",
        "priority": "critical|high|medium|low (default medium)",
        "due_date": "Optional due date in YYYY-MM-DD",
        "owner": "Optional owner name/alias",
    },
    usage_guide=(
        "Use to capture follow-up actions from synthesis outputs (validation assays, "
        "data pulls, literature checks) so nothing is lost between sessions."
    ),
)
def todo_add(
    task: str,
    priority: str = "medium",
    due_date: str | None = None,
    owner: str | None = None,
    _session=None,
    **kwargs,
) -> dict:
    """Append a todo item to local todo storage.

    Validates priority and due-date format before touching disk, then
    returns the stored item plus the count of currently open todos.
    """
    task = (task or "").strip()
    if not task:
        return {"summary": "Todo task is required.", "error": "missing_task"}

    normalized_priority = (priority or "medium").strip().lower()
    allowed_priorities = {"critical", "high", "medium", "low"}
    if normalized_priority not in allowed_priorities:
        return {
            "summary": "Invalid priority. Use one of: critical, high, medium, low.",
            "error": "invalid_priority",
        }

    normalized_due = None
    if due_date:
        try:
            # date.fromisoformat both validates and canonicalizes the input.
            normalized_due = date.fromisoformat(str(due_date)).isoformat()
        except ValueError:
            return {
                "summary": "Invalid due_date format. Use YYYY-MM-DD.",
                "error": "invalid_due_date",
            }

    todos, err = _load_todos(_session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    # Single timestamp so created_at and updated_at agree exactly at
    # creation time (two _now_iso() calls could differ by microseconds).
    now = _now_iso()
    item = {
        "id": uuid4().hex[:12],
        "task": task,
        "status": "open",
        "priority": normalized_priority,
        "due_date": normalized_due,
        "owner": (owner or "").strip() or None,
        "created_at": now,
        "updated_at": now,
    }
    todos.append(item)

    path, err = _save_todos(todos, _session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    open_count = sum(1 for x in todos if x.get("status") == "open")
    return {
        "summary": f"Todo added ({item['id']}). Open items: {open_count}.",
        "item": item,
        "open_count": open_count,
        "path": str(path),
    }
631
+
632
+
633
@registry.register(
    name="ops.todo_list",
    description="List tracked todo items with status and priority ordering",
    category="ops",
    parameters={
        "status": "open|in_progress|blocked|done|cancelled|all (default open)",
        "limit": "Maximum items to return (default 50, max 200)",
    },
    usage_guide=(
        "Use at the start/end of sessions to manage execution backlog. "
        "Default ordering surfaces urgent and overdue items first."
    ),
)
def todo_list(status: str = "open", limit: int = 50, _session=None, **kwargs) -> dict:
    """Return todo items with deterministic ordering.

    Items sort by priority rank, then due date (missing dates last),
    then creation time. Every status that todo_update can assign is a
    valid filter, plus "all".
    """
    status_norm = (status or "open").strip().lower()
    # Accept every status todo_update can set; previously only open/done/all
    # were listable, making in_progress/blocked/cancelled items invisible
    # except via "all".
    allowed = {"open", "in_progress", "blocked", "done", "cancelled", "all"}
    if status_norm not in allowed:
        return {
            "summary": "Invalid status. Use open, in_progress, blocked, done, cancelled, or all.",
            "error": "invalid_status",
        }

    # _parse_int for parity with the other ops tools: a bad limit falls
    # back instead of raising.
    limit = max(1, min(_parse_int(limit, 50), 200))
    todos, err = _load_todos(_session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    filtered = todos
    if status_norm != "all":
        filtered = [x for x in todos if x.get("status") == status_norm]

    def sort_key(item: dict) -> tuple:
        # Items without a due date sort after any real date.
        due = item.get("due_date") or "9999-12-31"
        created = item.get("created_at") or ""
        return (_priority_rank(str(item.get("priority", "medium"))), due, created)

    filtered = sorted(filtered, key=sort_key)
    limited = filtered[:limit]

    open_count = sum(1 for x in todos if x.get("status") == "open")
    done_count = sum(1 for x in todos if x.get("status") == "done")

    return {
        "summary": f"Listed {len(limited)} todo items (open={open_count}, done={done_count}).",
        "items": limited,
        "count": len(limited),
        "open_count": open_count,
        "done_count": done_count,
        "status_filter": status_norm,
        "path": str(_todos_path(_session)),
    }
684
+
685
+
686
@registry.register(
    name="ops.todo_get",
    description="Fetch a todo item by ID",
    category="ops",
    parameters={"todo_id": "Todo item ID"},
    usage_guide="Use to inspect a single todo in full detail.",
)
def todo_get(todo_id: str, _session=None, **kwargs) -> dict:
    """Return the full stored record for one todo item."""
    wanted = (todo_id or "").strip()
    if not wanted:
        return {"summary": "todo_id is required.", "error": "missing_todo_id"}

    todos, err = _load_todos(_session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    match = next(
        (x for x in todos if str(x.get("id", "")).strip() == wanted), None
    )
    if match is None:
        return {"summary": f"Todo item not found: {wanted}", "error": "not_found"}

    result = dict(match)
    result["summary"] = f"Todo item loaded: {wanted}"
    result["path"] = str(_todos_path(_session))
    return result
710
+
711
+
712
@registry.register(
    name="ops.todo_update",
    description="Update an existing todo item",
    category="ops",
    parameters={
        "todo_id": "Todo item ID",
        "task": "Optional replacement task text",
        "status": "Optional status: open|in_progress|blocked|done|cancelled",
        "priority": "Optional priority: critical|high|medium|low",
        "due_date": "Optional due date in YYYY-MM-DD (or empty to clear)",
        "owner": "Optional owner (or empty to clear)",
    },
    usage_guide="Use to track execution state and ownership of follow-up work.",
)
def todo_update(
    todo_id: str,
    task: str | None = None,
    status: str | None = None,
    priority: str | None = None,
    due_date: str | None = None,
    owner: str | None = None,
    _session=None,
    **kwargs,
) -> dict:
    """Update a todo item by ID.

    Only fields passed as non-None are changed; an empty string for
    due_date or owner clears that field. Validation failures return an
    error dict before anything is written to disk (any in-memory
    mutations already applied are simply discarded, since _save_todos
    is never reached).
    """
    needle = (todo_id or "").strip()
    if not needle:
        return {"summary": "todo_id is required.", "error": "missing_todo_id"}

    todos, err = _load_todos(_session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    # Closed vocabularies for the status/priority fields.
    allowed_status = {"open", "in_progress", "blocked", "done", "cancelled"}
    allowed_priority = {"critical", "high", "medium", "low"}
    item = None
    for candidate in todos:
        if str(candidate.get("id", "")).strip() == needle:
            item = candidate
            break
    if item is None:
        return {"summary": f"Todo item not found: {needle}", "error": "not_found"}

    if task is not None:
        item["task"] = str(task).strip()
    if status is not None:
        normalized_status = str(status).strip().lower()
        if normalized_status not in allowed_status:
            return {
                "summary": "Invalid status. Use open, in_progress, blocked, done, cancelled.",
                "error": "invalid_status",
            }
        item["status"] = normalized_status
    if priority is not None:
        normalized_priority = str(priority).strip().lower()
        if normalized_priority not in allowed_priority:
            return {
                "summary": "Invalid priority. Use critical, high, medium, low.",
                "error": "invalid_priority",
            }
        item["priority"] = normalized_priority
    if due_date is not None:
        raw_due = str(due_date).strip()
        if raw_due:
            try:
                # fromisoformat validates and canonicalizes YYYY-MM-DD.
                item["due_date"] = date.fromisoformat(raw_due).isoformat()
            except ValueError:
                return {
                    "summary": "Invalid due_date format. Use YYYY-MM-DD.",
                    "error": "invalid_due_date",
                }
        else:
            # Explicit empty string clears the due date.
            item["due_date"] = None
    if owner is not None:
        item["owner"] = str(owner).strip() or None

    # Reject an update that would leave the task text blank.
    if not str(item.get("task", "")).strip():
        return {"summary": "Todo task cannot be empty.", "error": "invalid_task"}
    item["updated_at"] = _now_iso()

    path, err = _save_todos(todos, _session)
    if err:
        return {"summary": err, "error": "todo_db_error"}
    return {"summary": f"Todo item updated: {needle}", "item": item, "path": str(path)}
796
+
797
+
798
@registry.register(
    name="ops.todo_delete",
    description="Delete a todo item by ID",
    category="ops",
    parameters={"todo_id": "Todo item ID"},
    usage_guide="Use to remove obsolete todo items.",
)
def todo_delete(todo_id: str, _session=None, **kwargs) -> dict:
    """Remove one todo item by ID and rewrite the database."""
    wanted = (todo_id or "").strip()
    if not wanted:
        return {"summary": "todo_id is required.", "error": "missing_todo_id"}

    todos, err = _load_todos(_session)
    if err:
        return {"summary": err, "error": "todo_db_error"}

    remaining = [x for x in todos if str(x.get("id", "")).strip() != wanted]
    if len(remaining) == len(todos):
        return {"summary": f"Todo item not found: {wanted}", "error": "not_found"}

    path, err = _save_todos(remaining, _session)
    if err:
        return {"summary": err, "error": "todo_db_error"}
    return {"summary": f"Todo item deleted: {wanted}", "count": len(remaining), "path": str(path)}
824
+
825
+
826
def _normalize_workflow_steps(steps: list[dict] | str) -> tuple[list[dict] | None, str | None]:
    """Validate and normalize workflow step payloads.

    Accepts either a JSON-encoded string or a list of step dicts.
    Returns (steps, None) on success or (None, error_message) on the
    first invalid step. Each normalized step carries
    id/description/tool/tool_args/depends_on, and the result is sorted
    by step id.
    """
    if isinstance(steps, str):
        try:
            steps = json.loads(steps)
        except json.JSONDecodeError:
            return None, "Invalid steps payload. Provide JSON array or list of step objects."

    if not isinstance(steps, list) or not steps:
        return None, "Workflow steps must be a non-empty list."

    cleaned_steps = []
    for idx, step in enumerate(steps, 1):
        if not isinstance(step, dict):
            return None, f"Step {idx} is not an object."
        tool = str(step.get("tool", "")).strip()
        description = str(step.get("description", "")).strip()
        if not tool:
            return None, f"Step {idx} is missing required field 'tool'."

        # Evaluate each lookup once (the original fetched tool_args and
        # parsed each depends_on element twice).
        tool_args = step.get("tool_args", {})
        if not isinstance(tool_args, dict):
            tool_args = {}

        raw_deps = step.get("depends_on", [])
        if not isinstance(raw_deps, list):
            raw_deps = []
        depends_on = []
        for dep in raw_deps:
            dep_id = _parse_int(dep, 0)
            # Only positive integer references are meaningful step ids.
            if dep_id > 0:
                depends_on.append(dep_id)

        cleaned_steps.append(
            {
                "id": _parse_int(step.get("id", idx), idx),
                "description": description,
                "tool": tool,
                "tool_args": tool_args,
                "depends_on": depends_on,
            }
        )

    cleaned_steps.sort(key=lambda x: x["id"])
    return cleaned_steps, None
861
+
862
+
863
@registry.register(
    name="ops.workflow_save",
    description="Save a reusable workflow template from a plan-like step list",
    category="ops",
    parameters={
        "name": "Workflow template name",
        "query": "Original or canonical query this workflow answers",
        "steps": "List of step dicts (id/description/tool/tool_args/depends_on)",
        "notes": "Optional notes about assumptions or context",
    },
    usage_guide=(
        "Use after a successful run to preserve the strategy as a reusable template. "
        "Templates are stored locally and can be inspected with files.read_file."
    ),
)
def workflow_save(
    name: str,
    query: str,
    steps: list[dict] | str,
    notes: str = "",
    _session=None,
    **kwargs,
) -> dict:
    """Validate inputs and write a workflow template to a local JSON file."""
    workflow_name = (name or "").strip()
    if not workflow_name:
        return {"summary": "Workflow name is required.", "error": "missing_name"}

    query = (query or "").strip()
    if not query:
        return {"summary": "Workflow query is required.", "error": "missing_query"}

    cleaned_steps, error = _normalize_workflow_steps(steps)
    if error:
        return {"summary": error, "error": "invalid_steps"}

    record = {
        "name": workflow_name,
        "query": query,
        "notes": (notes or "").strip() or None,
        "created_at": _now_iso(),
        "version": 1,
        "steps": cleaned_steps,
    }

    # Choose a unique file name: <slug>.json, then <slug>-2.json, <slug>-3.json, ...
    directory = _workflow_dir(_session)
    stem = _slugify_name(workflow_name)
    counter = 1
    target = directory / f"{stem}.json"
    while target.exists():
        counter += 1
        target = directory / f"{stem}-{counter}.json"

    try:
        target.write_text(json.dumps(record, indent=2), encoding="utf-8")
    except Exception as exc:
        return {"summary": f"Failed to save workflow template: {exc}", "error": "write_failed"}

    return {
        "summary": f"Saved workflow template '{workflow_name}' with {len(cleaned_steps)} steps.",
        "workflow": record,
        "path": str(target),
    }
926
+
927
+
928
@registry.register(
    name="ops.workflow_list",
    description="List saved workflow templates",
    category="ops",
    parameters={
        "limit": "Maximum workflows to return (default 50, max 200)",
        "query": "Optional keyword filter against name/query/notes",
    },
    usage_guide="Use to discover reusable workflow templates available in local storage.",
)
def workflow_list(limit: int = 50, query: str = "", _session=None, **kwargs) -> dict:
    """Return metadata for saved workflow templates, newest first."""
    limit = max(1, min(_parse_int(limit, 50), 200))
    needle = (query or "").strip().lower()

    entries = []
    for path in _list_workflow_files(_session):
        payload, error = _load_workflow_payload(path)
        if error or not payload:
            continue  # skip unreadable or corrupt templates silently

        # Case-insensitive substring match across the descriptive fields.
        searchable = " ".join(
            [
                str(payload.get("name", "")),
                str(payload.get("query", "")),
                str(payload.get("notes", "")),
            ]
        ).lower()
        if needle and needle not in searchable:
            continue

        raw_steps = payload.get("steps")
        entries.append(
            {
                "id": path.stem,
                "name": payload.get("name", path.stem),
                "query": payload.get("query", ""),
                "created_at": payload.get("created_at"),
                "updated_at": payload.get("updated_at"),
                "n_steps": len(raw_steps) if isinstance(raw_steps, list) else 0,
                "path": str(path),
            }
        )

    # Most recently touched templates first (updated_at wins over created_at).
    entries.sort(key=lambda e: str(e.get("updated_at") or e.get("created_at") or ""), reverse=True)
    entries = entries[:limit]
    suffix = f" filter='{query}'" if needle else ""
    return {
        "summary": f"Listed {len(entries)} workflow templates.{suffix}",
        "workflows": entries,
        "count": len(entries),
        "directory": str(_workflow_dir(_session)),
    }
977
+
978
+
979
@registry.register(
    name="ops.workflow_get",
    description="Load one saved workflow template by ID or name",
    category="ops",
    parameters={"workflow_id": "Workflow file stem, file name, or display name"},
    usage_guide="Use when you need full details of a saved workflow template.",
)
def workflow_get(workflow_id: str, _session=None, **kwargs) -> dict:
    """Return the full payload of one saved workflow template."""
    path, error = _find_workflow_path(workflow_id, _session)
    if error:
        return {"summary": error, "error": "not_found"}

    payload, error = _load_workflow_payload(path)
    if error or payload is None:
        return {"summary": error or "Invalid workflow payload.", "error": "workflow_error"}

    # Shallow-copy before annotating so the loaded payload is not mutated.
    result = dict(payload)
    result["summary"] = f"Workflow loaded: {result.get('name', path.stem)}"
    result["workflow_id"] = path.stem
    result["path"] = str(path)
    return result
1001
+
1002
+
1003
@registry.register(
    name="ops.workflow_update",
    description="Update an existing workflow template",
    category="ops",
    parameters={
        "workflow_id": "Workflow file stem, file name, or display name",
        "name": "Optional replacement name",
        "query": "Optional replacement canonical query",
        "steps": "Optional replacement step list",
        "notes": "Optional replacement notes",
    },
    usage_guide="Use to keep reusable workflows current as your process evolves.",
)
def workflow_update(
    workflow_id: str,
    name: str | None = None,
    query: str | None = None,
    steps: list[dict] | str | None = None,
    notes: str | None = None,
    _session=None,
    **kwargs,
) -> dict:
    """Apply partial field updates to a saved workflow and persist it in place."""
    path, problem = _find_workflow_path(workflow_id, _session)
    if problem:
        return {"summary": problem, "error": "not_found"}

    template, problem = _load_workflow_payload(path)
    if problem or template is None:
        return {"summary": problem or "Invalid workflow payload.", "error": "workflow_error"}

    # Merge in only the fields the caller actually supplied (None = keep).
    if name is not None:
        template["name"] = str(name).strip()
    if query is not None:
        template["query"] = str(query).strip()
    if notes is not None:
        template["notes"] = str(notes).strip() or None
    if steps is not None:
        cleaned_steps, step_error = _normalize_workflow_steps(steps)
        if step_error:
            return {"summary": step_error, "error": "invalid_steps"}
        template["steps"] = cleaned_steps

    # Re-validate the merged document before writing it back to disk.
    if not str(template.get("name", "")).strip():
        return {"summary": "Workflow name cannot be empty.", "error": "invalid_name"}
    if not str(template.get("query", "")).strip():
        return {"summary": "Workflow query cannot be empty.", "error": "invalid_query"}
    if not isinstance(template.get("steps"), list) or not template["steps"]:
        return {"summary": "Workflow requires at least one step.", "error": "invalid_steps"}

    template["updated_at"] = _now_iso()
    template["version"] = _parse_int(template.get("version", 1), 1) + 1
    try:
        path.write_text(json.dumps(template, indent=2), encoding="utf-8")
    except Exception as exc:
        return {"summary": f"Failed to update workflow: {exc}", "error": "write_failed"}

    return {
        "summary": f"Workflow updated: {template.get('name', path.stem)}",
        "workflow": template,
        "path": str(path),
    }
1065
+
1066
+
1067
@registry.register(
    name="ops.workflow_delete",
    description="Delete a saved workflow template",
    category="ops",
    parameters={"workflow_id": "Workflow file stem, file name, or display name"},
    usage_guide="Use to remove obsolete workflow templates.",
)
def workflow_delete(workflow_id: str, _session=None, **kwargs) -> dict:
    """Remove a single workflow template file from local storage."""
    target, problem = _find_workflow_path(workflow_id, _session)
    if problem:
        return {"summary": problem, "error": "not_found"}

    try:
        target.unlink()
    except Exception as exc:
        return {"summary": f"Failed to delete workflow: {exc}", "error": "delete_failed"}
    return {"summary": f"Workflow deleted: {target.stem}", "path": str(target)}
1085
+
1086
+
1087
@registry.register(
    name="ops.workflow_run",
    description="Execute a saved workflow template",
    category="ops",
    parameters={
        "workflow_id": "Workflow file stem, file name, or display name",
        "dry_run": "If true, return the resolved execution plan without running tools",
        "continue_on_error": "If true, continue executing later steps after failures",
    },
    usage_guide="Use to replay a validated workflow template against new inputs or contexts.",
)
def workflow_run(
    workflow_id: str,
    dry_run: bool = False,
    continue_on_error: bool = False,
    _session=None,
    _prior_results=None,
    **kwargs,
) -> dict:
    """Execute a saved workflow's steps in dependency order.

    A step runs only once every step in its ``depends_on`` list has status
    "completed". String tool args of the form ``$step.<id>[.<field>...]``
    are resolved against earlier step results before the tool is invoked.
    With ``dry_run`` true, the resolved plan is returned without running
    anything. With ``continue_on_error`` false (default), the first failure
    returns early with the partial results gathered so far.

    Returns a dict with ``results`` (per step id), ``status_by_step``
    (pending/completed/failed/skipped), and ``executed`` (ordered log).
    """
    path, error = _find_workflow_path(workflow_id, _session)
    if error:
        return {"summary": error, "error": "not_found"}
    payload, error = _load_workflow_payload(path)
    if error or payload is None:
        return {"summary": error or "Invalid workflow payload.", "error": "workflow_error"}

    # Re-validate stored steps; template files may have been edited on disk.
    cleaned_steps, step_error = _normalize_workflow_steps(payload.get("steps", []))
    if step_error:
        return {"summary": f"Workflow invalid: {step_error}", "error": "invalid_steps"}

    if dry_run:
        return {
            "summary": f"[DRY RUN] Workflow '{payload.get('name', path.stem)}' ready with {len(cleaned_steps)} steps.",
            "workflow_id": path.stem,
            "name": payload.get("name", path.stem),
            "steps": cleaned_steps,
            "path": str(path),
        }

    # Imported lazily to avoid a circular import at module load time.
    from ct.tools import registry as _tool_registry

    results = {}
    status_by_id = {step["id"]: "pending" for step in cleaned_steps}
    # Bounded scheduling loop: guards against dependency cycles that would
    # otherwise never make progress.
    loop_guard = len(cleaned_steps) * 4 + 8
    iterations = 0
    executed = []

    while iterations < loop_guard:
        iterations += 1
        progressed = False

        for step in cleaned_steps:
            sid = step["id"]
            if status_by_id[sid] != "pending":
                continue
            deps = step.get("depends_on", [])
            if any(status_by_id.get(dep) not in {"completed"} for dep in deps):
                # If a dependency failed and we're strict, abort this step.
                if any(status_by_id.get(dep) == "failed" for dep in deps) and not continue_on_error:
                    status_by_id[sid] = "skipped"
                continue

            tool_name = step["tool"]
            tool = _tool_registry.get_tool(tool_name)
            if tool is None:
                status_by_id[sid] = "failed"
                results[sid] = {"error": "tool_not_found", "summary": f"Tool not found: {tool_name}"}
                # Log the failure like any other outcome so `executed` stays complete.
                executed.append({"step_id": sid, "tool": tool_name, "status": "failed"})
                if not continue_on_error:
                    return {
                        "summary": f"Workflow failed at step {sid}: tool not found ({tool_name}).",
                        "workflow_id": path.stem,
                        "name": payload.get("name", path.stem),
                        "results": results,
                        "status_by_step": status_by_id,
                        "executed": executed,
                    }
                progressed = True
                continue

            # Resolve "$step.<id>.<field>..." references against prior results.
            args = dict(step.get("tool_args", {}))
            for key, val in list(args.items()):
                if isinstance(val, str) and val.startswith("$step."):
                    parts = val.split(".")
                    if len(parts) < 2:
                        continue
                    ref_id = _parse_int(parts[1], -1)
                    if ref_id not in results:
                        continue
                    resolved = results[ref_id]
                    for field in parts[2:]:
                        if isinstance(resolved, dict) and field in resolved:
                            resolved = resolved[field]
                        else:
                            break
                    args[key] = resolved

            args["_session"] = _session
            args["_prior_results"] = dict(_prior_results or {}) | results
            try:
                result = tool.run(**args)
            except Exception as exc:
                result = {"error": "execution_exception", "summary": f"{tool_name} crashed: {exc}"}

            has_error = isinstance(result, dict) and result.get("error")
            results[sid] = result
            status_by_id[sid] = "failed" if has_error else "completed"
            executed.append({"step_id": sid, "tool": tool_name, "status": status_by_id[sid]})
            progressed = True

            if has_error and not continue_on_error:
                return {
                    "summary": f"Workflow failed at step {sid} ({tool_name}).",
                    "workflow_id": path.stem,
                    "name": payload.get("name", path.stem),
                    "results": results,
                    "status_by_step": status_by_id,
                    "executed": executed,
                }

        if not progressed:
            break

    # Anything still pending has unresolved dependencies (or sits in a cycle).
    pending = [sid for sid, st in status_by_id.items() if st == "pending"]
    for sid in pending:
        status_by_id[sid] = "skipped"

    completed = sum(1 for st in status_by_id.values() if st == "completed")
    failed = sum(1 for st in status_by_id.values() if st == "failed")
    skipped = sum(1 for st in status_by_id.values() if st == "skipped")
    summary = (
        f"Workflow '{payload.get('name', path.stem)}' executed: "
        f"{completed} completed, {failed} failed, {skipped} skipped."
    )
    return {
        "summary": summary,
        "workflow_id": path.stem,
        "name": payload.get("name", path.stem),
        "results": results,
        "status_by_step": status_by_id,
        "executed": executed,
        "path": str(path),
    }