gobby 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. gobby/__init__.py +1 -1
  2. gobby/adapters/__init__.py +6 -0
  3. gobby/adapters/base.py +11 -2
  4. gobby/adapters/claude_code.py +2 -2
  5. gobby/adapters/codex_impl/adapter.py +38 -43
  6. gobby/adapters/copilot.py +324 -0
  7. gobby/adapters/cursor.py +373 -0
  8. gobby/adapters/gemini.py +2 -26
  9. gobby/adapters/windsurf.py +359 -0
  10. gobby/agents/definitions.py +162 -2
  11. gobby/agents/isolation.py +33 -1
  12. gobby/agents/pty_reader.py +192 -0
  13. gobby/agents/registry.py +10 -1
  14. gobby/agents/runner.py +24 -8
  15. gobby/agents/sandbox.py +8 -3
  16. gobby/agents/session.py +4 -0
  17. gobby/agents/spawn.py +9 -2
  18. gobby/agents/spawn_executor.py +49 -61
  19. gobby/agents/spawners/command_builder.py +4 -4
  20. gobby/app_context.py +5 -0
  21. gobby/cli/__init__.py +4 -0
  22. gobby/cli/install.py +259 -4
  23. gobby/cli/installers/__init__.py +12 -0
  24. gobby/cli/installers/copilot.py +242 -0
  25. gobby/cli/installers/cursor.py +244 -0
  26. gobby/cli/installers/shared.py +3 -0
  27. gobby/cli/installers/windsurf.py +242 -0
  28. gobby/cli/pipelines.py +639 -0
  29. gobby/cli/sessions.py +3 -1
  30. gobby/cli/skills.py +209 -0
  31. gobby/cli/tasks/crud.py +6 -5
  32. gobby/cli/tasks/search.py +1 -1
  33. gobby/cli/ui.py +116 -0
  34. gobby/cli/workflows.py +38 -17
  35. gobby/config/app.py +5 -0
  36. gobby/config/skills.py +23 -2
  37. gobby/hooks/broadcaster.py +9 -0
  38. gobby/hooks/event_handlers/_base.py +6 -1
  39. gobby/hooks/event_handlers/_session.py +44 -130
  40. gobby/hooks/events.py +48 -0
  41. gobby/hooks/hook_manager.py +25 -3
  42. gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
  43. gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
  44. gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
  45. gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
  46. gobby/llm/__init__.py +14 -1
  47. gobby/llm/claude.py +217 -1
  48. gobby/llm/service.py +149 -0
  49. gobby/mcp_proxy/instructions.py +9 -27
  50. gobby/mcp_proxy/models.py +1 -0
  51. gobby/mcp_proxy/registries.py +56 -9
  52. gobby/mcp_proxy/server.py +6 -2
  53. gobby/mcp_proxy/services/tool_filter.py +7 -0
  54. gobby/mcp_proxy/services/tool_proxy.py +19 -1
  55. gobby/mcp_proxy/stdio.py +37 -21
  56. gobby/mcp_proxy/tools/agents.py +7 -0
  57. gobby/mcp_proxy/tools/hub.py +30 -1
  58. gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
  59. gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
  60. gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
  61. gobby/mcp_proxy/tools/orchestration/review.py +17 -4
  62. gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
  63. gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
  64. gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
  65. gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
  66. gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
  67. gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
  68. gobby/mcp_proxy/tools/skills/__init__.py +184 -30
  69. gobby/mcp_proxy/tools/spawn_agent.py +229 -14
  70. gobby/mcp_proxy/tools/tasks/_context.py +8 -0
  71. gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
  72. gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
  73. gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
  74. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
  75. gobby/mcp_proxy/tools/tasks/_search.py +1 -1
  76. gobby/mcp_proxy/tools/workflows/__init__.py +9 -2
  77. gobby/mcp_proxy/tools/workflows/_lifecycle.py +12 -1
  78. gobby/mcp_proxy/tools/workflows/_query.py +45 -26
  79. gobby/mcp_proxy/tools/workflows/_terminal.py +39 -3
  80. gobby/mcp_proxy/tools/worktrees.py +54 -15
  81. gobby/memory/context.py +5 -5
  82. gobby/runner.py +108 -6
  83. gobby/servers/http.py +7 -1
  84. gobby/servers/routes/__init__.py +2 -0
  85. gobby/servers/routes/admin.py +44 -0
  86. gobby/servers/routes/mcp/endpoints/execution.py +18 -25
  87. gobby/servers/routes/mcp/hooks.py +10 -1
  88. gobby/servers/routes/pipelines.py +227 -0
  89. gobby/servers/websocket.py +314 -1
  90. gobby/sessions/analyzer.py +87 -1
  91. gobby/sessions/manager.py +5 -5
  92. gobby/sessions/transcripts/__init__.py +3 -0
  93. gobby/sessions/transcripts/claude.py +5 -0
  94. gobby/sessions/transcripts/codex.py +5 -0
  95. gobby/sessions/transcripts/gemini.py +5 -0
  96. gobby/skills/hubs/__init__.py +25 -0
  97. gobby/skills/hubs/base.py +234 -0
  98. gobby/skills/hubs/claude_plugins.py +328 -0
  99. gobby/skills/hubs/clawdhub.py +289 -0
  100. gobby/skills/hubs/github_collection.py +465 -0
  101. gobby/skills/hubs/manager.py +263 -0
  102. gobby/skills/hubs/skillhub.py +342 -0
  103. gobby/storage/memories.py +4 -4
  104. gobby/storage/migrations.py +95 -3
  105. gobby/storage/pipelines.py +367 -0
  106. gobby/storage/sessions.py +23 -4
  107. gobby/storage/skills.py +1 -1
  108. gobby/storage/tasks/_aggregates.py +2 -2
  109. gobby/storage/tasks/_lifecycle.py +4 -4
  110. gobby/storage/tasks/_models.py +7 -1
  111. gobby/storage/tasks/_queries.py +3 -3
  112. gobby/sync/memories.py +4 -3
  113. gobby/tasks/commits.py +48 -17
  114. gobby/workflows/actions.py +75 -0
  115. gobby/workflows/context_actions.py +246 -5
  116. gobby/workflows/definitions.py +119 -1
  117. gobby/workflows/detection_helpers.py +23 -11
  118. gobby/workflows/enforcement/task_policy.py +18 -0
  119. gobby/workflows/engine.py +20 -1
  120. gobby/workflows/evaluator.py +8 -5
  121. gobby/workflows/lifecycle_evaluator.py +57 -26
  122. gobby/workflows/loader.py +567 -30
  123. gobby/workflows/lobster_compat.py +147 -0
  124. gobby/workflows/pipeline_executor.py +801 -0
  125. gobby/workflows/pipeline_state.py +172 -0
  126. gobby/workflows/pipeline_webhooks.py +206 -0
  127. gobby/workflows/premature_stop.py +5 -0
  128. gobby/worktrees/git.py +135 -20
  129. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
  130. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/RECORD +134 -106
  131. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
  132. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
  133. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
  134. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
gobby/tasks/commits.py CHANGED
@@ -487,30 +487,49 @@ def extract_mentioned_symbols(task: dict[str, Any]) -> list[str]:
487
487
 
488
488
 
489
489
  # Task ID patterns to search for in commit messages
490
- # Supports #N format (e.g., #1, #47) - human-friendly task references
490
+ # Uses {project}-#N format to avoid GitHub auto-linking and match CLI display format
491
+ # Patterns capture both project name and task number for validation
491
492
  TASK_ID_PATTERNS = [
492
- # [#N] - bracket format
493
- r"\[#(\d+)\]",
494
- # #N: - hash-colon format (at start of line or after space)
495
- r"(?:^|\s)#(\d+):",
496
- # Implements/Fixes/Closes/Refs #N (supports multiple: #1, #2, #3)
497
- r"(?:implements|fixes|closes|refs)\s+#(\d+)",
498
- # Standalone #N after whitespace (with word boundary to avoid false positives)
499
- r"(?:^|\s)#(\d+)\b(?![\d.])",
493
+ # [project-#N] - bracket format (primary)
494
+ r"\[(\w+)-#(\d+)\]",
495
+ # project-#N - standalone format (word boundary before, after digits)
496
+ r"(?:^|\s)(\w+)-#(\d+)\b",
497
+ # Implements/Fixes/Closes/Refs project-#N
498
+ r"(?:implements|fixes|closes|refs)\s+(\w+)-#(\d+)",
500
499
  ]
501
500
 
502
501
 
503
- def extract_task_ids_from_message(message: str) -> list[str]:
502
+ def get_current_project_name() -> str | None:
503
+ """Get current project name from context.
504
+
505
+ Returns:
506
+ Project name or None if not in a project.
507
+ """
508
+ from gobby.utils.project_context import get_project_context
509
+
510
+ ctx = get_project_context()
511
+ if ctx and ctx.get("name"):
512
+ name: str = ctx["name"]
513
+ return name
514
+ return None
515
+
516
+
517
+ def extract_task_ids_from_message(
518
+ message: str,
519
+ project_name: str | None = None,
520
+ ) -> list[str]:
504
521
  """Extract task IDs from a commit message.
505
522
 
506
523
  Supports patterns:
507
- - [#N] - bracket format
508
- - #N: - hash-colon format (at start of message)
509
- - Implements/Fixes/Closes/Refs #N
510
- - Multiple references: #1, #2, #3
524
+ - [project-#N] - bracket format (primary)
525
+ - project-#N - standalone format
526
+ - Implements/Fixes/Closes/Refs project-#N
511
527
 
512
528
  Args:
513
529
  message: Commit message to parse.
530
+ project_name: Optional project name to filter matches. If provided,
531
+ only returns task IDs from commits referencing this project.
532
+ If None, returns all task IDs found regardless of project.
514
533
 
515
534
  Returns:
516
535
  List of unique task references found (e.g., ["#1", "#42"]).
@@ -520,8 +539,13 @@ def extract_task_ids_from_message(message: str) -> list[str]:
520
539
  for pattern in TASK_ID_PATTERNS:
521
540
  matches = re.findall(pattern, message, re.IGNORECASE | re.MULTILINE)
522
541
  for match in matches:
542
+ # match is a tuple: (project, task_number)
543
+ found_project, task_num = match
544
+ # Filter by project name if specified
545
+ if project_name and found_project.lower() != project_name.lower():
546
+ continue
523
547
  # Format as #N
524
- task_id = f"#{match}"
548
+ task_id = f"#{task_num}"
525
549
  task_ids.add(task_id)
526
550
 
527
551
  return list(task_ids)
@@ -547,6 +571,7 @@ def auto_link_commits(
547
571
  task_id: str | None = None,
548
572
  since: str | None = None,
549
573
  cwd: str | Path | None = None,
574
+ project_name: str | None = None,
550
575
  ) -> AutoLinkResult:
551
576
  """Auto-detect and link commits that mention task IDs.
552
577
 
@@ -558,12 +583,18 @@ def auto_link_commits(
558
583
  task_id: Optional specific task ID to filter for.
559
584
  since: Optional git --since parameter (e.g., "1 week ago", "2024-01-01").
560
585
  cwd: Working directory for git commands.
586
+ project_name: Optional project name to filter commits. If not provided,
587
+ auto-detects from current project context.
561
588
 
562
589
  Returns:
563
590
  AutoLinkResult with details of linked and skipped commits.
564
591
  """
565
592
  working_dir = Path(cwd) if cwd else Path.cwd()
566
593
 
594
+ # Get project name for filtering (auto-detect if not provided)
595
+ if project_name is None:
596
+ project_name = get_current_project_name()
597
+
567
598
  # Build git log command
568
599
  # Format: "sha|message" for easy parsing
569
600
  git_cmd = ["git", "log", "--pretty=format:%h|%s"]
@@ -590,8 +621,8 @@ def auto_link_commits(
590
621
 
591
622
  commit_sha, message = parts
592
623
 
593
- # Extract task IDs from message
594
- found_task_ids = extract_task_ids_from_message(message)
624
+ # Extract task IDs from message (filtered by project name)
625
+ found_task_ids = extract_task_ids_from_message(message, project_name)
595
626
 
596
627
  if not found_task_ids:
597
628
  continue
@@ -101,7 +101,10 @@ class ActionContext:
101
101
  memory_sync_manager: Any | None = None
102
102
  task_sync_manager: Any | None = None
103
103
  session_task_manager: Any | None = None
104
+ skill_manager: Any | None = None
104
105
  event_data: dict[str, Any] | None = None # Hook event data (e.g., prompt_text)
106
+ pipeline_executor: Any | None = None # PipelineExecutor
107
+ workflow_loader: Any | None = None # WorkflowLoader
105
108
 
106
109
 
107
110
  class ActionHandler(Protocol):
@@ -131,6 +134,9 @@ class ActionExecutor:
131
134
  progress_tracker: Any | None = None,
132
135
  stuck_detector: Any | None = None,
133
136
  websocket_server: Any | None = None,
137
+ skill_manager: Any | None = None,
138
+ pipeline_executor: Any | None = None,
139
+ workflow_loader: Any | None = None,
134
140
  ):
135
141
  self.db = db
136
142
  self.session_manager = session_manager
@@ -148,6 +154,9 @@ class ActionExecutor:
148
154
  self.progress_tracker = progress_tracker
149
155
  self.stuck_detector = stuck_detector
150
156
  self.websocket_server = websocket_server
157
+ self.skill_manager = skill_manager
158
+ self.pipeline_executor = pipeline_executor
159
+ self.workflow_loader = workflow_loader
151
160
  self._handlers: dict[str, ActionHandler] = {}
152
161
 
153
162
  self._register_defaults()
@@ -254,6 +263,9 @@ class ActionExecutor:
254
263
  # --- Autonomous execution actions (closures for progress_tracker/stuck_detector) ---
255
264
  self._register_autonomous_actions()
256
265
 
266
+ # --- Pipeline actions (closures for pipeline_executor/workflow_loader) ---
267
+ self._register_pipeline_actions()
268
+
257
269
  def _register_task_enforcement_actions(self) -> None:
258
270
  """Register task enforcement actions with task_manager closure."""
259
271
  tm = self.task_manager
@@ -385,6 +397,69 @@ class ActionExecutor:
385
397
  self.register("record_task_selection", record_sel)
386
398
  self.register("get_progress_summary", get_summary)
387
399
 
400
+ def _register_pipeline_actions(self) -> None:
401
+ """Register pipeline actions with pipeline_executor/workflow_loader closures."""
402
+ executor = self
403
+
404
+ async def run_pipeline(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
405
+ from gobby.workflows.pipeline_state import ApprovalRequired
406
+
407
+ name = kw.get("name")
408
+ inputs = kw.get("inputs") or {}
409
+ await_completion = kw.get("await_completion", False)
410
+
411
+ if not name:
412
+ return {"error": "Pipeline name is required"}
413
+
414
+ if executor.workflow_loader is None:
415
+ return {"error": "Workflow loader not configured"}
416
+
417
+ if executor.pipeline_executor is None:
418
+ return {"error": "Pipeline executor not configured"}
419
+
420
+ # Load the pipeline
421
+ pipeline = executor.workflow_loader.load_pipeline(name)
422
+ if pipeline is None:
423
+ return {"error": f"Pipeline '{name}' not found"}
424
+
425
+ # Render template variables in inputs
426
+ rendered_inputs = {}
427
+ variables = context.state.variables if context.state else {}
428
+ for key, value in inputs.items():
429
+ if isinstance(value, str):
430
+ rendered_inputs[key] = context.template_engine.render(value, variables)
431
+ else:
432
+ rendered_inputs[key] = value
433
+
434
+ try:
435
+ # Execute the pipeline
436
+ execution = await executor.pipeline_executor.execute(
437
+ pipeline=pipeline,
438
+ inputs=rendered_inputs,
439
+ project_id=variables.get("project_id", ""),
440
+ )
441
+
442
+ return {
443
+ "status": execution.status.value,
444
+ "execution_id": execution.id,
445
+ "pipeline_name": execution.pipeline_name,
446
+ }
447
+
448
+ except ApprovalRequired as e:
449
+ # Store pending pipeline in state if await_completion is True
450
+ if await_completion:
451
+ context.state.variables["pending_pipeline"] = e.execution_id
452
+
453
+ return {
454
+ "status": "waiting_approval",
455
+ "execution_id": e.execution_id,
456
+ "step_id": e.step_id,
457
+ "token": e.token,
458
+ "message": e.message,
459
+ }
460
+
461
+ self.register("run_pipeline", run_pipeline)
462
+
388
463
  async def execute(
389
464
  self, action_type: str, context: ActionContext, **kwargs: Any
390
465
  ) -> dict[str, Any] | None:
@@ -25,20 +25,35 @@ def inject_context(
25
25
  session_id: str,
26
26
  state: Any,
27
27
  template_engine: Any,
28
- source: str | None = None,
28
+ source: str | list[str] | None = None,
29
29
  template: str | None = None,
30
30
  require: bool = False,
31
+ skill_manager: Any | None = None,
32
+ filter: str | None = None,
33
+ session_task_manager: Any | None = None,
34
+ memory_manager: Any | None = None,
35
+ prompt_text: str | None = None,
36
+ limit: int = 5,
37
+ min_importance: float = 0.3,
31
38
  ) -> dict[str, Any] | None:
32
- """Inject context from a source.
39
+ """Inject context from a source or multiple sources.
33
40
 
34
41
  Args:
35
42
  session_manager: The session manager instance
36
43
  session_id: Current session ID
37
44
  state: WorkflowState instance
38
45
  template_engine: Template engine for rendering
39
- source: Source type (previous_session_summary, handoff, artifacts, etc.)
46
+ source: Source type(s). Can be a string or list of strings.
47
+ Supported: previous_session_summary, handoff, artifacts, skills, task_context, memories, etc.
40
48
  template: Optional template for rendering
41
49
  require: If True, block session when no content found (default: False)
50
+ skill_manager: HookSkillManager instance (required for source='skills')
51
+ filter: Optional filter for skills source ('always_apply' to only include always-apply skills)
52
+ session_task_manager: SessionTaskManager instance (required for source='task_context')
53
+ memory_manager: MemoryManager instance (required for source='memories')
54
+ prompt_text: User prompt text for memory recall (required for source='memories')
55
+ limit: Max memories to retrieve (default: 5, used with source='memories')
56
+ min_importance: Minimum importance threshold (default: 0.3, used with source='memories')
42
57
 
43
58
  Returns:
44
59
  Dict with inject_context key, blocking decision, or None
@@ -60,6 +75,57 @@ def inject_context(
60
75
  logger.warning("inject_context: session_id is empty or None")
61
76
  return None
62
77
 
78
+ # Handle list of sources - recursively call for each source and combine
79
+ if isinstance(source, list):
80
+ combined_content: list[str] = []
81
+ for single_source in source:
82
+ result = inject_context(
83
+ session_manager=session_manager,
84
+ session_id=session_id,
85
+ state=state,
86
+ template_engine=template_engine,
87
+ source=single_source,
88
+ template=None, # Don't render template for individual sources
89
+ require=False, # Don't block for individual sources
90
+ skill_manager=skill_manager,
91
+ filter=filter,
92
+ session_task_manager=session_task_manager,
93
+ memory_manager=memory_manager,
94
+ prompt_text=prompt_text,
95
+ limit=limit,
96
+ min_importance=min_importance,
97
+ )
98
+ if result and result.get("inject_context"):
99
+ combined_content.append(result["inject_context"])
100
+
101
+ if combined_content:
102
+ content = "\n\n".join(combined_content)
103
+ if template:
104
+ # Build source_contents mapping for individual source access
105
+ source_contents: dict[str, str] = {}
106
+ for i, single_source in enumerate(source):
107
+ if i < len(combined_content):
108
+ source_contents[single_source] = combined_content[i]
109
+ render_context: dict[str, Any] = {
110
+ "session": session_manager.get(session_id),
111
+ "state": state,
112
+ "artifacts": state.artifacts if state else {},
113
+ "observations": state.observations if state else {},
114
+ "combined_content": content,
115
+ "source_contents": source_contents,
116
+ }
117
+ content = template_engine.render(template, render_context)
118
+ state.context_injected = True
119
+ return {"inject_context": content}
120
+
121
+ # No content from any source - block if required
122
+ if require:
123
+ reason = f"Required handoff context not found (sources={source})"
124
+ logger.warning(f"inject_context: {reason}")
125
+ return {"decision": "block", "reason": reason}
126
+
127
+ return None
128
+
63
129
  # Debug logging for troubleshooting
64
130
  logger.debug(
65
131
  f"inject_context called: source={source!r}, "
@@ -77,7 +143,7 @@ def inject_context(
77
143
  if not source and template:
78
144
  # Render static template directly
79
145
  logger.debug("inject_context: entering template-only path")
80
- render_context: dict[str, Any] = {
146
+ render_context = {
81
147
  "session": session_manager.get(session_id),
82
148
  "state": state,
83
149
  "artifacts": state.artifacts if state else {},
@@ -152,6 +218,69 @@ def inject_context(
152
218
  f"Loaded compact_markdown ({len(content)} chars) from current session {session_id}"
153
219
  )
154
220
 
221
+ elif source == "skills":
222
+ # Inject skill context from skill_manager
223
+ if skill_manager is None:
224
+ logger.debug("inject_context: skills source requires skill_manager")
225
+ return None
226
+
227
+ skills = skill_manager.discover_core_skills()
228
+
229
+ # Apply filter if specified
230
+ if filter == "always_apply":
231
+ skills = [s for s in skills if s.is_always_apply()]
232
+
233
+ if skills:
234
+ content = _format_skills(skills)
235
+ logger.debug(f"Formatted {len(skills)} skills for injection")
236
+
237
+ elif source == "task_context":
238
+ # Inject current task context from session_task_manager
239
+ if session_task_manager is None:
240
+ logger.debug("inject_context: task_context source requires session_task_manager")
241
+ return None
242
+
243
+ session_tasks = session_task_manager.get_session_tasks(session_id)
244
+
245
+ # Filter for "worked_on" tasks (the active task)
246
+ worked_on_tasks = [t for t in session_tasks if t.get("action") == "worked_on"]
247
+
248
+ if worked_on_tasks:
249
+ content = _format_task_context(worked_on_tasks)
250
+ logger.debug(f"Formatted {len(worked_on_tasks)} active tasks for injection")
251
+
252
+ elif source == "memories":
253
+ # Inject relevant memories from memory_manager
254
+ if memory_manager is None:
255
+ logger.debug("inject_context: memories source requires memory_manager")
256
+ return None
257
+
258
+ if not memory_manager.config.enabled:
259
+ logger.debug("inject_context: memory manager is disabled")
260
+ return None
261
+
262
+ # Get project_id from session
263
+ project_id = None
264
+ session = session_manager.get(session_id)
265
+ if session:
266
+ project_id = getattr(session, "project_id", None)
267
+
268
+ try:
269
+ memories = memory_manager.recall(
270
+ query=prompt_text or "",
271
+ project_id=project_id,
272
+ limit=limit,
273
+ min_importance=min_importance,
274
+ use_semantic=True,
275
+ )
276
+
277
+ if memories:
278
+ content = _format_memories(memories)
279
+ logger.debug(f"Formatted {len(memories)} memories for injection")
280
+ except Exception as e:
281
+ logger.error(f"inject_context: memory recall failed: {e}")
282
+ return None
283
+
155
284
  if content:
156
285
  if template:
157
286
  render_context = {
@@ -173,6 +302,12 @@ def inject_context(
173
302
  elif source == "compact_handoff":
174
303
  # Pass content to template (like /clear does with summary)
175
304
  render_context["handoff"] = content
305
+ elif source == "skills":
306
+ render_context["skills_list"] = content
307
+ elif source == "task_context":
308
+ render_context["task_context"] = content
309
+ elif source == "memories":
310
+ render_context["memories_list"] = content
176
311
 
177
312
  content = template_engine.render(template, render_context)
178
313
 
@@ -327,6 +462,87 @@ def extract_handoff_context(
327
462
  return {"error": str(e)}
328
463
 
329
464
 
465
+ def _format_memories(memories: list[Any]) -> str:
466
+ """Format memory objects as markdown for injection.
467
+
468
+ Args:
469
+ memories: List of Memory objects
470
+
471
+ Returns:
472
+ Formatted markdown string with memory content
473
+ """
474
+ lines = ["## Relevant Memories"]
475
+ for memory in memories:
476
+ content = getattr(memory, "content", str(memory))
477
+ memory_type = getattr(memory, "memory_type", None)
478
+ importance = getattr(memory, "importance", None)
479
+
480
+ if memory_type:
481
+ lines.append(f"- [{memory_type}] {content}")
482
+ else:
483
+ lines.append(f"- {content}")
484
+
485
+ if importance and importance >= 0.8:
486
+ lines[-1] += " *(high importance)*"
487
+
488
+ return "\n".join(lines)
489
+
490
+
491
+ def _format_task_context(task_entries: list[dict[str, Any]]) -> str:
492
+ """Format task entries as markdown for injection.
493
+
494
+ Args:
495
+ task_entries: List of dicts with 'task' key containing Task objects
496
+
497
+ Returns:
498
+ Formatted markdown string with task info
499
+ """
500
+ lines = ["## Active Task"]
501
+ for entry in task_entries:
502
+ task = entry.get("task")
503
+ if task is None:
504
+ continue
505
+
506
+ seq_num = getattr(task, "seq_num", None)
507
+ title = getattr(task, "title", "Untitled")
508
+ status = getattr(task, "status", "unknown")
509
+ description = getattr(task, "description", "")
510
+ validation = getattr(task, "validation_criteria", "")
511
+
512
+ # Format task reference
513
+ ref = f"#{seq_num}" if seq_num else task.id[:8] if hasattr(task, "id") else "unknown"
514
+ lines.append(f"**{ref}**: {title}")
515
+ lines.append(f"Status: {status}")
516
+
517
+ if description:
518
+ lines.append(f"\n{description}")
519
+
520
+ if validation:
521
+ lines.append(f"\n**Validation Criteria**: {validation}")
522
+
523
+ return "\n".join(lines)
524
+
525
+
526
+ def _format_skills(skills: list[Any]) -> str:
527
+ """Format a list of ParsedSkill objects as markdown for injection.
528
+
529
+ Args:
530
+ skills: List of ParsedSkill objects
531
+
532
+ Returns:
533
+ Formatted markdown string with skill names and descriptions
534
+ """
535
+ lines = ["## Available Skills"]
536
+ for skill in skills:
537
+ name = getattr(skill, "name", "unknown")
538
+ description = getattr(skill, "description", "")
539
+ if description:
540
+ lines.append(f"- **{name}**: {description}")
541
+ else:
542
+ lines.append(f"- **{name}**")
543
+ return "\n".join(lines)
544
+
545
+
330
546
  def recommend_skills_for_task(task: dict[str, Any] | None) -> list[str]:
331
547
  """Recommend relevant skills based on task category.
332
548
 
@@ -409,7 +625,20 @@ def format_handoff_as_markdown(ctx: Any, prompt_template: str | None = None) ->
409
625
  # Files modified section - only show files still dirty (not yet committed)
410
626
  if ctx.files_modified and ctx.git_status:
411
627
  # Filter to files that appear in git status (still uncommitted)
412
- dirty_files = [f for f in ctx.files_modified if f in ctx.git_status]
628
+ # Normalize paths: files_modified may have absolute paths, git_status has relative
629
+ cwd = Path.cwd()
630
+ dirty_files = []
631
+ for f in ctx.files_modified:
632
+ # Try to make path relative to cwd for comparison
633
+ try:
634
+ rel_path = Path(f).relative_to(cwd)
635
+ rel_str = str(rel_path)
636
+ except ValueError:
637
+ # Path not relative to cwd, use as-is
638
+ rel_str = f
639
+ # Check if relative path appears in git status
640
+ if rel_str in ctx.git_status:
641
+ dirty_files.append(rel_str)
413
642
  if dirty_files:
414
643
  lines = ["### Files Being Modified"]
415
644
  for f in dirty_files:
@@ -444,6 +673,11 @@ def format_handoff_as_markdown(ctx: Any, prompt_template: str | None = None) ->
444
673
 
445
674
  async def handle_inject_context(context: ActionContext, **kwargs: Any) -> dict[str, Any] | None:
446
675
  """ActionHandler wrapper for inject_context."""
676
+ # Get prompt_text from event_data if not explicitly passed
677
+ prompt_text = kwargs.get("prompt_text")
678
+ if prompt_text is None and context.event_data:
679
+ prompt_text = context.event_data.get("prompt_text")
680
+
447
681
  return await asyncio.to_thread(
448
682
  inject_context,
449
683
  session_manager=context.session_manager,
@@ -453,6 +687,13 @@ async def handle_inject_context(context: ActionContext, **kwargs: Any) -> dict[s
453
687
  source=kwargs.get("source"),
454
688
  template=kwargs.get("template"),
455
689
  require=kwargs.get("require", False),
690
+ skill_manager=context.skill_manager,
691
+ filter=kwargs.get("filter"),
692
+ session_task_manager=context.session_task_manager,
693
+ memory_manager=context.memory_manager,
694
+ prompt_text=prompt_text,
695
+ limit=kwargs.get("limit", 5),
696
+ min_importance=kwargs.get("min_importance", 0.3),
456
697
  )
457
698
 
458
699
 
@@ -30,7 +30,11 @@ class PrematureStopHandler(BaseModel):
30
30
  """Handler for when an agent attempts to stop before task completion."""
31
31
 
32
32
  action: Literal["guide_continuation", "block", "warn"] = "guide_continuation"
33
- message: str = "Task has incomplete subtasks. Use suggest_next_task() to continue."
33
+ message: str = (
34
+ "Task has incomplete subtasks. Options: "
35
+ "1) Continue: use suggest_next_task() to find the next task. "
36
+ "2) Stop anyway: use `/g workflows deactivate` to end the workflow first."
37
+ )
34
38
  condition: str | None = None # Optional condition to check (e.g., task_tree_complete)
35
39
 
36
40
 
@@ -88,6 +92,120 @@ class WorkflowDefinition(BaseModel):
88
92
  return None
89
93
 
90
94
 
95
+ # --- Pipeline Definition Models (YAML) ---
96
+
97
+
98
+ class WebhookEndpoint(BaseModel):
99
+ """Configuration for a webhook endpoint."""
100
+
101
+ url: str
102
+ method: str = "POST"
103
+ headers: dict[str, str] = Field(default_factory=dict)
104
+
105
+
106
+ class WebhookConfig(BaseModel):
107
+ """Webhook configuration for pipeline events."""
108
+
109
+ on_approval_pending: WebhookEndpoint | None = None
110
+ on_complete: WebhookEndpoint | None = None
111
+ on_failure: WebhookEndpoint | None = None
112
+
113
+
114
+ class PipelineApproval(BaseModel):
115
+ """Approval gate configuration for a pipeline step."""
116
+
117
+ required: bool = False
118
+ message: str | None = None
119
+ timeout_seconds: int | None = None
120
+
121
+
122
+ class PipelineStep(BaseModel):
123
+ """A single step in a pipeline workflow.
124
+
125
+ Steps must have exactly one execution type: exec, prompt, or invoke_pipeline.
126
+ """
127
+
128
+ id: str
129
+
130
+ # Execution types (mutually exclusive - exactly one required)
131
+ exec: str | None = None # Shell command to run
132
+ prompt: str | None = None # LLM prompt template
133
+ invoke_pipeline: str | None = None # Name of pipeline to invoke
134
+
135
+ # Optional fields
136
+ condition: str | None = None # Condition for step execution
137
+ approval: PipelineApproval | None = None # Approval gate
138
+ tools: list[str] = Field(default_factory=list) # Tool restrictions for prompt steps
139
+ input: str | None = None # Explicit input reference (e.g., $prev_step.output)
140
+
141
+ def model_post_init(self, __context: Any) -> None:
142
+ """Validate that exactly one execution type is specified."""
143
+ exec_types = [self.exec, self.prompt, self.invoke_pipeline]
144
+ specified = [t for t in exec_types if t is not None]
145
+
146
+ if len(specified) == 0:
147
+ raise ValueError(
148
+ "PipelineStep requires at least one execution type: exec, prompt, or invoke_pipeline"
149
+ )
150
+ if len(specified) > 1:
151
+ raise ValueError(
152
+ "PipelineStep exec, prompt, and invoke_pipeline are mutually exclusive - only one allowed"
153
+ )
154
+
155
+
156
+ class PipelineDefinition(BaseModel):
157
+ """Definition for a pipeline workflow with typed data flow between steps.
158
+
159
+ Pipelines execute steps sequentially with explicit data flow via $step.output references.
160
+ """
161
+
162
+ name: str
163
+ description: str | None = None
164
+ version: str = "1.0"
165
+ type: Literal["pipeline"] = "pipeline"
166
+
167
+ @field_validator("version", mode="before")
168
+ @classmethod
169
+ def coerce_version_to_string(cls, v: Any) -> str:
170
+ """Accept numeric versions (1.0, 2) and coerce to string."""
171
+ return str(v) if v is not None else "1.0"
172
+
173
+ # Input/output schema
174
+ inputs: dict[str, Any] = Field(default_factory=dict)
175
+ outputs: dict[str, Any] = Field(default_factory=dict)
176
+
177
+ # Pipeline steps
178
+ steps: list[PipelineStep] = Field(default_factory=list)
179
+
180
+ # Webhook notifications
181
+ webhooks: WebhookConfig | None = None
182
+
183
+ # Expose as MCP tool
184
+ expose_as_tool: bool = False
185
+
186
+ @field_validator("steps", mode="after")
187
+ @classmethod
188
+ def validate_steps(cls, v: list[PipelineStep]) -> list[PipelineStep]:
189
+ """Validate pipeline steps."""
190
+ if len(v) == 0:
191
+ raise ValueError("Pipeline requires at least one step")
192
+
193
+ # Check for duplicate step IDs
194
+ ids = [step.id for step in v]
195
+ if len(ids) != len(set(ids)):
196
+ duplicates = [id for id in ids if ids.count(id) > 1]
197
+ raise ValueError(f"Pipeline step IDs must be unique. Duplicates: {set(duplicates)}")
198
+
199
+ return v
200
+
201
+ def get_step(self, step_id: str) -> PipelineStep | None:
202
+ """Get a step by its ID."""
203
+ for step in self.steps:
204
+ if step.id == step_id:
205
+ return step
206
+ return None
207
+
208
+
91
209
  # --- Workflow State Models (Runtime) ---
92
210
 
93
211