gobby-0.2.9-py3-none-any.whl → gobby-0.2.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +1 -1
- gobby/adapters/__init__.py +6 -0
- gobby/adapters/base.py +11 -2
- gobby/adapters/claude_code.py +2 -2
- gobby/adapters/codex_impl/adapter.py +38 -43
- gobby/adapters/copilot.py +324 -0
- gobby/adapters/cursor.py +373 -0
- gobby/adapters/gemini.py +2 -26
- gobby/adapters/windsurf.py +359 -0
- gobby/agents/definitions.py +162 -2
- gobby/agents/isolation.py +33 -1
- gobby/agents/pty_reader.py +192 -0
- gobby/agents/registry.py +10 -1
- gobby/agents/runner.py +24 -8
- gobby/agents/sandbox.py +8 -3
- gobby/agents/session.py +4 -0
- gobby/agents/spawn.py +9 -2
- gobby/agents/spawn_executor.py +49 -61
- gobby/agents/spawners/command_builder.py +4 -4
- gobby/app_context.py +5 -0
- gobby/cli/__init__.py +4 -0
- gobby/cli/install.py +259 -4
- gobby/cli/installers/__init__.py +12 -0
- gobby/cli/installers/copilot.py +242 -0
- gobby/cli/installers/cursor.py +244 -0
- gobby/cli/installers/shared.py +3 -0
- gobby/cli/installers/windsurf.py +242 -0
- gobby/cli/pipelines.py +639 -0
- gobby/cli/sessions.py +3 -1
- gobby/cli/skills.py +209 -0
- gobby/cli/tasks/crud.py +6 -5
- gobby/cli/tasks/search.py +1 -1
- gobby/cli/ui.py +116 -0
- gobby/cli/workflows.py +38 -17
- gobby/config/app.py +5 -0
- gobby/config/skills.py +23 -2
- gobby/hooks/broadcaster.py +9 -0
- gobby/hooks/event_handlers/_base.py +6 -1
- gobby/hooks/event_handlers/_session.py +44 -130
- gobby/hooks/events.py +48 -0
- gobby/hooks/hook_manager.py +25 -3
- gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
- gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
- gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
- gobby/llm/__init__.py +14 -1
- gobby/llm/claude.py +217 -1
- gobby/llm/service.py +149 -0
- gobby/mcp_proxy/instructions.py +9 -27
- gobby/mcp_proxy/models.py +1 -0
- gobby/mcp_proxy/registries.py +56 -9
- gobby/mcp_proxy/server.py +6 -2
- gobby/mcp_proxy/services/tool_filter.py +7 -0
- gobby/mcp_proxy/services/tool_proxy.py +19 -1
- gobby/mcp_proxy/stdio.py +37 -21
- gobby/mcp_proxy/tools/agents.py +7 -0
- gobby/mcp_proxy/tools/hub.py +30 -1
- gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
- gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
- gobby/mcp_proxy/tools/orchestration/review.py +17 -4
- gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
- gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
- gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
- gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
- gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
- gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
- gobby/mcp_proxy/tools/skills/__init__.py +184 -30
- gobby/mcp_proxy/tools/spawn_agent.py +229 -14
- gobby/mcp_proxy/tools/tasks/_context.py +8 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
- gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
- gobby/mcp_proxy/tools/tasks/_search.py +1 -1
- gobby/mcp_proxy/tools/workflows/__init__.py +9 -2
- gobby/mcp_proxy/tools/workflows/_lifecycle.py +12 -1
- gobby/mcp_proxy/tools/workflows/_query.py +45 -26
- gobby/mcp_proxy/tools/workflows/_terminal.py +39 -3
- gobby/mcp_proxy/tools/worktrees.py +54 -15
- gobby/memory/context.py +5 -5
- gobby/runner.py +108 -6
- gobby/servers/http.py +7 -1
- gobby/servers/routes/__init__.py +2 -0
- gobby/servers/routes/admin.py +44 -0
- gobby/servers/routes/mcp/endpoints/execution.py +18 -25
- gobby/servers/routes/mcp/hooks.py +10 -1
- gobby/servers/routes/pipelines.py +227 -0
- gobby/servers/websocket.py +314 -1
- gobby/sessions/analyzer.py +87 -1
- gobby/sessions/manager.py +5 -5
- gobby/sessions/transcripts/__init__.py +3 -0
- gobby/sessions/transcripts/claude.py +5 -0
- gobby/sessions/transcripts/codex.py +5 -0
- gobby/sessions/transcripts/gemini.py +5 -0
- gobby/skills/hubs/__init__.py +25 -0
- gobby/skills/hubs/base.py +234 -0
- gobby/skills/hubs/claude_plugins.py +328 -0
- gobby/skills/hubs/clawdhub.py +289 -0
- gobby/skills/hubs/github_collection.py +465 -0
- gobby/skills/hubs/manager.py +263 -0
- gobby/skills/hubs/skillhub.py +342 -0
- gobby/storage/memories.py +4 -4
- gobby/storage/migrations.py +95 -3
- gobby/storage/pipelines.py +367 -0
- gobby/storage/sessions.py +23 -4
- gobby/storage/skills.py +1 -1
- gobby/storage/tasks/_aggregates.py +2 -2
- gobby/storage/tasks/_lifecycle.py +4 -4
- gobby/storage/tasks/_models.py +7 -1
- gobby/storage/tasks/_queries.py +3 -3
- gobby/sync/memories.py +4 -3
- gobby/tasks/commits.py +48 -17
- gobby/workflows/actions.py +75 -0
- gobby/workflows/context_actions.py +246 -5
- gobby/workflows/definitions.py +119 -1
- gobby/workflows/detection_helpers.py +23 -11
- gobby/workflows/enforcement/task_policy.py +18 -0
- gobby/workflows/engine.py +20 -1
- gobby/workflows/evaluator.py +8 -5
- gobby/workflows/lifecycle_evaluator.py +57 -26
- gobby/workflows/loader.py +567 -30
- gobby/workflows/lobster_compat.py +147 -0
- gobby/workflows/pipeline_executor.py +801 -0
- gobby/workflows/pipeline_state.py +172 -0
- gobby/workflows/pipeline_webhooks.py +206 -0
- gobby/workflows/premature_stop.py +5 -0
- gobby/worktrees/git.py +135 -20
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/RECORD +134 -106
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
gobby/mcp_proxy/tools/spawn_agent.py

@@ -14,7 +14,7 @@ from __future__ import annotations
 import logging
 import uuid
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Any, Literal
 
 from gobby.agents.definitions import AgentDefinition, AgentDefinitionLoader
 from gobby.agents.isolation import (
@@ -28,6 +28,7 @@ from gobby.mcp_proxy.tools.internal import InternalToolRegistry
 from gobby.mcp_proxy.tools.tasks import resolve_task_id_for_mcp
 from gobby.utils.machine_id import get_machine_id
 from gobby.utils.project_context import get_project_context
+from gobby.workflows.loader import WorkflowLoader
 
 if TYPE_CHECKING:
     from gobby.agents.runner import AgentRunner
@@ -36,6 +37,82 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+async def _handle_self_mode(
+    workflow: str | None,
+    parent_session_id: str,
+    step_variables: dict[str, Any] | None,
+    initial_step: str | None,
+    workflow_loader: WorkflowLoader | None,
+    state_manager: Any | None,
+    session_manager: Any | None,
+    db: Any | None,
+    project_path: str | None,
+) -> dict[str, Any]:
+    """
+    Activate workflow on calling session instead of spawning a new agent.
+
+    This is the implementation for mode=self, which activates a workflow
+    on the parent session rather than creating a new child session.
+
+    Args:
+        workflow: Workflow name to activate
+        parent_session_id: Session to activate workflow on (the caller)
+        step_variables: Initial variables for the workflow
+        initial_step: Optional starting step (defaults to first step)
+        workflow_loader: WorkflowLoader instance
+        state_manager: WorkflowStateManager instance (or created from db if None)
+        session_manager: LocalSessionManager instance
+        db: Database instance
+        project_path: Project path for workflow lookup
+
+    Returns:
+        Dict with success status and activation details
+    """
+    if not workflow:
+        return {"success": False, "error": "mode: self requires a workflow to activate"}
+
+    # Create state_manager from db if not provided
+    effective_state_manager = state_manager
+    if effective_state_manager is None and db is not None:
+        from gobby.workflows.state_manager import WorkflowStateManager
+
+        effective_state_manager = WorkflowStateManager(db)
+
+    if not workflow_loader or not effective_state_manager or not session_manager or not db:
+        return {
+            "success": False,
+            "error": "mode: self requires workflow_loader, state_manager (or db), session_manager, and db",
+        }
+
+    # Import and call the existing activate_workflow function
+    from gobby.mcp_proxy.tools.workflows._lifecycle import activate_workflow
+
+    result = activate_workflow(
+        loader=workflow_loader,
+        state_manager=effective_state_manager,
+        session_manager=session_manager,
+        db=db,
+        name=workflow,
+        session_id=parent_session_id,
+        initial_step=initial_step,
+        variables=step_variables,
+        project_path=project_path,
+    )
+
+    if not result.get("success"):
+        return result
+
+    return {
+        "success": True,
+        "mode": "self",
+        "workflow_activated": workflow,
+        "session_id": parent_session_id,
+        "step": result.get("step"),
+        "steps": result.get("steps"),
+        "message": f"Workflow '{workflow}' activated on session {parent_session_id}",
+    }
+
+
 async def spawn_agent_impl(
     prompt: str,
     runner: AgentRunner,
@@ -53,7 +130,8 @@ async def spawn_agent_impl(
     clone_manager: Any | None = None,
     # Execution
     workflow: str | None = None,
-    mode: Literal["terminal", "embedded", "headless"] | None = None,
+    mode: Literal["terminal", "embedded", "headless", "self"] | None = None,
+    initial_step: str | None = None,  # For mode=self, start at specific step
     terminal: str = "auto",
     provider: str | None = None,
     model: str | None = None,
@@ -68,6 +146,11 @@ async def spawn_agent_impl(
     # Context
     parent_session_id: str | None = None,
     project_path: str | None = None,
+    # For mode=self (workflow activation on caller session)
+    workflow_loader: WorkflowLoader | None = None,
+    state_manager: Any | None = None,  # WorkflowStateManager
+    session_manager: Any | None = None,  # LocalSessionManager
+    db: Any | None = None,  # DatabaseProtocol
 ) -> dict[str, Any]:
     """
     Core spawn_agent implementation that can be called directly.
@@ -91,7 +174,7 @@ async def spawn_agent_impl(
         workflow: Workflow to use
         mode: Execution mode (terminal/embedded/headless)
         terminal: Terminal type for terminal mode
-        provider: AI provider (claude/gemini/codex)
+        provider: AI provider (claude/gemini/codex/cursor/windsurf/copilot)
         model: Model to use
         timeout: Timeout in seconds
         max_turns: Maximum conversation turns
@@ -116,18 +199,77 @@ async def spawn_agent_impl(
         effective_provider = agent_def.provider
     effective_provider = effective_provider or "claude"
 
-    effective_mode: Literal["terminal", "embedded", "headless"] | None = mode
+    effective_mode: Literal["terminal", "embedded", "headless", "self"] | None = mode
     if effective_mode is None and agent_def:
-        effective_mode =
+        effective_mode = agent_def.get_effective_mode(workflow)
     effective_mode = effective_mode or "terminal"
 
-
-
-
+    # Resolve workflow using agent_def's named workflows map
+    # Resolution order: explicit param > agent's workflows map > legacy workflow field
+    effective_workflow: str | None = None
+    if agent_def:
+        effective_workflow = agent_def.get_effective_workflow(workflow)
+    elif workflow:
+        effective_workflow = workflow
+
+    # Handle mode=self: activate workflow on caller session instead of spawning
+    if effective_mode == "self":
+        # Validate constraints
+        if effective_isolation != "current":
+            return {
+                "success": False,
+                "error": "mode: self is incompatible with isolation (worktree/clone). "
+                "Self mode activates a workflow on the calling session.",
+            }
+        if not effective_workflow:
+            return {
+                "success": False,
+                "error": "mode: self requires a workflow to activate",
+            }
+        if not parent_session_id:
+            return {
+                "success": False,
+                "error": "mode: self requires parent_session_id (the session to activate on)",
+            }
+
+        # Resolve step_variables for workflow activation
+        self_step_variables: dict[str, Any] | None = None
+        if task_id and task_manager:
+            # Resolve project context first for task resolution
+            ctx = get_project_context(Path(project_path) if project_path else None)
+            self_project_id = ctx.get("id") if ctx else None
+            if self_project_id:
+                try:
+                    self_task_id = resolve_task_id_for_mcp(task_manager, task_id, self_project_id)
+                    task = task_manager.get_task(self_task_id)
+                    if task:
+                        self_step_variables = {
+                            "assigned_task_id": f"#{task.seq_num}" if task.seq_num else self_task_id
+                        }
+                except Exception as e:
+                    logger.warning(f"Failed to resolve task_id {task_id}: {e}")
+
+        return await _handle_self_mode(
+            workflow=effective_workflow,
+            parent_session_id=parent_session_id,
+            step_variables=self_step_variables,
+            initial_step=initial_step,
+            workflow_loader=workflow_loader,
+            state_manager=state_manager,
+            session_manager=session_manager,
+            db=db,
+            project_path=project_path,
+        )
 
     effective_base_branch = base_branch
     if effective_base_branch is None and agent_def:
         effective_base_branch = agent_def.base_branch
+    # Auto-detect current branch if no base_branch specified
+    if effective_base_branch is None and git_manager:
+        try:
+            effective_base_branch = git_manager.get_current_branch()
+        except Exception:  # nosec B110 - fallback to default branch is intentional
+            effective_base_branch = None
     effective_base_branch = effective_base_branch or "main"
 
     effective_branch_prefix = None
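Note: when mode resolves to "self", the block above short-circuits spawning entirely — validate constraints, resolve task variables, then delegate to _handle_self_mode. A minimal call sketch, assuming runner, session, and loader objects are already in scope (the workflow name is invented for illustration):

result = await spawn_agent_impl(
    prompt="",                         # unused for mode=self; no child agent is created
    runner=runner,
    workflow="triage",                 # hypothetical workflow name
    mode="self",
    initial_step=None,                 # defaults to the workflow's first step
    parent_session_id=current_session_id,
    workflow_loader=workflow_loader,
    state_manager=None,                # _handle_self_mode builds one from db when omitted
    session_manager=session_manager,
    db=db,
)
# Expected shape on success:
# {"success": True, "mode": "self", "workflow_activated": "triage", "session_id": ...}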
@@ -250,7 +392,14 @@ async def spawn_agent_impl(
     session_id = str(uuid.uuid4())
     run_id = str(uuid.uuid4())
 
-    # 10.
+    # 10. Build step_variables for workflow activation (e.g., assigned_task_id)
+    step_variables: dict[str, Any] | None = None
+    if resolved_task_id:
+        step_variables = {
+            "assigned_task_id": f"#{task_seq_num}" if task_seq_num else resolved_task_id
+        }
+
+    # 11. Execute spawn via SpawnExecutor
     spawn_request = SpawnRequest(
         prompt=enhanced_prompt,
         cwd=isolation_ctx.cwd,
@@ -262,8 +411,10 @@ async def spawn_agent_impl(
         parent_session_id=parent_session_id,
         project_id=project_id,
         workflow=effective_workflow,
+        step_variables=step_variables,
         worktree_id=isolation_ctx.worktree_id,
         clone_id=isolation_ctx.clone_id,
+        branch_name=isolation_ctx.branch_name,
         session_manager=runner._child_session_manager,
         machine_id=get_machine_id() or "unknown",
         sandbox_config=effective_sandbox_config,
@@ -314,6 +465,10 @@ def create_spawn_agent_registry(
     clone_storage: Any | None = None,
     clone_manager: Any | None = None,
     session_manager: Any | None = None,
+    workflow_loader: WorkflowLoader | None = None,
+    # For mode=self (workflow activation on caller session)
+    state_manager: Any | None = None,  # WorkflowStateManager
+    db: Any | None = None,  # DatabaseProtocol
 ) -> InternalToolRegistry:
     """
     Create a spawn_agent tool registry with the unified spawn_agent tool.
@@ -327,6 +482,9 @@ def create_spawn_agent_registry(
         clone_storage: Storage for clone records.
         clone_manager: Git manager for clone operations.
         session_manager: Session manager for resolving session references.
+        workflow_loader: Loader for workflow validation.
+        state_manager: WorkflowStateManager for mode=self activation.
+        db: Database instance for mode=self activation.
 
     Returns:
         InternalToolRegistry with spawn_agent tool registered.
@@ -345,8 +503,9 @@ def create_spawn_agent_registry(
         description="Unified agent spawning with isolation support",
     )
 
-    # Use provided
+    # Use provided loaders or create defaults
     loader = agent_loader or AgentDefinitionLoader()
+    wf_loader = workflow_loader or WorkflowLoader()
 
     @registry.tool(
         name="spawn_agent",
@@ -367,7 +526,8 @@ def create_spawn_agent_registry(
         base_branch: str | None = None,
         # Execution
         workflow: str | None = None,
-        mode: Literal["terminal", "embedded", "headless"] | None = None,
+        mode: Literal["terminal", "embedded", "headless", "self"] | None = None,
+        initial_step: str | None = None,
         terminal: str = "auto",
         provider: str | None = None,
         model: str | None = None,
@@ -394,9 +554,11 @@ def create_spawn_agent_registry(
             branch_name: Git branch name (auto-generated from task if not provided)
             base_branch: Base branch for worktree/clone
             workflow: Workflow to use
-            mode: Execution mode (terminal/embedded/headless)
+            mode: Execution mode (terminal/embedded/headless/self).
+                'self' activates workflow on caller session instead of spawning.
+            initial_step: For mode=self, start at specific step (defaults to first)
             terminal: Terminal type for terminal mode
-            provider: AI provider (claude/gemini/codex)
+            provider: AI provider (claude/gemini/codex/cursor/windsurf/copilot)
             model: Model to use
             timeout: Timeout in seconds
             max_turns: Maximum conversation turns
@@ -423,6 +585,53 @@ def create_spawn_agent_registry(
         if agent_def is None and agent != "generic":
             return {"success": False, "error": f"Agent '{agent}' not found"}
 
+        # Determine effective workflow using agent's named workflows map
+        # Resolution: explicit param > agent's workflows map > legacy workflow field
+        effective_workflow: str | None = None
+        inline_workflow_spec = None
+
+        if agent_def:
+            effective_workflow = agent_def.get_effective_workflow(workflow)
+
+            # Check if this is an inline workflow that needs registration
+            if workflow and agent_def.workflows and workflow in agent_def.workflows:
+                spec = agent_def.workflows[workflow]
+                if spec.is_inline():
+                    inline_workflow_spec = spec
+            elif (
+                not workflow
+                and agent_def.default_workflow
+                and agent_def.workflows
+                and agent_def.default_workflow in agent_def.workflows
+            ):
+                spec = agent_def.workflows[agent_def.default_workflow]
+                if spec.is_inline():
+                    inline_workflow_spec = spec
+        elif workflow:
+            effective_workflow = workflow
+
+        # Get project_path for workflow lookup
+        ctx = get_project_context(Path(project_path) if project_path else None)
+        wf_project_path = ctx.get("project_path") if ctx else None
+
+        # Register inline workflow if needed
+        if inline_workflow_spec and effective_workflow:
+            wf_loader.register_inline_workflow(
+                effective_workflow, inline_workflow_spec.model_dump(), project_path=wf_project_path
+            )
+
+        # Validate workflow exists if specified (skip for inline that we just registered)
+        if effective_workflow and not inline_workflow_spec:
+            loaded_workflow = wf_loader.load_workflow(
+                effective_workflow, project_path=wf_project_path
+            )
+            if loaded_workflow is None:
+                return {
+                    "success": False,
+                    "error": f"Workflow '{effective_workflow}' not found. "
+                    f"Check available workflows with list_workflows().",
+                }
+
         # Delegate to spawn_agent_impl
         return await spawn_agent_impl(
             prompt=prompt,
@@ -437,8 +646,9 @@ def create_spawn_agent_registry(
             git_manager=git_manager,
             clone_storage=clone_storage,
             clone_manager=clone_manager,
-            workflow=
+            workflow=effective_workflow,
             mode=mode,
+            initial_step=initial_step,
             terminal=terminal,
             provider=provider,
             model=model,
@@ -450,6 +660,11 @@ def create_spawn_agent_registry(
             sandbox_extra_paths=sandbox_extra_paths,
             parent_session_id=resolved_parent_session_id,
             project_path=project_path,
+            # For mode=self
+            workflow_loader=wf_loader,
+            state_manager=state_manager,
+            session_manager=session_manager,
+            db=db,
         )
 
     return registry
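A sketch of wiring the new mode=self dependencies when building the registry; only parameters visible in this diff are shown, and the remaining arguments (storage, managers, loaders) are elided:

registry = create_spawn_agent_registry(
    # ... existing dependencies elided ...
    session_manager=session_manager,
    workflow_loader=WorkflowLoader(),  # optional; a default loader is created if omitted
    state_manager=None,                # optional; derived from db inside _handle_self_mode
    db=db,
)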
gobby/mcp_proxy/tools/tasks/_context.py

@@ -88,6 +88,14 @@ class RegistryContext:
             return project_id
         return None
 
+    def get_current_project_name(self) -> str | None:
+        """Get the current project name from context, or None if not in a project."""
+        ctx = get_project_context()
+        if ctx and ctx.get("name"):
+            name: str = ctx["name"]
+            return name
+        return None
+
     def get_workflow_state(self, session_id: str | None) -> WorkflowState | None:
         """Get workflow state for a session, if available."""
         if not session_id:
gobby/mcp_proxy/tools/tasks/_crud.py

@@ -335,6 +335,32 @@ def create_crud_registry(ctx: RegistryContext) -> InternalToolRegistry:
         except ValueError as e:
             return {"error": str(e)}
 
+        # Block closing tasks via update_task - must use close_task for proper workflow
+        if status is not None and status.lower() == "closed":
+            return {
+                "error": "Cannot set status to 'closed' via update_task. "
+                "Use close_task(task_id, commit_sha='...') to properly close tasks with commit linking."
+            }
+
+        # Block claiming tasks via update_task - must use claim_task for proper workflow
+        if status is not None and status.lower() == "in_progress":
+            return {
+                "error": "Cannot set status to 'in_progress' via update_task. "
+                "Use claim_task(task_id, session_id='...') to properly claim tasks with session tracking."
+            }
+        if assignee is not None:
+            return {
+                "error": "Cannot set assignee via update_task. "
+                "Use claim_task(task_id, session_id='...') to properly claim tasks with session tracking."
+            }
+
+        # Block needs_review status via update_task - must use mark_task_for_review for proper workflow
+        if status is not None and status.lower() in ("review", "needs_review"):
+            return {
+                "error": "Cannot set status to 'needs_review' via update_task. "
+                "Use mark_task_for_review(task_id, session_id='...') to properly route tasks for review."
+            }
+
         # Build kwargs only for non-None values to avoid overwriting with NULL
         kwargs: dict[str, Any] = {}
         if title is not None:
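The guards above route every status transition through a dedicated tool rather than raw field updates. A sketch of the intended call sequence, matching the error messages (task and session refs invented):

update_task(task_id="#42", status="in_progress")      # rejected: use claim_task
claim_task(task_id="#42", session_id="#7")            # claim with session tracking
mark_task_for_review(task_id="#42", session_id="#7")  # work done -> needs_review
close_task(task_id="#42", commit_sha="abc123")        # close with commit linking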
@@ -395,7 +421,7 @@ def create_crud_registry(ctx: RegistryContext) -> InternalToolRegistry:
                 },
                 "status": {
                     "type": "string",
-                    "description": "New status (open, in_progress,
+                    "description": "New status (open, in_progress, needs_review, closed)",
                     "default": None,
                 },
                 "priority": {"type": "integer", "description": "New priority", "default": None},
gobby/mcp_proxy/tools/tasks/_helpers.py

@@ -6,7 +6,7 @@ for task operations.
 
 # Reasons for which commit linking and validation are skipped when closing tasks
 SKIP_REASONS: frozenset[str] = frozenset(
-    {"duplicate", "already_implemented", "wont_fix", "obsolete"}
+    {"duplicate", "already_implemented", "wont_fix", "obsolete", "out_of_repo"}
 )
 
 # Category inference patterns mapping category to keywords/phrases
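With "out_of_repo" in SKIP_REASONS, closing a task whose changes live outside the repository (e.g., ~/.gobby/config.yaml) no longer demands a linked commit. A hedged usage sketch (task ref invented):

close_task(task_id="#42", reason="out_of_repo")  # commit linking and validation skipped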
gobby/mcp_proxy/tools/tasks/_lifecycle.py

@@ -3,6 +3,7 @@
 Provides task lifecycle tools: close, reopen, delete, and label management.
 """
 
+import uuid
 from typing import Any
 
 from gobby.mcp_proxy.tools.internal import InternalToolRegistry
@@ -20,6 +21,15 @@ from gobby.storage.tasks import TaskNotFoundError
 from gobby.storage.worktrees import LocalWorktreeManager
 
 
+def _is_uuid(value: str) -> bool:
+    """Check if a string is a valid UUID (not a ref like #123)."""
+    try:
+        uuid.UUID(value)
+        return True
+    except (ValueError, TypeError):
+        return False
+
+
 def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
     """Create a registry with task lifecycle tools.
 
@@ -121,7 +131,7 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
                     "You must commit your changes and link them to the task before closing."
                 ),
                 "suggestion": (
-                    "Commit your changes with `[
+                    f"Commit your changes with `[{ctx.get_current_project_name() or 'project'}-#task_id]` in the message, "
                     "or pass `commit_sha` to `close_task`."
                 ),
             }
@@ -184,18 +194,20 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
         current_commit_sha = run_git_command(["git", "rev-parse", "--short", "HEAD"], cwd=cwd)
 
         if route_to_review:
-            # Route to
-            # Task stays in
+            # Route to needs_review status instead of closing
+            # Task stays in needs_review until user explicitly closes
             ctx.task_manager.update_task(
                 resolved_id,
-                status="
+                status="needs_review",
                 validation_override_reason=override_justification if store_override else None,
             )
 
             # Auto-link session if provided
             if resolved_session_id:
                 try:
-                    ctx.session_task_manager.link_task(
+                    ctx.session_task_manager.link_task(
+                        resolved_session_id, resolved_id, "needs_review"
+                    )
                 except Exception:
                     pass  # nosec B110 - best-effort linking
 
@@ -235,7 +247,17 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
         if resolved_session_id:
             try:
                 state = ctx.workflow_state_manager.get_state(resolved_session_id)
-                if state
+                if state:
+                    # Resolve claimed_task_id to UUID if it's a ref (backward compat)
+                    claimed_task_id = state.variables.get("claimed_task_id")
+                    if claimed_task_id and not _is_uuid(claimed_task_id):
+                        try:
+                            claimed_task = ctx.task_manager.get_task(claimed_task_id)
+                            if claimed_task:
+                                claimed_task_id = claimed_task.id
+                        except Exception:  # nosec B110 - keep original ID if resolution fails
+                            claimed_task_id = claimed_task_id  # explicit no-op
+                if state and claimed_task_id == resolved_id:
                     # Check if clear_task_on_close is enabled (default: True)
                     clear_on_close = state.variables.get("clear_task_on_close", True)
                     if clear_on_close:
@@ -267,7 +289,7 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
 
     registry.register(
         name="close_task",
-        description="Close a task. Pass commit_sha to link and close in one call: close_task(task_id, commit_sha='abc123'). Or include [
+        description="Close a task. Pass commit_sha to link and close in one call: close_task(task_id, commit_sha='abc123'). Or include [project-#N] in commit message for auto-linking. Parent tasks require all children closed. Validation auto-skipped for: duplicate, already_implemented, wont_fix, obsolete, out_of_repo.",
         input_schema={
             "type": "object",
             "properties": {
@@ -277,7 +299,7 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
                 },
                 "reason": {
                     "type": "string",
-                    "description": 'Reason for closing. Use "duplicate", "already_implemented", "wont_fix", or "
+                    "description": 'Reason for closing. Use "duplicate", "already_implemented", "wont_fix", "obsolete", or "out_of_repo" to auto-skip validation and commit check.',
                     "default": "completed",
                 },
                 "changes_summary": {
@@ -567,6 +589,17 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
         except Exception:
             pass  # nosec B110 - best-effort linking
 
+        # Set task_claimed workflow variable (enables Edit/Write hooks)
+        # This mirrors create_task behavior in _crud.py
+        try:
+            state = ctx.workflow_state_manager.get_state(resolved_session_id)
+            if state:
+                state.variables["task_claimed"] = True
+                state.variables["claimed_task_id"] = resolved_id  # Always use UUID
+                ctx.workflow_state_manager.save_state(state)
+        except Exception:
+            pass  # nosec B110 - best-effort variable setting
+
         return {}
 
     registry.register(
@@ -594,4 +627,88 @@ def create_lifecycle_registry(ctx: RegistryContext) -> InternalToolRegistry:
         func=claim_task,
     )
 
+    def mark_task_for_review(
+        task_id: str,
+        session_id: str,
+        review_notes: str | None = None,
+    ) -> dict[str, Any]:
+        """Mark a task as ready for review.
+
+        Sets status to 'needs_review'. Use this when work is complete
+        but needs human verification before closing.
+
+        Args:
+            task_id: Task reference (#N, path, or UUID)
+            session_id: Session ID marking the task for review
+            review_notes: Optional notes for the reviewer
+
+        Returns:
+            Empty dict on success, or error dict with details.
+        """
+        # Resolve task reference (supports #N, path, UUID formats)
+        try:
+            resolved_id = resolve_task_id_for_mcp(ctx.task_manager, task_id)
+        except TaskNotFoundError as e:
+            return {"success": False, "error": str(e)}
+        except ValueError as e:
+            return {"success": False, "error": str(e)}
+
+        task = ctx.task_manager.get_task(resolved_id)
+        if not task:
+            return {"success": False, "error": f"Task {task_id} not found"}
+
+        # Resolve session_id to UUID (accepts #N, N, UUID, or prefix)
+        resolved_session_id = session_id
+        try:
+            resolved_session_id = ctx.resolve_session_id(session_id)
+        except ValueError:
+            pass  # Fall back to raw value if resolution fails
+
+        # Build update kwargs
+        update_kwargs: dict[str, Any] = {"status": "needs_review"}
+
+        # Append review notes to description if provided
+        if review_notes:
+            current_desc = task.description or ""
+            review_section = f"\n\n[Review Notes]\n{review_notes}"
+            update_kwargs["description"] = current_desc + review_section
+
+        # Update task status to needs_review
+        updated = ctx.task_manager.update_task(resolved_id, **update_kwargs)
+        if not updated:
+            return {"success": False, "error": f"Failed to mark task {task_id} for review"}
+
+        # Link task to session (best-effort, don't fail if this fails)
+        try:
+            ctx.session_task_manager.link_task(resolved_session_id, resolved_id, "needs_review")
+        except Exception:
+            pass  # nosec B110 - best-effort linking
+
+        return {}
+
+    registry.register(
+        name="mark_task_for_review",
+        description="Mark a task as ready for review. Sets status to 'needs_review'. Use this when work is complete but needs human verification before closing.",
+        input_schema={
+            "type": "object",
+            "properties": {
+                "task_id": {
+                    "type": "string",
+                    "description": "Task reference: #N (e.g., #1, #47), path (e.g., 1.2.3), or UUID",
+                },
+                "session_id": {
+                    "type": "string",
+                    "description": "Your session ID (accepts #N, N, UUID, or prefix). The session marking the task for review.",
+                },
+                "review_notes": {
+                    "type": "string",
+                    "description": "Optional notes for the reviewer explaining what was done and what to verify.",
+                    "default": None,
+                },
+            },
+            "required": ["task_id", "session_id"],
+        },
+        func=mark_task_for_review,
+    )
+
     return registry
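A usage sketch for the new tool as registered above (refs and notes invented):

mark_task_for_review(
    task_id="#42",
    session_id="#7",
    review_notes="Implemented retry logic; please verify backoff caps at 60s.",
)
# Status becomes needs_review; notes are appended to the description under [Review Notes].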
gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py

@@ -61,7 +61,8 @@ def validate_commit_requirements(
         '- Task was already done: reason="already_implemented"\n'
         '- Task is no longer needed: reason="obsolete"\n'
         '- Task duplicates another: reason="duplicate"\n'
-        '- Decided not to do it: reason="wont_fix"'
+        '- Decided not to do it: reason="wont_fix"\n'
+        '- Changes outside repo (e.g., ~/.gobby/config.yaml): reason="out_of_repo"'
     ),
 )
 
gobby/mcp_proxy/tools/tasks/_search.py

@@ -43,7 +43,7 @@ def create_search_registry(ctx: RegistryContext) -> InternalToolRegistry:
 
     Args:
         query: Search query text (required). Natural language query.
-        status: Filter by status (open, in_progress,
+        status: Filter by status (open, in_progress, needs_review, closed).
            Can be a single status or comma-separated list.
        task_type: Filter by task type (task, bug, feature, epic)
        priority: Filter by priority (1=High, 2=Medium, 3=Low)