gobby-0.2.6-py3-none-any.whl → gobby-0.2.8-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.
- gobby/__init__.py +1 -1
- gobby/adapters/__init__.py +2 -1
- gobby/adapters/claude_code.py +96 -35
- gobby/adapters/codex_impl/__init__.py +28 -0
- gobby/adapters/codex_impl/adapter.py +722 -0
- gobby/adapters/codex_impl/client.py +679 -0
- gobby/adapters/codex_impl/protocol.py +20 -0
- gobby/adapters/codex_impl/types.py +68 -0
- gobby/adapters/gemini.py +140 -38
- gobby/agents/definitions.py +11 -1
- gobby/agents/isolation.py +525 -0
- gobby/agents/registry.py +11 -0
- gobby/agents/sandbox.py +261 -0
- gobby/agents/session.py +1 -0
- gobby/agents/spawn.py +42 -287
- gobby/agents/spawn_executor.py +415 -0
- gobby/agents/spawners/__init__.py +24 -0
- gobby/agents/spawners/command_builder.py +189 -0
- gobby/agents/spawners/embedded.py +21 -2
- gobby/agents/spawners/headless.py +21 -2
- gobby/agents/spawners/macos.py +26 -1
- gobby/agents/spawners/prompt_manager.py +125 -0
- gobby/cli/__init__.py +0 -2
- gobby/cli/install.py +4 -4
- gobby/cli/installers/claude.py +6 -0
- gobby/cli/installers/gemini.py +6 -0
- gobby/cli/installers/shared.py +103 -4
- gobby/cli/memory.py +185 -0
- gobby/cli/sessions.py +1 -1
- gobby/cli/utils.py +9 -2
- gobby/clones/git.py +177 -0
- gobby/config/__init__.py +12 -97
- gobby/config/app.py +10 -94
- gobby/config/extensions.py +2 -2
- gobby/config/features.py +7 -130
- gobby/config/skills.py +31 -0
- gobby/config/tasks.py +4 -28
- gobby/hooks/__init__.py +0 -13
- gobby/hooks/event_handlers.py +150 -8
- gobby/hooks/hook_manager.py +21 -3
- gobby/hooks/plugins.py +1 -1
- gobby/hooks/webhooks.py +1 -1
- gobby/install/gemini/hooks/hook_dispatcher.py +74 -15
- gobby/llm/resolver.py +3 -2
- gobby/mcp_proxy/importer.py +62 -4
- gobby/mcp_proxy/instructions.py +4 -2
- gobby/mcp_proxy/registries.py +22 -8
- gobby/mcp_proxy/services/recommendation.py +43 -11
- gobby/mcp_proxy/tools/agent_messaging.py +93 -44
- gobby/mcp_proxy/tools/agents.py +76 -740
- gobby/mcp_proxy/tools/artifacts.py +43 -9
- gobby/mcp_proxy/tools/clones.py +0 -385
- gobby/mcp_proxy/tools/memory.py +2 -2
- gobby/mcp_proxy/tools/sessions/__init__.py +14 -0
- gobby/mcp_proxy/tools/sessions/_commits.py +239 -0
- gobby/mcp_proxy/tools/sessions/_crud.py +253 -0
- gobby/mcp_proxy/tools/sessions/_factory.py +63 -0
- gobby/mcp_proxy/tools/sessions/_handoff.py +503 -0
- gobby/mcp_proxy/tools/sessions/_messages.py +166 -0
- gobby/mcp_proxy/tools/skills/__init__.py +14 -29
- gobby/mcp_proxy/tools/spawn_agent.py +455 -0
- gobby/mcp_proxy/tools/tasks/_context.py +18 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +13 -6
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +79 -30
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +1 -1
- gobby/mcp_proxy/tools/tasks/_session.py +22 -7
- gobby/mcp_proxy/tools/workflows.py +84 -34
- gobby/mcp_proxy/tools/worktrees.py +32 -350
- gobby/memory/extractor.py +15 -1
- gobby/memory/ingestion/__init__.py +5 -0
- gobby/memory/ingestion/multimodal.py +221 -0
- gobby/memory/manager.py +62 -283
- gobby/memory/search/__init__.py +10 -0
- gobby/memory/search/coordinator.py +248 -0
- gobby/memory/services/__init__.py +5 -0
- gobby/memory/services/crossref.py +142 -0
- gobby/prompts/loader.py +5 -2
- gobby/runner.py +13 -0
- gobby/servers/http.py +1 -4
- gobby/servers/routes/admin.py +14 -0
- gobby/servers/routes/mcp/endpoints/__init__.py +61 -0
- gobby/servers/routes/mcp/endpoints/discovery.py +405 -0
- gobby/servers/routes/mcp/endpoints/execution.py +568 -0
- gobby/servers/routes/mcp/endpoints/registry.py +378 -0
- gobby/servers/routes/mcp/endpoints/server.py +304 -0
- gobby/servers/routes/mcp/hooks.py +51 -4
- gobby/servers/routes/mcp/tools.py +48 -1506
- gobby/servers/websocket.py +57 -1
- gobby/sessions/analyzer.py +2 -2
- gobby/sessions/lifecycle.py +1 -1
- gobby/sessions/manager.py +9 -0
- gobby/sessions/processor.py +10 -0
- gobby/sessions/transcripts/base.py +1 -0
- gobby/sessions/transcripts/claude.py +15 -5
- gobby/sessions/transcripts/gemini.py +100 -34
- gobby/skills/parser.py +30 -2
- gobby/storage/database.py +9 -2
- gobby/storage/memories.py +32 -21
- gobby/storage/migrations.py +174 -368
- gobby/storage/sessions.py +45 -7
- gobby/storage/skills.py +80 -7
- gobby/storage/tasks/_lifecycle.py +18 -3
- gobby/sync/memories.py +1 -1
- gobby/tasks/external_validator.py +1 -1
- gobby/tasks/validation.py +22 -20
- gobby/tools/summarizer.py +91 -10
- gobby/utils/project_context.py +2 -3
- gobby/utils/status.py +13 -0
- gobby/workflows/actions.py +221 -1217
- gobby/workflows/artifact_actions.py +31 -0
- gobby/workflows/autonomous_actions.py +11 -0
- gobby/workflows/context_actions.py +50 -1
- gobby/workflows/detection_helpers.py +38 -24
- gobby/workflows/enforcement/__init__.py +47 -0
- gobby/workflows/enforcement/blocking.py +281 -0
- gobby/workflows/enforcement/commit_policy.py +283 -0
- gobby/workflows/enforcement/handlers.py +269 -0
- gobby/workflows/enforcement/task_policy.py +542 -0
- gobby/workflows/engine.py +93 -0
- gobby/workflows/evaluator.py +110 -0
- gobby/workflows/git_utils.py +106 -0
- gobby/workflows/hooks.py +41 -0
- gobby/workflows/llm_actions.py +30 -0
- gobby/workflows/mcp_actions.py +20 -1
- gobby/workflows/memory_actions.py +91 -0
- gobby/workflows/safe_evaluator.py +191 -0
- gobby/workflows/session_actions.py +44 -0
- gobby/workflows/state_actions.py +60 -1
- gobby/workflows/stop_signal_actions.py +55 -0
- gobby/workflows/summary_actions.py +217 -51
- gobby/workflows/task_sync_actions.py +347 -0
- gobby/workflows/todo_actions.py +34 -1
- gobby/workflows/webhook_actions.py +185 -0
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/METADATA +6 -1
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/RECORD +139 -163
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/WHEEL +1 -1
- gobby/adapters/codex.py +0 -1332
- gobby/cli/tui.py +0 -34
- gobby/install/claude/commands/gobby/bug.md +0 -51
- gobby/install/claude/commands/gobby/chore.md +0 -51
- gobby/install/claude/commands/gobby/epic.md +0 -52
- gobby/install/claude/commands/gobby/eval.md +0 -235
- gobby/install/claude/commands/gobby/feat.md +0 -49
- gobby/install/claude/commands/gobby/nit.md +0 -52
- gobby/install/claude/commands/gobby/ref.md +0 -52
- gobby/mcp_proxy/tools/session_messages.py +0 -1055
- gobby/prompts/defaults/expansion/system.md +0 -119
- gobby/prompts/defaults/expansion/user.md +0 -48
- gobby/prompts/defaults/external_validation/agent.md +0 -72
- gobby/prompts/defaults/external_validation/external.md +0 -63
- gobby/prompts/defaults/external_validation/spawn.md +0 -83
- gobby/prompts/defaults/external_validation/system.md +0 -6
- gobby/prompts/defaults/features/import_mcp.md +0 -22
- gobby/prompts/defaults/features/import_mcp_github.md +0 -17
- gobby/prompts/defaults/features/import_mcp_search.md +0 -16
- gobby/prompts/defaults/features/recommend_tools.md +0 -32
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +0 -35
- gobby/prompts/defaults/features/recommend_tools_llm.md +0 -30
- gobby/prompts/defaults/features/server_description.md +0 -20
- gobby/prompts/defaults/features/server_description_system.md +0 -6
- gobby/prompts/defaults/features/task_description.md +0 -31
- gobby/prompts/defaults/features/task_description_system.md +0 -6
- gobby/prompts/defaults/features/tool_summary.md +0 -17
- gobby/prompts/defaults/features/tool_summary_system.md +0 -6
- gobby/prompts/defaults/handoff/compact.md +0 -63
- gobby/prompts/defaults/handoff/session_end.md +0 -57
- gobby/prompts/defaults/memory/extract.md +0 -61
- gobby/prompts/defaults/research/step.md +0 -58
- gobby/prompts/defaults/validation/criteria.md +0 -47
- gobby/prompts/defaults/validation/validate.md +0 -38
- gobby/storage/migrations_legacy.py +0 -1359
- gobby/tui/__init__.py +0 -5
- gobby/tui/api_client.py +0 -278
- gobby/tui/app.py +0 -329
- gobby/tui/screens/__init__.py +0 -25
- gobby/tui/screens/agents.py +0 -333
- gobby/tui/screens/chat.py +0 -450
- gobby/tui/screens/dashboard.py +0 -377
- gobby/tui/screens/memory.py +0 -305
- gobby/tui/screens/metrics.py +0 -231
- gobby/tui/screens/orchestrator.py +0 -903
- gobby/tui/screens/sessions.py +0 -412
- gobby/tui/screens/tasks.py +0 -440
- gobby/tui/screens/workflows.py +0 -289
- gobby/tui/screens/worktrees.py +0 -174
- gobby/tui/widgets/__init__.py +0 -21
- gobby/tui/widgets/chat.py +0 -210
- gobby/tui/widgets/conductor.py +0 -104
- gobby/tui/widgets/menu.py +0 -132
- gobby/tui/widgets/message_panel.py +0 -160
- gobby/tui/widgets/review_gate.py +0 -224
- gobby/tui/widgets/task_tree.py +0 -99
- gobby/tui/widgets/token_budget.py +0 -166
- gobby/tui/ws_client.py +0 -258
- gobby/workflows/task_enforcement_actions.py +0 -1343
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.6.dist-info → gobby-0.2.8.dist-info}/top_level.txt +0 -0
--- gobby/workflows/task_enforcement_actions.py
+++ /dev/null
@@ -1,1343 +0,0 @@
-"""
-Task enforcement actions for workflow engine.
-
-Provides actions that enforce task tracking before allowing certain tools,
-and enforce task completion before allowing agent to stop.
-"""
-
-import ast
-import logging
-import operator
-import subprocess  # nosec B404 - subprocess needed for git commands
-from collections.abc import Callable
-from typing import TYPE_CHECKING, Any
-
-from gobby.mcp_proxy.tools.task_readiness import is_descendant_of
-
-if TYPE_CHECKING:
-    from gobby.config.app import DaemonConfig
-    from gobby.storage.session_tasks import SessionTaskManager
-    from gobby.storage.sessions import LocalSessionManager
-    from gobby.storage.tasks import LocalTaskManager
-    from gobby.workflows.definitions import WorkflowState
-
-logger = logging.getLogger(__name__)
-
-
-# =============================================================================
-# Lazy Evaluation Helpers
-# =============================================================================
-
-
-class _LazyBool:
-    """Lazy boolean that defers computation until first access.
-
-    Used to avoid expensive operations (git status, DB queries) when
-    evaluating block_tools conditions that don't reference certain values.
-
-    The computation is triggered when the value is used in a boolean context
-    (e.g., `if lazy_val:` or `not lazy_val`), which happens during eval().
-    """
-
-    __slots__ = ("_thunk", "_computed", "_value")
-
-    def __init__(self, thunk: "Callable[[], bool]") -> None:
-        self._thunk = thunk
-        self._computed = False
-        self._value = False
-
-    def __bool__(self) -> bool:
-        if not self._computed:
-            self._value = self._thunk()
-            self._computed = True
-        return self._value
-
-    def __repr__(self) -> str:
-        if self._computed:
-            return f"_LazyBool({self._value})"
-        return "_LazyBool(<not computed>)"
-
-
-# =============================================================================
-# Helper Functions
-# =============================================================================
-
-
-def _is_plan_file(file_path: str, source: str | None = None) -> bool:
-    """Check if file path is a Claude Code plan file (platform-agnostic).
-
-    Only exempts plan files for Claude Code sessions to avoid accidental
-    exemptions for Gemini/Codex users.
-
-    The pattern `/.claude/plans/` matches paths like:
-    - Unix: /Users/xxx/.claude/plans/file.md (the / comes from xxx/)
-    - Windows: C:/Users/xxx/.claude/plans/file.md (after normalization)
-
-    Args:
-        file_path: The file path being edited
-        source: CLI source (e.g., "claude", "gemini", "codex")
-
-    Returns:
-        True if this is a CC plan file that should be exempt from task requirement
-    """
-    if not file_path:
-        return False
-    # Only exempt for Claude Code sessions
-    if source != "claude":
-        return False
-    # Normalize path separators (Windows backslash to forward slash)
-    normalized = file_path.replace("\\", "/")
-    return "/.claude/plans/" in normalized
-
-
-# =============================================================================
-# Safe Expression Evaluator (AST-based)
-# =============================================================================
-
-
-class SafeExpressionEvaluator(ast.NodeVisitor):
-    """Safe expression evaluator using AST.
-
-    Evaluates simple Python expressions without using eval().
-    Supports boolean operations, comparisons, attribute access, subscripts,
-    and a limited set of allowed function calls.
-    """
-
-    # Comparison operators mapping
-    CMP_OPS: dict[type[ast.cmpop], Callable[[Any, Any], bool]] = {
-        ast.Eq: operator.eq,
-        ast.NotEq: operator.ne,
-        ast.Lt: operator.lt,
-        ast.LtE: operator.le,
-        ast.Gt: operator.gt,
-        ast.GtE: operator.ge,
-        ast.Is: operator.is_,
-        ast.IsNot: operator.is_not,
-        ast.In: lambda a, b: a in b,
-        ast.NotIn: lambda a, b: a not in b,
-    }
-
-    def __init__(
-        self, context: dict[str, Any], allowed_funcs: dict[str, Callable[..., Any]]
-    ) -> None:
-        self.context = context
-        self.allowed_funcs = allowed_funcs
-
-    def evaluate(self, expr: str) -> bool:
-        """Evaluate expression and return boolean result."""
-        try:
-            tree = ast.parse(expr, mode="eval")
-            return bool(self.visit(tree.body))
-        except Exception as e:
-            raise ValueError(f"Invalid expression: {e}") from e
-
-    def visit_BoolOp(self, node: ast.BoolOp) -> bool:
-        """Handle 'and' / 'or' operations."""
-        if isinstance(node.op, ast.And):
-            return all(self.visit(v) for v in node.values)
-        elif isinstance(node.op, ast.Or):
-            return any(self.visit(v) for v in node.values)
-        raise ValueError(f"Unsupported boolean operator: {type(node.op).__name__}")
-
-    def visit_Compare(self, node: ast.Compare) -> bool:
-        """Handle comparison operations (==, !=, <, >, in, not in, etc.)."""
-        left = self.visit(node.left)
-        for op, comparator in zip(node.ops, node.comparators, strict=False):
-            right = self.visit(comparator)
-            op_func = self.CMP_OPS.get(type(op))
-            if op_func is None:
-                raise ValueError(f"Unsupported comparison: {type(op).__name__}")
-            if not op_func(left, right):
-                return False
-            left = right
-        return True
-
-    def visit_UnaryOp(self, node: ast.UnaryOp) -> Any:
-        """Handle unary operations (not, -, +)."""
-        operand = self.visit(node.operand)
-        if isinstance(node.op, ast.Not):
-            return not operand
-        elif isinstance(node.op, ast.USub):
-            return -operand
-        elif isinstance(node.op, ast.UAdd):
-            return +operand
-        raise ValueError(f"Unsupported unary operator: {type(node.op).__name__}")
-
-    def visit_Name(self, node: ast.Name) -> Any:
-        """Handle variable names."""
-        name = node.id
-        # Built-in constants
-        if name == "True":
-            return True
-        if name == "False":
-            return False
-        if name == "None":
-            return None
-        # Context variables
-        if name in self.context:
-            return self.context[name]
-        raise ValueError(f"Unknown variable: {name}")
-
-    def visit_Constant(self, node: ast.Constant) -> Any:
-        """Handle literal values (strings, numbers, booleans, None)."""
-        return node.value
-
-    def visit_Call(self, node: ast.Call) -> Any:
-        """Handle function calls (only allowed functions)."""
-        # Get function name
-        if isinstance(node.func, ast.Name):
-            func_name = node.func.id
-        elif isinstance(node.func, ast.Attribute):
-            # Handle method calls like tool_input.get('key')
-            obj = self.visit(node.func.value)
-            method_name = node.func.attr
-            if method_name == "get" and isinstance(obj, dict):
-                args = [self.visit(arg) for arg in node.args]
-                return obj.get(*args)
-            raise ValueError(f"Unsupported method call: {method_name}")
-        else:
-            raise ValueError(f"Unsupported call type: {type(node.func).__name__}")
-
-        # Check if function is allowed
-        if func_name not in self.allowed_funcs:
-            raise ValueError(f"Function not allowed: {func_name}")
-
-        # Evaluate arguments
-        args = [self.visit(arg) for arg in node.args]
-        kwargs = {kw.arg: self.visit(kw.value) for kw in node.keywords if kw.arg}
-
-        return self.allowed_funcs[func_name](*args, **kwargs)
-
-    def visit_Attribute(self, node: ast.Attribute) -> Any:
-        """Handle attribute access (e.g., obj.attr)."""
-        obj = self.visit(node.value)
-        attr = node.attr
-        if isinstance(obj, dict):
-            # Allow dict-style attribute access for convenience
-            if attr in obj:
-                return obj[attr]
-            raise ValueError(f"Key not found: {attr}")
-        if hasattr(obj, attr):
-            return getattr(obj, attr)
-        raise ValueError(f"Attribute not found: {attr}")
-
-    def visit_Subscript(self, node: ast.Subscript) -> Any:
-        """Handle subscript access (e.g., obj['key'] or obj[0])."""
-        obj = self.visit(node.value)
-        key = self.visit(node.slice)
-        try:
-            return obj[key]
-        except (KeyError, IndexError, TypeError) as e:
-            raise ValueError(f"Subscript access failed: {e}") from e
-
-    def generic_visit(self, node: ast.AST) -> Any:
-        """Reject any unsupported AST nodes."""
-        raise ValueError(f"Unsupported expression type: {type(node).__name__}")
-
-
-# =============================================================================
-# Block Tools Action (Unified Tool Blocking)
-# =============================================================================
-
-
-def _evaluate_block_condition(
-    condition: str | None,
-    workflow_state: "WorkflowState | None",
-    event_data: dict[str, Any] | None = None,
-    tool_input: dict[str, Any] | None = None,
-    session_has_dirty_files: "_LazyBool | bool" = False,
-    task_has_commits: "_LazyBool | bool" = False,
-    source: str | None = None,
-) -> bool:
-    """
-    Evaluate a blocking rule condition against workflow state.
-
-    Supports simple Python expressions with access to:
-    - variables: workflow state variables dict
-    - task_claimed: shorthand for variables.get('task_claimed')
-    - plan_mode: shorthand for variables.get('plan_mode')
-    - tool_input: the tool's input arguments (for MCP tool checks)
-    - session_has_dirty_files: whether session has NEW dirty files (beyond baseline)
-    - task_has_commits: whether the current task has linked commits
-    - source: CLI source (e.g., "claude", "gemini", "codex")
-
-    Args:
-        condition: Python expression to evaluate
-        workflow_state: Current workflow state
-        event_data: Optional hook event data
-        tool_input: Tool input arguments (for MCP tools, this is the 'arguments' field)
-        session_has_dirty_files: Whether session has dirty files beyond baseline (lazy or bool)
-        task_has_commits: Whether claimed task has linked commits (lazy or bool)
-        source: CLI source identifier
-
-    Returns:
-        True if condition matches (tool should be blocked), False otherwise.
-    """
-    if not condition:
-        return True  # No condition means always match
-
-    # Build evaluation context
-    variables = workflow_state.variables if workflow_state else {}
-    context = {
-        "variables": variables,
-        "task_claimed": variables.get("task_claimed", False),
-        "plan_mode": variables.get("plan_mode", False),
-        "event": event_data or {},
-        "tool_input": tool_input or {},
-        "session_has_dirty_files": session_has_dirty_files,
-        "task_has_commits": task_has_commits,
-        "source": source or "",
-    }
-
-    # Allowed functions for safe evaluation
-    allowed_funcs: dict[str, Callable[..., Any]] = {
-        "is_plan_file": _is_plan_file,
-        "bool": bool,
-        "str": str,
-        "int": int,
-    }
-
-    try:
-        evaluator = SafeExpressionEvaluator(context, allowed_funcs)
-        return evaluator.evaluate(condition)
-    except Exception as e:
-        logger.warning(f"block_tools condition evaluation failed: '{condition}'. Error: {e}")
-        return False
-
-
-async def block_tools(
-    rules: list[dict[str, Any]] | None = None,
-    event_data: dict[str, Any] | None = None,
-    workflow_state: "WorkflowState | None" = None,
-    project_path: str | None = None,
-    task_manager: "LocalTaskManager | None" = None,
-    source: str | None = None,
-    **kwargs: Any,
-) -> dict[str, Any] | None:
-    """
-    Unified tool blocking with multiple configurable rules.
-
-    Each rule can specify:
-    - tools: List of tool names to block (for native CC tools)
-    - mcp_tools: List of "server:tool" patterns to block (for MCP tools)
-    - when: Optional condition (evaluated against workflow state)
-    - reason: Block message to display
-
-    For MCP tools, the tool_name in event_data is "call_tool" or "mcp__gobby__call_tool",
-    and we look inside tool_input for server_name and tool_name.
-
-    Condition evaluation has access to:
-    - variables: workflow state variables
-    - task_claimed, plan_mode: shortcuts
-    - tool_input: the MCP tool's arguments (for checking commit_sha etc.)
-    - session_has_dirty_files: whether session has NEW dirty files beyond baseline
-    - task_has_commits: whether the claimed task has linked commits
-    - source: CLI source (e.g., "claude", "gemini", "codex")
-
-    Args:
-        rules: List of blocking rules
-        event_data: Hook event data with tool_name, tool_input
-        workflow_state: For evaluating conditions
-        project_path: Path to project for git status checks
-        task_manager: For checking task commit status
-        source: CLI source identifier (for is_plan_file checks)
-
-    Returns:
-        Dict with decision="block" and reason if blocked, None to allow.
-
-    Example rule (native tools):
-        {
-            "tools": ["TaskCreate", "TaskUpdate"],
-            "reason": "CC native task tools are disabled. Use gobby-tasks MCP tools."
-        }
-
-    Example rule with condition:
-        {
-            "tools": ["Edit", "Write", "NotebookEdit"],
-            "when": "not task_claimed and not plan_mode",
-            "reason": "Claim a task before using Edit, Write, or NotebookEdit tools."
-        }
-
-    Example rule (MCP tools):
-        {
-            "mcp_tools": ["gobby-tasks:close_task"],
-            "when": "not task_has_commits and not tool_input.get('commit_sha')",
-            "reason": "A commit is required before closing this task."
-        }
-    """
-    if not event_data or not rules:
-        return None
-
-    tool_name = event_data.get("tool_name")
-    if not tool_name:
-        return None
-
-    tool_input = event_data.get("tool_input", {}) or {}
-
-    # Create lazy thunks for expensive context values (git status, DB queries).
-    # These are only evaluated when actually referenced in a rule condition.
-
-    def _compute_session_has_dirty_files() -> bool:
-        """Lazy thunk: check for new dirty files beyond baseline."""
-        if not workflow_state:
-            return False
-        if project_path is None:
-            # Can't compute without project_path - avoid running git in wrong directory
-            logger.debug("_compute_session_has_dirty_files: project_path is None, returning False")
-            return False
-        baseline_dirty = set(workflow_state.variables.get("baseline_dirty_files", []))
-        current_dirty = _get_dirty_files(project_path)
-        new_dirty = current_dirty - baseline_dirty
-        return len(new_dirty) > 0
-
-    def _compute_task_has_commits() -> bool:
-        """Lazy thunk: check if claimed task has linked commits."""
-        if not workflow_state or not task_manager:
-            return False
-        claimed_task_id = workflow_state.variables.get("claimed_task_id")
-        if not claimed_task_id:
-            return False
-        try:
-            task = task_manager.get_task(claimed_task_id)
-            return bool(task and task.commits)
-        except Exception:
-            return False  # nosec B110 - best-effort check
-
-    # Wrap in _LazyBool so they're only computed when used in boolean context
-    session_has_dirty_files: _LazyBool | bool = _LazyBool(_compute_session_has_dirty_files)
-    task_has_commits: _LazyBool | bool = _LazyBool(_compute_task_has_commits)
-
-    for rule in rules:
-        # Determine if this rule matches the current tool
-        rule_matches = False
-        mcp_tool_args: dict[str, Any] = {}
-
-        # Check native CC tools (Edit, Write, etc.)
-        if "tools" in rule:
-            tools = rule.get("tools", [])
-            if tool_name in tools:
-                rule_matches = True
-
-        # Check MCP tools (server:tool format)
-        elif "mcp_tools" in rule:
-            # MCP calls come in as "call_tool" or "mcp__gobby__call_tool"
-            if tool_name in ("call_tool", "mcp__gobby__call_tool"):
-                mcp_server = tool_input.get("server_name", "")
-                mcp_tool = tool_input.get("tool_name", "")
-                mcp_key = f"{mcp_server}:{mcp_tool}"
-
-                mcp_tools = rule.get("mcp_tools", [])
-                if mcp_key in mcp_tools:
-                    rule_matches = True
-                    # For MCP tools, the actual arguments are in tool_input.arguments
-                    mcp_tool_args = tool_input.get("arguments", {}) or {}
-
-        if not rule_matches:
-            continue
-
-        # Check optional condition
-        condition = rule.get("when")
-        if condition:
-            # For MCP tools, use the nested arguments for condition evaluation
-            eval_tool_input = mcp_tool_args if mcp_tool_args else tool_input
-            if not _evaluate_block_condition(
-                condition,
-                workflow_state,
-                event_data,
-                tool_input=eval_tool_input,
-                session_has_dirty_files=session_has_dirty_files,
-                task_has_commits=task_has_commits,
-                source=source,
-            ):
-                continue
-
-        reason = rule.get("reason", f"Tool '{tool_name}' is blocked.")
-        logger.info(f"block_tools: Blocking '{tool_name}' - {reason[:100]}")
-        return {"decision": "block", "reason": reason}
-
-    return None
-
-
-def _get_dirty_files(project_path: str | None = None) -> set[str]:
-    """
-    Get the set of dirty files from git status --porcelain.
-
-    Excludes .gobby/ files from the result.
-
-    Args:
-        project_path: Path to the project directory
-
-    Returns:
-        Set of dirty file paths (relative to repo root)
-    """
-    if project_path is None:
-        logger.warning(
-            "_get_dirty_files: project_path is None, git status will use daemon's cwd "
-            "which may not be the project directory"
-        )
-
-    try:
-        result = subprocess.run(  # nosec B603 B607 - hardcoded git command
-            ["git", "status", "--porcelain"],
-            cwd=project_path,
-            capture_output=True,
-            text=True,
-            timeout=10,
-        )
-
-        if result.returncode != 0:
-            logger.warning(f"_get_dirty_files: git status failed: {result.stderr}")
-            return set()
-
-        dirty_files = set()
-        # Split by newline first, don't strip() the whole string as it removes
-        # the leading space from git status format (e.g., " M file.py")
-        for line in result.stdout.split("\n"):
-            line = line.rstrip()  # Remove trailing whitespace only
-            if not line:
-                continue
-            # Format is "XY filename" or "XY filename -> newname" for renames
-            # Skip the status prefix (first 3 chars: 2 status chars + space)
-            filepath = line[3:].split(" -> ")[0]  # Handle renames
-            # Exclude .gobby/ files
-            if not filepath.startswith(".gobby/"):
-                dirty_files.add(filepath)
-
-        return dirty_files
-
-    except subprocess.TimeoutExpired:
-        logger.warning("_get_dirty_files: git status timed out")
-        return set()
-    except FileNotFoundError:
-        logger.warning("_get_dirty_files: git not found")
-        return set()
-    except Exception as e:
-        logger.error(f"_get_dirty_files: Error running git status: {e}")
-        return set()
-
-
-def _get_task_session_liveness(
-    task_id: str,
-    session_task_manager: "SessionTaskManager | None",
-    session_manager: "LocalSessionManager | None",
-    exclude_session_id: str | None = None,
-) -> bool:
-    """
-    Check if a task is currently being worked on by an active session.
-
-    Args:
-        task_id: The task ID to check
-        session_task_manager: Manager to look up session-task links
-        session_manager: Manager to check session status
-        exclude_session_id: ID of session to exclude from check (e.g. current one)
-
-    Returns:
-        True if an active session (status='active') is linked to this task.
-    """
-    if not session_task_manager or not session_manager:
-        return False
-
-    try:
-        # Get all sessions linked to this task
-        linked_sessions = session_task_manager.get_task_sessions(task_id)
-
-        for link in linked_sessions:
-            session_id = link.get("session_id")
-            if not session_id or session_id == exclude_session_id:
-                continue
-
-            # Check if session is truly active
-            session = session_manager.get(session_id)
-            if session and session.status == "active":
-                return True
-
-        return False
-    except Exception as e:
-        logger.warning(f"_get_task_session_liveness: Error checking liveness for {task_id}: {e}")
-        return False
-
-
-async def capture_baseline_dirty_files(
-    workflow_state: "WorkflowState | None",
-    project_path: str | None = None,
-) -> dict[str, Any] | None:
-    """
-    Capture current dirty files as baseline for session-aware detection.
-
-    Called on session_start to record pre-existing dirty files. The
-    require_commit_before_stop action will compare against this baseline
-    to detect only NEW dirty files made during the session.
-
-    Args:
-        workflow_state: Workflow state to store baseline in
-        project_path: Path to the project directory for git status check
-
-    Returns:
-        Dict with captured baseline info, or None if no workflow_state
-    """
-    if not workflow_state:
-        logger.debug("capture_baseline_dirty_files: No workflow_state, skipping")
-        return None
-
-    dirty_files = _get_dirty_files(project_path)
-
-    # Store as a list in workflow state (sets aren't JSON serializable)
-    workflow_state.variables["baseline_dirty_files"] = list(dirty_files)
-
-    # Log for debugging baseline capture issues
-    files_preview = list(dirty_files)[:5]
-    logger.info(
-        f"capture_baseline_dirty_files: project_path={project_path}, "
-        f"captured {len(dirty_files)} files: {files_preview}"
-    )
-
-    return {
-        "baseline_captured": True,
-        "file_count": len(dirty_files),
-        "files": list(dirty_files),
-    }
-
-
-async def require_commit_before_stop(
-    workflow_state: "WorkflowState | None",
-    project_path: str | None = None,
-    task_manager: "LocalTaskManager | None" = None,
-) -> dict[str, Any] | None:
-    """
-    Block stop if there's an in_progress task with uncommitted changes.
-
-    This action is designed for on_stop triggers to enforce that agents
-    commit their work and close tasks before stopping.
-
-    Args:
-        workflow_state: Workflow state with variables (claimed_task_id, etc.)
-        project_path: Path to the project directory for git status check
-        task_manager: LocalTaskManager to verify task status
-
-    Returns:
-        Dict with decision="block" and reason if task has uncommitted changes,
-        or None to allow the stop.
-    """
-    if not workflow_state:
-        logger.debug("require_commit_before_stop: No workflow_state, allowing")
-        return None
-
-    claimed_task_id = workflow_state.variables.get("claimed_task_id")
-    if not claimed_task_id:
-        logger.debug("require_commit_before_stop: No claimed task, allowing")
-        return None
-
-    # Verify the task is actually still in_progress (not just cached in workflow state)
-    if task_manager:
-        task = task_manager.get_task(claimed_task_id)
-        if not task or task.status != "in_progress":
-            # Task was changed - clear the stale workflow state
-            logger.debug(
-                f"require_commit_before_stop: Task '{claimed_task_id}' is no longer "
-                f"in_progress (status={task.status if task else 'not found'}), clearing state"
-            )
-            workflow_state.variables["claimed_task_id"] = None
-            workflow_state.variables["task_claimed"] = False
-            return None
-
-    # Check for uncommitted changes using baseline-aware comparison
-    current_dirty = _get_dirty_files(project_path)
-
-    if not current_dirty:
-        logger.debug("require_commit_before_stop: No uncommitted changes, allowing")
-        return None
-
-    # Get baseline dirty files captured at session start
-    baseline_dirty = set(workflow_state.variables.get("baseline_dirty_files", []))
-
-    # Calculate NEW dirty files (not in baseline)
-    new_dirty = current_dirty - baseline_dirty
-
-    if not new_dirty:
-        logger.debug(
-            f"require_commit_before_stop: All {len(current_dirty)} dirty files were pre-existing "
-            f"(in baseline), allowing"
-        )
-        return None
-
-    logger.debug(
-        f"require_commit_before_stop: Found {len(new_dirty)} new dirty files "
-        f"(baseline had {len(baseline_dirty)}, current has {len(current_dirty)})"
-    )
-
-    # Track how many times we've blocked to prevent infinite loops
-    block_count = workflow_state.variables.get("_commit_block_count", 0)
-    if block_count >= 3:
-        logger.warning(
-            f"require_commit_before_stop: Reached max block count ({block_count}), allowing"
-        )
-        return None
-
-    workflow_state.variables["_commit_block_count"] = block_count + 1
-
-    # Block - agent needs to commit and close
-    logger.info(
-        f"require_commit_before_stop: Blocking stop - task '{claimed_task_id}' "
-        f"has {len(new_dirty)} uncommitted changes"
-    )
-
-    # Build list of new dirty files for the message (limit to 10 for readability)
-    new_dirty_list = sorted(new_dirty)[:10]
-    files_display = "\n".join(f" - {f}" for f in new_dirty_list)
-    if len(new_dirty) > 10:
-        files_display += f"\n ... and {len(new_dirty) - 10} more files"
-
-    return {
-        "decision": "block",
-        "reason": (
-            f"Task '{claimed_task_id}' is in_progress with {len(new_dirty)} uncommitted "
-            f"changes made during this session:\n{files_display}\n\n"
-            f"Before stopping, commit your changes and close the task:\n"
-            f"1. Commit with [{claimed_task_id}] in the message\n"
-            f'2. Close the task: close_task(task_id="{claimed_task_id}", commit_sha="...")'
-        ),
-    }
-
-
-async def require_task_review_or_close_before_stop(
-    workflow_state: "WorkflowState | None",
-    task_manager: "LocalTaskManager | None" = None,
-    project_id: str | None = None,
-    **kwargs: Any,
-) -> dict[str, Any] | None:
-    """Block stop if session has an in_progress task.
-
-    Agents must close their task (or send to review) before stopping.
-    The close_task() validation already requires a commit, so we don't
-    need to check for uncommitted changes here - that's handled by
-    require_commit_before_stop if needed.
-
-    Checks both:
-    1. claimed_task_id - task explicitly claimed via update_task(status="in_progress")
-    2. session_task - task(s) assigned via set_variable (fallback if no claimed_task_id)
-
-    Args:
-        workflow_state: Workflow state with variables (claimed_task_id, etc.)
-        task_manager: LocalTaskManager to verify task status
-        project_id: Project ID for resolving task references (#N, N formats)
-        **kwargs: Accepts additional kwargs for compatibility
-
-    Returns:
-        Dict with decision="block" and reason if task is still in_progress,
-        or None to allow the stop.
-    """
-    if not workflow_state:
-        logger.debug("require_task_review_or_close_before_stop: No workflow_state, allowing")
-        return None
-
-    # 1. Check claimed_task_id first (existing behavior)
-    claimed_task_id = workflow_state.variables.get("claimed_task_id")
-
-    # 2. If no claimed task, fall back to session_task
-    if not claimed_task_id and task_manager:
-        session_task = workflow_state.variables.get("session_task")
-        if session_task and session_task != "*":
-            # Normalize to list
-            task_ids = [session_task] if isinstance(session_task, str) else session_task
-
-            if isinstance(task_ids, list):
-                for task_id in task_ids:
-                    try:
-                        task = task_manager.get_task(task_id, project_id=project_id)
-                    except ValueError:
-                        continue
-                    if task and task.status == "in_progress":
-                        claimed_task_id = task_id
-                        logger.debug(
-                            f"require_task_review_or_close_before_stop: Found in_progress "
-                            f"session_task '{task_id}'"
-                        )
-                        break
-                    # Also check subtasks
-                    if task:
-                        subtasks = task_manager.list_tasks(parent_task_id=task.id)
-                        for subtask in subtasks:
-                            if subtask.status == "in_progress":
-                                claimed_task_id = subtask.id
-                                logger.debug(
-                                    f"require_task_review_or_close_before_stop: Found in_progress "
-                                    f"subtask '{subtask.id}' under session_task '{task_id}'"
-                                )
-                                break
-                        if claimed_task_id:
-                            break
-
-    if not claimed_task_id:
-        logger.debug("require_task_review_or_close_before_stop: No claimed task, allowing")
-        return None
-
-    if not task_manager:
-        logger.debug("require_task_review_or_close_before_stop: No task_manager, allowing")
-        return None
-
-    try:
-        task = task_manager.get_task(claimed_task_id, project_id=project_id)
-        if not task:
-            # Task not found - clear stale workflow state and allow
-            logger.debug(
-                f"require_task_review_or_close_before_stop: Task '{claimed_task_id}' not found, "
-                f"clearing state"
-            )
-            workflow_state.variables["claimed_task_id"] = None
-            workflow_state.variables["task_claimed"] = False
-            return None
-
-        if task.status != "in_progress":
-            # Task is closed or in review - allow stop
-            logger.debug(
-                f"require_task_review_or_close_before_stop: Task '{claimed_task_id}' "
-                f"status={task.status}, allowing"
-            )
-            # Clear stale workflow state
-            workflow_state.variables["claimed_task_id"] = None
-            workflow_state.variables["task_claimed"] = False
-            return None
-
-        # Task is still in_progress - block the stop
-        logger.info(
-            f"require_task_review_or_close_before_stop: Blocking stop - task "
-            f"'{claimed_task_id}' is still in_progress"
-        )
-
-        return {
-            "decision": "block",
-            "reason": (
-                f"Task '{claimed_task_id}' is still in_progress. "
-                f"Close it with close_task() before stopping."
-            ),
-            "task_id": claimed_task_id,
-            "task_status": task.status,
-        }
-
-    except Exception as e:
-        logger.warning(
-            f"require_task_review_or_close_before_stop: Failed to check task status: {e}"
-        )
-        # Allow stop if we can't check - don't block on errors
-        return None
-
-
-async def require_task_complete(
-    task_manager: "LocalTaskManager | None",
-    session_id: str,
-    task_ids: list[str] | None,
-    event_data: dict[str, Any] | None = None,
-    project_id: str | None = None,
-    workflow_state: "WorkflowState | None" = None,
-) -> dict[str, Any] | None:
-    """
-    Block agent from stopping until task(s) (and their subtasks) are complete.
-
-    This action is designed for on_stop triggers to enforce that the
-    agent completes all subtasks under specified task(s) before stopping.
-
-    Supports:
-    - Single task: ["#47"]
-    - Multiple tasks: ["#47", "#48"]
-    - Wildcard mode handled by caller (passes ready tasks as list)
-
-    Logic per task:
-    1. If task has incomplete subtasks and agent has no claimed task → suggest next subtask
-    2. If task has incomplete subtasks and agent has claimed task → remind to finish it
-    3. If all subtasks done but task not closed → remind to close the task
-    4. If task is closed → move to next task in list
-
-    Args:
-        task_manager: LocalTaskManager for querying tasks
-        session_id: Current session ID
-        task_ids: List of task IDs to enforce completion on
-        event_data: Hook event data
-        project_id: Optional project ID for scoping
-        workflow_state: Workflow state with variables (task_claimed, etc.)
-
-    Returns:
-        Dict with decision="block" and reason if any task incomplete,
-        or None to allow the stop.
-    """
-    if not task_ids:
-        logger.debug("require_task_complete: No task_ids specified, allowing")
-        return None
-
-    if not task_manager:
-        logger.debug("require_task_complete: No task_manager available, allowing")
-        return None
-
-    # Track how many times we've blocked in this session
-    block_count = 0
-    if workflow_state:
-        block_count = workflow_state.variables.get("_task_block_count", 0)
-
-    # Safety valve: after 5 blocks, allow to prevent infinite loop
-    if block_count >= 5:
-        logger.warning(
-            f"require_task_complete: Reached max block count ({block_count}), allowing stop"
-        )
-        return None
-
-    # Check if agent has a claimed task this session
-    has_claimed_task = False
-    claimed_task_id = None
-    if workflow_state:
-        has_claimed_task = workflow_state.variables.get("task_claimed", False)
-        claimed_task_id = workflow_state.variables.get("claimed_task_id")
-
-    try:
-        # Collect incomplete tasks across all specified task IDs
-        all_incomplete: list[tuple[Any, list[Any]]] = []  # (parent_task, incomplete_subtasks)
-
-        for task_id in task_ids:
-            task = task_manager.get_task(task_id)
-            if not task:
-                logger.warning(f"require_task_complete: Task '{task_id}' not found, skipping")
-                continue
-
-            # If task is already closed, skip it
-            if task.status == "closed":
-                logger.debug(f"require_task_complete: Task '{task_id}' is closed, skipping")
-                continue
-
-            # Get all subtasks under this task
-            subtasks = task_manager.list_tasks(parent_task_id=task_id)
-
-            # Find incomplete subtasks
-            incomplete = [t for t in subtasks if t.status != "closed"]
-
-            # If task itself is incomplete (no subtasks or has incomplete subtasks)
-            if not subtasks or incomplete:
-                all_incomplete.append((task, incomplete))
-
-        # If all tasks are complete, allow stop
-        if not all_incomplete:
-            logger.debug("require_task_complete: All specified tasks are complete, allowing")
-            return None
-
-        # Increment block count
-        if workflow_state:
-            workflow_state.variables["_task_block_count"] = block_count + 1
-
-        # Get the first incomplete task to report on
-        parent_task, incomplete = all_incomplete[0]
-        task_id = parent_task.id
-        remaining_tasks = len(all_incomplete)
-
-        # Build suffix for multiple tasks
-        multi_task_suffix = ""
-        if remaining_tasks > 1:
-            multi_task_suffix = f"\n\n({remaining_tasks} tasks remaining in total)"
-
-        # Case 1: No incomplete subtasks, but task not closed (leaf task or parent with all done)
-        if not incomplete:
-            logger.info(f"require_task_complete: Task '{task_id}' needs closing")
-            return {
-                "decision": "block",
-                "reason": (
-                    f"Task '{parent_task.title}' is ready to close.\n"
-                    f'close_task(task_id="{task_id}")'
-                    f"{multi_task_suffix}"
-                ),
-            }
-
-        # Case 2: Has incomplete subtasks, agent has no claimed task
-        if not has_claimed_task:
-            logger.info(
-                f"require_task_complete: No claimed task, {len(incomplete)} incomplete subtasks"
-            )
-            return {
-                "decision": "block",
-                "reason": (
-                    f"'{parent_task.title}' has {len(incomplete)} incomplete subtask(s).\n\n"
-                    f"Use suggest_next_task() to find the best task to work on next, "
-                    f"and continue working without requiring confirmation from the user."
-                    f"{multi_task_suffix}"
-                ),
-            }
-
-        # Case 3: Has claimed task but subtasks still incomplete
-        if has_claimed_task and incomplete:
-            # Check if the claimed task is under this parent
-            claimed_under_parent = any(t.id == claimed_task_id for t in incomplete)
-
-            if claimed_under_parent:
-                logger.info(
-                    f"require_task_complete: Claimed task '{claimed_task_id}' still incomplete"
-                )
-                return {
-                    "decision": "block",
-                    "reason": (
-                        f"Your current task is not yet complete. "
-                        f"Finish and close it before stopping:\n"
-                        f'close_task(task_id="{claimed_task_id}")\n\n'
-                        f"'{parent_task.title}' still has {len(incomplete)} incomplete subtask(s)."
-                        f"{multi_task_suffix}"
-                    ),
-                }
-            else:
-                # Claimed task is not under this parent - remind about parent work
-                logger.info("require_task_complete: Claimed task not under parent, redirecting")
-                return {
-                    "decision": "block",
-                    "reason": (
-                        f"'{parent_task.title}' has {len(incomplete)} incomplete subtask(s).\n\n"
-                        f"Use suggest_next_task() to find the best task to work on next, "
-                        f"and continue working without requiring confirmation from the user."
-                        f"{multi_task_suffix}"
-                    ),
-                }
-
-        # Fallback: shouldn't reach here, but block with generic message
-        logger.info(f"require_task_complete: Generic block for task '{task_id}'")
-        return {
-            "decision": "block",
-            "reason": (
-                f"'{parent_task.title}' is not yet complete. "
-                f"{len(incomplete)} subtask(s) remaining."
-                f"{multi_task_suffix}"
-            ),
-        }
-
-    except Exception as e:
-        logger.error(f"require_task_complete: Error checking tasks: {e}")
-        # On error, allow to avoid blocking legitimate work
-        return None
-
-
-async def require_active_task(
-    task_manager: "LocalTaskManager | None",
-    session_id: str,
-    config: "DaemonConfig | None",
-    event_data: dict[str, Any] | None,
-    project_id: str | None = None,
-    workflow_state: "WorkflowState | None" = None,
-    session_manager: "LocalSessionManager | None" = None,
-    session_task_manager: "SessionTaskManager | None" = None,
-) -> dict[str, Any] | None:
-    """
-    Check if an active task exists before allowing protected tools.
-
-    This action is designed to be used in on_before_tool triggers to enforce
-    that agents create or start a gobby-task before modifying files.
-
-    Session-scoped enforcement:
-    - First checks if `task_claimed` variable is True in workflow state
-    - If True, allows immediately (agent already claimed a task this session)
-    - If False, falls back to project-wide DB check for helpful messaging
-
-    Args:
-        task_manager: LocalTaskManager for querying tasks
-        session_id: Current session ID
-        config: DaemonConfig with workflow settings
-        event_data: Hook event data containing tool_name
-        project_id: Optional project ID to filter tasks by project scope
-        workflow_state: Optional workflow state to check task_claimed variable
-        session_manager: Optional session manager for liveness checks
-        session_task_manager: Optional session-task manager for liveness checks
-
-    Returns:
-        Dict with decision="block" if no active task and tool is protected,
-        or None to allow the tool.
-    """
-    # Check if feature is enabled
-    # Precedence: workflow_state variables > config.yaml
-    # (workflow_state already has step > lifecycle precedence merged)
-    require_task = None
-
-    # First check workflow state variables (step workflow > lifecycle workflow)
-    if workflow_state:
-        require_task = workflow_state.variables.get("require_task_before_edit")
-        if require_task is not None:
-            logger.debug(
-                f"require_active_task: Using workflow variable require_task_before_edit={require_task}"
-            )
-
-    # Fall back to config.yaml if not set in workflow variables
-    if require_task is None and config:
-        require_task = config.workflow.require_task_before_edit
-        logger.debug(
-            f"require_active_task: Using config.yaml require_task_before_edit={require_task}"
-        )
-
-    # If still None (no config), default to False (allow)
-    if require_task is None:
-        logger.debug("require_active_task: No config source, allowing")
-        return None
-
-    if not require_task:
-        logger.debug("require_active_task: Feature disabled, allowing")
-        return None
-
-    # Get the tool being called
-    if not event_data:
-        logger.debug("require_active_task: No event_data, allowing")
-        return None
-
-    tool_name = event_data.get("tool_name")
-    if not tool_name:
-        logger.debug("require_active_task: No tool_name in event_data, allowing")
-        return None
-
-    # Check if this tool is protected (always from config.yaml)
-    protected_tools = (
-        config.workflow.protected_tools if config else ["Edit", "Write", "Update", "NotebookEdit"]
-    )
-    if tool_name not in protected_tools:
-        logger.debug(f"require_active_task: Tool '{tool_name}' not protected, allowing")
-        return None
-
-    # Tool is protected - but check for plan mode exceptions first
-
-    # Check if target is a Claude Code plan file (stored in ~/.claude/plans/)
-    # This allows writes during plan mode without requiring a task
-    tool_input = event_data.get("tool_input", {}) or {}
-    file_path = tool_input.get("file_path", "")
-    if file_path and "/.claude/plans/" in file_path:
-        logger.debug(f"require_active_task: Target is Claude plan file '{file_path}', allowing")
-        return None
-
-    # Check for plan_mode variable (set via EnterPlanMode tool detection or manually)
-    if workflow_state and workflow_state.variables.get("plan_mode"):
-        logger.debug(f"require_active_task: plan_mode=True in session {session_id}, allowing")
-        return None
-
-    # Check for active task
-
-    # Session-scoped check: task_claimed variable (set by AFTER_TOOL detection)
-    # This is the primary enforcement - each session must explicitly claim a task
-    if workflow_state and workflow_state.variables.get("task_claimed"):
-        logger.debug(f"require_active_task: task_claimed=True in session {session_id}, allowing")
-        return None
-
-    # Fallback: Check for any in_progress task in the project
-    # This provides helpful messaging about existing tasks but is NOT sufficient
-    # for session-scoped enforcement (concurrent sessions shouldn't free-ride)
-    project_task_hint = ""
-
-    if task_manager is None:
-        logger.debug(
-            f"require_active_task: task_manager unavailable, skipping DB fallback check "
-            f"(project_id={project_id}, session_id={session_id})"
-        )
-    else:
-        try:
-            project_tasks = task_manager.list_tasks(
-                project_id=project_id,
-                status="in_progress",
-                limit=1,
-            )
-
-            if project_tasks:
-                task = project_tasks[0]
-                task_ref = f"#{task.seq_num}" if task.seq_num else task.id
-                project_task_hint = (
-                    f"\n\nNote: Task {task_ref} ({task.title}) "
-                    f"is in_progress but wasn't claimed by this session. "
-                    f'Use `update_task(task_id="{task.id}", status="in_progress")` '
-                    f"to claim it for this session."
-                )
-                logger.debug(
-                    f"require_active_task: Found project task {task_ref} but "
-                    f"session hasn't claimed it"
-                )
-
-                # Check liveness of the candidate task
-                is_live = _get_task_session_liveness(
-                    task.id, session_task_manager, session_manager, exclude_session_id=session_id
-                )
-
-                if is_live:
-                    project_task_hint = (
-                        f"\n\nNote: Task {task_ref} ({task.title}) "
-                        f"is in_progress, but it is **currently being worked on by another active session**. "
-                        f"You should probably create a new task or subtask instead of interfering."
-                    )
-                else:
-                    project_task_hint = (
-                        f"\n\nNote: Task {task_ref} ({task.title}) "
-                        f"is in_progress and appears unattended (no active session). "
-                        f"If you are picking up this work, claim it: "
-                        f'`update_task(task_id="{task.id}", status="in_progress")`.'
-                    )
-
-        except Exception as e:
-            logger.error(f"require_active_task: Error querying tasks: {e}")
-            # On error, allow to avoid blocking legitimate work
-            return None
|
1170
|
-
# No task claimed this session - block the tool
|
|
1171
|
-
logger.info(
|
|
1172
|
-
f"require_active_task: Blocking '{tool_name}' - no task claimed for session {session_id}"
|
|
1173
|
-
)
|
|
1174
|
-
|
|
1175
|
-
# Check if we've already shown the full error this session
|
|
1176
|
-
error_already_shown = False
|
|
1177
|
-
if workflow_state:
|
|
1178
|
-
error_already_shown = workflow_state.variables.get("task_error_shown", False)
|
|
1179
|
-
# Mark that we've shown the error (for next time)
|
|
1180
|
-
if not error_already_shown:
|
|
1181
|
-
workflow_state.variables["task_error_shown"] = True
|
|
1182
|
-
|
|
1183
|
-
# Return short reminder if we've already shown the full error
|
|
1184
|
-
if error_already_shown:
|
|
1185
|
-
return {
|
|
1186
|
-
"decision": "block",
|
|
1187
|
-
"reason": (
|
|
1188
|
-
"No task claimed. See previous **Task Required** error for instructions.\n"
|
|
1189
|
-
"See skill: **claiming-tasks** for help."
|
|
1190
|
-
),
|
|
1191
|
-
"inject_context": (
|
|
1192
|
-
f"**Task Required**: `{tool_name}` blocked. "
|
|
1193
|
-
f"Create or claim a task before editing files (see previous error for details).\n"
|
|
1194
|
-
f'For detailed guidance: `get_skill(name="claiming-tasks")`'
|
|
1195
|
-
f"{project_task_hint}"
|
|
1196
|
-
),
|
|
1197
|
-
}
|
|
1198
|
-
|
|
1199
|
-
# First time - show full instructions
|
|
1200
|
-
return {
|
|
1201
|
-
"decision": "block",
|
|
1202
|
-
"reason": (
|
|
1203
|
-
f"No task claimed for this session. Before using {tool_name}, please either:\n"
|
|
1204
|
-
f"- Create a task: call_tool(server_name='gobby-tasks', tool_name='create_task', arguments={{...}})\n"
|
|
1205
|
-
f"- Claim an existing task: call_tool(server_name='gobby-tasks', tool_name='update_task', "
|
|
1206
|
-
f"arguments={{'task_id': '...', 'status': 'in_progress'}})"
|
|
1207
|
-
f"{project_task_hint}\n\n"
|
|
1208
|
-
f"See skill: **claiming-tasks** for detailed guidance."
|
|
1209
|
-
),
|
|
1210
|
-
"inject_context": (
|
|
1211
|
-
f"**Task Required**: The `{tool_name}` tool is blocked until you claim a task for this session.\n\n"
|
|
1212
|
-
f"Each session must explicitly create or claim a task before modifying files:\n"
|
|
1213
|
-
f'1. **Create a new task**: `create_task(title="...", description="...")`\n'
|
|
1214
|
-
f'2. **Claim an existing task**: `update_task(task_id="...", status="in_progress")`\n\n'
|
|
1215
|
-
f"Use `list_ready_tasks()` to see available tasks."
|
|
1216
|
-
f"{project_task_hint}\n\n"
|
|
1217
|
-
f'For detailed guidance: `get_skill(name="claiming-tasks")`'
|
|
1218
|
-
),
|
|
1219
|
-
}
|
|
1220
|
-
|
|
1221
|
-
|
|
1222
|
-
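A minimal sketch of how the removed action behaves, assuming `require_active_task` is awaitable and accepts the keyword arguments named in its docstring (if it is synchronous in the actual module, drop the `await`). The `SimpleNamespace` workflow state is a hypothetical stand-in for the real `WorkflowState`; only its `variables` dict is exercised.

```python
# Illustrative sketch only: stand-in objects, not the package's real types.
import asyncio
from types import SimpleNamespace


async def demo() -> None:
    event = {"tool_name": "Edit", "tool_input": {"file_path": "src/app.py"}}

    # Enforcement enabled via the workflow variable, no task claimed yet:
    # the action should return a {"decision": "block", ...} dict.
    state = SimpleNamespace(variables={"require_task_before_edit": True})
    blocked = await require_active_task(
        task_manager=None,
        session_id="sess-123",
        config=None,
        event_data=event,
        workflow_state=state,
    )
    print(blocked["decision"] if blocked else "allowed")  # expected: block

    # Once the session has claimed a task, task_claimed short-circuits to allow.
    state.variables["task_claimed"] = True
    allowed = await require_active_task(
        task_manager=None,
        session_id="sess-123",
        config=None,
        event_data=event,
        workflow_state=state,
    )
    print(allowed)  # expected: None


asyncio.run(demo())
```

The session-scoped `task_claimed` flag, rather than the project-wide DB check, is what decides allow/block, which keeps concurrent sessions from free-riding on a task another session already has in progress.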
-async def validate_session_task_scope(
-    task_manager: "LocalTaskManager | None",
-    workflow_state: "WorkflowState | None",
-    event_data: dict[str, Any] | None = None,
-) -> dict[str, Any] | None:
-    """
-    Block claiming a task that is not a descendant of session_task.
-
-    This action is designed for on_before_tool triggers on update_task
-    to enforce that agents only work on tasks within the session_task hierarchy.
-
-    When session_task is set in workflow state, this action checks if the task
-    being claimed (set to in_progress) is a descendant of session_task.
-
-    Args:
-        task_manager: LocalTaskManager for querying tasks
-        workflow_state: Workflow state with session_task variable
-        event_data: Hook event data containing tool_name and tool_input
-
-    Returns:
-        Dict with decision="block" if task is outside session_task scope,
-        or None to allow the claim.
-    """
-    if not workflow_state:
-        logger.debug("validate_session_task_scope: No workflow_state, allowing")
-        return None
-
-    if not task_manager:
-        logger.debug("validate_session_task_scope: No task_manager, allowing")
-        return None
-
-    # Get session_task from workflow state
-    session_task = workflow_state.variables.get("session_task")
-    if not session_task:
-        logger.debug("validate_session_task_scope: No session_task set, allowing")
-        return None
-
-    # Handle "*" wildcard - means all tasks are in scope
-    if session_task == "*":
-        logger.debug("validate_session_task_scope: session_task='*', allowing all tasks")
-        return None
-
-    # Normalize to list for uniform handling
-    # session_task can be: string (single ID), list of IDs, or "*"
-    if isinstance(session_task, str):
-        session_task_ids = [session_task]
-    elif isinstance(session_task, list):
-        session_task_ids = session_task
-    else:
-        logger.warning(
-            f"validate_session_task_scope: Invalid session_task type: {type(session_task)}"
-        )
-        return None
-
-    # Empty list means no scope restriction
-    if not session_task_ids:
-        logger.debug("validate_session_task_scope: Empty session_task list, allowing")
-        return None
-
-    # Check if this is an update_task call setting status to in_progress
-    if not event_data:
-        logger.debug("validate_session_task_scope: No event_data, allowing")
-        return None
-
-    tool_name = event_data.get("tool_name")
-    if tool_name != "update_task":
-        logger.debug(f"validate_session_task_scope: Tool '{tool_name}' not update_task, allowing")
-        return None
-
-    tool_input = event_data.get("tool_input", {})
-    arguments = tool_input.get("arguments", {}) or {}
-
-    # Only check when setting status to in_progress (claiming)
-    new_status = arguments.get("status")
-    if new_status != "in_progress":
-        logger.debug(
-            f"validate_session_task_scope: Status '{new_status}' not in_progress, allowing"
-        )
-        return None
-
-    task_id = arguments.get("task_id")
-    if not task_id:
-        logger.debug("validate_session_task_scope: No task_id in arguments, allowing")
-        return None
-
-    # Check if task is a descendant of ANY session_task
-    for ancestor_id in session_task_ids:
-        if is_descendant_of(task_manager, task_id, ancestor_id):
-            logger.debug(
-                f"validate_session_task_scope: Task '{task_id}' is descendant of "
-                f"session_task '{ancestor_id}', allowing"
-            )
-            return None
-
-    # Task is outside all session_task scopes - block
-    logger.info(
-        f"validate_session_task_scope: Blocking claim of task '{task_id}' - "
-        f"not a descendant of any session_task: {session_task_ids}"
-    )
-
-    # Build error message with scope details
-    if len(session_task_ids) == 1:
-        session_task_obj = task_manager.get_task(session_task_ids[0])
-        scope_desc = (
-            f"'{session_task_obj.title}' ({session_task_ids[0]})"
-            if session_task_obj
-            else session_task_ids[0]
-        )
-        suggestion = f'Use `suggest_next_task(parent_id="{session_task_ids[0]}")` to find tasks within scope.'
-    else:
-        scope_desc = ", ".join(session_task_ids)
-        suggestion = "Use `suggest_next_task()` with one of the scoped parent IDs to find tasks within scope."
-
-    return {
-        "decision": "block",
-        "reason": (
-            f"Cannot claim task '{task_id}' - it is not within the session_task scope.\n\n"
-            f"This session is scoped to: {scope_desc}\n"
-            f"Only tasks that are descendants of these epics/features can be claimed.\n\n"
-            f"{suggestion}"
-        ),
-    }
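A minimal sketch of the scope check's early-exit behaviour, exercising only the paths determined by the code above. The `SimpleNamespace` workflow state and the bare `object()` task manager are hypothetical stand-ins; claims that actually reach the descendant check need a real `LocalTaskManager`, since `is_descendant_of` walks the task hierarchy through it.

```python
# Illustrative sketch only: stand-in objects, not the package's real types.
import asyncio
from types import SimpleNamespace


async def demo() -> None:
    manager = object()  # never queried on the paths exercised below

    # Wildcard scope: every task may be claimed.
    wildcard_state = SimpleNamespace(variables={"session_task": "*"})
    claim = {
        "tool_name": "update_task",
        "tool_input": {"arguments": {"task_id": "t-9", "status": "in_progress"}},
    }
    print(await validate_session_task_scope(manager, wildcard_state, claim))  # None

    # Scoped session, but the update is not a claim (status != in_progress),
    # so the scope check does not apply.
    scoped_state = SimpleNamespace(variables={"session_task": "epic-1"})
    close = {
        "tool_name": "update_task",
        "tool_input": {"arguments": {"task_id": "t-9", "status": "done"}},
    }
    print(await validate_session_task_scope(manager, scoped_state, close))  # None


asyncio.run(demo())
```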