gobby 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only and reflects only the changes between those published versions.
- gobby/__init__.py +1 -1
- gobby/adapters/__init__.py +2 -1
- gobby/adapters/claude_code.py +13 -4
- gobby/adapters/codex_impl/__init__.py +28 -0
- gobby/adapters/codex_impl/adapter.py +722 -0
- gobby/adapters/codex_impl/client.py +679 -0
- gobby/adapters/codex_impl/protocol.py +20 -0
- gobby/adapters/codex_impl/types.py +68 -0
- gobby/agents/definitions.py +11 -1
- gobby/agents/isolation.py +395 -0
- gobby/agents/runner.py +8 -0
- gobby/agents/sandbox.py +261 -0
- gobby/agents/spawn.py +42 -287
- gobby/agents/spawn_executor.py +385 -0
- gobby/agents/spawners/__init__.py +24 -0
- gobby/agents/spawners/command_builder.py +189 -0
- gobby/agents/spawners/embedded.py +21 -2
- gobby/agents/spawners/headless.py +21 -2
- gobby/agents/spawners/prompt_manager.py +125 -0
- gobby/cli/__init__.py +6 -0
- gobby/cli/clones.py +419 -0
- gobby/cli/conductor.py +266 -0
- gobby/cli/install.py +4 -4
- gobby/cli/installers/antigravity.py +3 -9
- gobby/cli/installers/claude.py +15 -9
- gobby/cli/installers/codex.py +2 -8
- gobby/cli/installers/gemini.py +8 -8
- gobby/cli/installers/shared.py +175 -13
- gobby/cli/sessions.py +1 -1
- gobby/cli/skills.py +858 -0
- gobby/cli/tasks/ai.py +0 -440
- gobby/cli/tasks/crud.py +44 -6
- gobby/cli/tasks/main.py +0 -4
- gobby/cli/tui.py +2 -2
- gobby/cli/utils.py +12 -5
- gobby/clones/__init__.py +13 -0
- gobby/clones/git.py +547 -0
- gobby/conductor/__init__.py +16 -0
- gobby/conductor/alerts.py +135 -0
- gobby/conductor/loop.py +164 -0
- gobby/conductor/monitors/__init__.py +11 -0
- gobby/conductor/monitors/agents.py +116 -0
- gobby/conductor/monitors/tasks.py +155 -0
- gobby/conductor/pricing.py +234 -0
- gobby/conductor/token_tracker.py +160 -0
- gobby/config/__init__.py +12 -97
- gobby/config/app.py +69 -91
- gobby/config/extensions.py +2 -2
- gobby/config/features.py +7 -130
- gobby/config/search.py +110 -0
- gobby/config/servers.py +1 -1
- gobby/config/skills.py +43 -0
- gobby/config/tasks.py +9 -41
- gobby/hooks/__init__.py +0 -13
- gobby/hooks/event_handlers.py +188 -2
- gobby/hooks/hook_manager.py +50 -4
- gobby/hooks/plugins.py +1 -1
- gobby/hooks/skill_manager.py +130 -0
- gobby/hooks/webhooks.py +1 -1
- gobby/install/claude/hooks/hook_dispatcher.py +4 -4
- gobby/install/codex/hooks/hook_dispatcher.py +1 -1
- gobby/install/gemini/hooks/hook_dispatcher.py +87 -12
- gobby/llm/claude.py +22 -34
- gobby/llm/claude_executor.py +46 -256
- gobby/llm/codex_executor.py +59 -291
- gobby/llm/executor.py +21 -0
- gobby/llm/gemini.py +134 -110
- gobby/llm/litellm_executor.py +143 -6
- gobby/llm/resolver.py +98 -35
- gobby/mcp_proxy/importer.py +62 -4
- gobby/mcp_proxy/instructions.py +56 -0
- gobby/mcp_proxy/models.py +15 -0
- gobby/mcp_proxy/registries.py +68 -8
- gobby/mcp_proxy/server.py +33 -3
- gobby/mcp_proxy/services/recommendation.py +43 -11
- gobby/mcp_proxy/services/tool_proxy.py +81 -1
- gobby/mcp_proxy/stdio.py +2 -1
- gobby/mcp_proxy/tools/__init__.py +0 -2
- gobby/mcp_proxy/tools/agent_messaging.py +317 -0
- gobby/mcp_proxy/tools/agents.py +31 -731
- gobby/mcp_proxy/tools/clones.py +518 -0
- gobby/mcp_proxy/tools/memory.py +3 -26
- gobby/mcp_proxy/tools/metrics.py +65 -1
- gobby/mcp_proxy/tools/orchestration/__init__.py +3 -0
- gobby/mcp_proxy/tools/orchestration/cleanup.py +151 -0
- gobby/mcp_proxy/tools/orchestration/wait.py +467 -0
- gobby/mcp_proxy/tools/sessions/__init__.py +14 -0
- gobby/mcp_proxy/tools/sessions/_commits.py +232 -0
- gobby/mcp_proxy/tools/sessions/_crud.py +253 -0
- gobby/mcp_proxy/tools/sessions/_factory.py +63 -0
- gobby/mcp_proxy/tools/sessions/_handoff.py +499 -0
- gobby/mcp_proxy/tools/sessions/_messages.py +138 -0
- gobby/mcp_proxy/tools/skills/__init__.py +616 -0
- gobby/mcp_proxy/tools/spawn_agent.py +417 -0
- gobby/mcp_proxy/tools/task_orchestration.py +7 -0
- gobby/mcp_proxy/tools/task_readiness.py +14 -0
- gobby/mcp_proxy/tools/task_sync.py +1 -1
- gobby/mcp_proxy/tools/tasks/_context.py +0 -20
- gobby/mcp_proxy/tools/tasks/_crud.py +91 -4
- gobby/mcp_proxy/tools/tasks/_expansion.py +348 -0
- gobby/mcp_proxy/tools/tasks/_factory.py +6 -16
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +110 -45
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +18 -29
- gobby/mcp_proxy/tools/workflows.py +1 -1
- gobby/mcp_proxy/tools/worktrees.py +0 -338
- gobby/memory/backends/__init__.py +6 -1
- gobby/memory/backends/mem0.py +6 -1
- gobby/memory/extractor.py +477 -0
- gobby/memory/ingestion/__init__.py +5 -0
- gobby/memory/ingestion/multimodal.py +221 -0
- gobby/memory/manager.py +73 -285
- gobby/memory/search/__init__.py +10 -0
- gobby/memory/search/coordinator.py +248 -0
- gobby/memory/services/__init__.py +5 -0
- gobby/memory/services/crossref.py +142 -0
- gobby/prompts/loader.py +5 -2
- gobby/runner.py +37 -16
- gobby/search/__init__.py +48 -6
- gobby/search/backends/__init__.py +159 -0
- gobby/search/backends/embedding.py +225 -0
- gobby/search/embeddings.py +238 -0
- gobby/search/models.py +148 -0
- gobby/search/unified.py +496 -0
- gobby/servers/http.py +24 -12
- gobby/servers/routes/admin.py +294 -0
- gobby/servers/routes/mcp/endpoints/__init__.py +61 -0
- gobby/servers/routes/mcp/endpoints/discovery.py +405 -0
- gobby/servers/routes/mcp/endpoints/execution.py +568 -0
- gobby/servers/routes/mcp/endpoints/registry.py +378 -0
- gobby/servers/routes/mcp/endpoints/server.py +304 -0
- gobby/servers/routes/mcp/hooks.py +1 -1
- gobby/servers/routes/mcp/tools.py +48 -1317
- gobby/servers/websocket.py +2 -2
- gobby/sessions/analyzer.py +2 -0
- gobby/sessions/lifecycle.py +1 -1
- gobby/sessions/processor.py +10 -0
- gobby/sessions/transcripts/base.py +2 -0
- gobby/sessions/transcripts/claude.py +79 -10
- gobby/skills/__init__.py +91 -0
- gobby/skills/loader.py +685 -0
- gobby/skills/manager.py +384 -0
- gobby/skills/parser.py +286 -0
- gobby/skills/search.py +463 -0
- gobby/skills/sync.py +119 -0
- gobby/skills/updater.py +385 -0
- gobby/skills/validator.py +368 -0
- gobby/storage/clones.py +378 -0
- gobby/storage/database.py +1 -1
- gobby/storage/memories.py +43 -13
- gobby/storage/migrations.py +162 -201
- gobby/storage/sessions.py +116 -7
- gobby/storage/skills.py +782 -0
- gobby/storage/tasks/_crud.py +4 -4
- gobby/storage/tasks/_lifecycle.py +57 -7
- gobby/storage/tasks/_manager.py +14 -5
- gobby/storage/tasks/_models.py +8 -3
- gobby/sync/memories.py +40 -5
- gobby/sync/tasks.py +83 -6
- gobby/tasks/__init__.py +1 -2
- gobby/tasks/external_validator.py +1 -1
- gobby/tasks/validation.py +46 -35
- gobby/tools/summarizer.py +91 -10
- gobby/tui/api_client.py +4 -7
- gobby/tui/app.py +5 -3
- gobby/tui/screens/orchestrator.py +1 -2
- gobby/tui/screens/tasks.py +2 -4
- gobby/tui/ws_client.py +1 -1
- gobby/utils/daemon_client.py +2 -2
- gobby/utils/project_context.py +2 -3
- gobby/utils/status.py +13 -0
- gobby/workflows/actions.py +221 -1135
- gobby/workflows/artifact_actions.py +31 -0
- gobby/workflows/autonomous_actions.py +11 -0
- gobby/workflows/context_actions.py +93 -1
- gobby/workflows/detection_helpers.py +115 -31
- gobby/workflows/enforcement/__init__.py +47 -0
- gobby/workflows/enforcement/blocking.py +269 -0
- gobby/workflows/enforcement/commit_policy.py +283 -0
- gobby/workflows/enforcement/handlers.py +269 -0
- gobby/workflows/{task_enforcement_actions.py → enforcement/task_policy.py} +29 -388
- gobby/workflows/engine.py +13 -2
- gobby/workflows/git_utils.py +106 -0
- gobby/workflows/lifecycle_evaluator.py +29 -1
- gobby/workflows/llm_actions.py +30 -0
- gobby/workflows/loader.py +19 -6
- gobby/workflows/mcp_actions.py +20 -1
- gobby/workflows/memory_actions.py +154 -0
- gobby/workflows/safe_evaluator.py +183 -0
- gobby/workflows/session_actions.py +44 -0
- gobby/workflows/state_actions.py +60 -1
- gobby/workflows/stop_signal_actions.py +55 -0
- gobby/workflows/summary_actions.py +111 -1
- gobby/workflows/task_sync_actions.py +347 -0
- gobby/workflows/todo_actions.py +34 -1
- gobby/workflows/webhook_actions.py +185 -0
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/METADATA +87 -21
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/RECORD +201 -172
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/WHEEL +1 -1
- gobby/adapters/codex.py +0 -1292
- gobby/install/claude/commands/gobby/bug.md +0 -51
- gobby/install/claude/commands/gobby/chore.md +0 -51
- gobby/install/claude/commands/gobby/epic.md +0 -52
- gobby/install/claude/commands/gobby/eval.md +0 -235
- gobby/install/claude/commands/gobby/feat.md +0 -49
- gobby/install/claude/commands/gobby/nit.md +0 -52
- gobby/install/claude/commands/gobby/ref.md +0 -52
- gobby/install/codex/prompts/forget.md +0 -7
- gobby/install/codex/prompts/memories.md +0 -7
- gobby/install/codex/prompts/recall.md +0 -7
- gobby/install/codex/prompts/remember.md +0 -13
- gobby/llm/gemini_executor.py +0 -339
- gobby/mcp_proxy/tools/session_messages.py +0 -1056
- gobby/mcp_proxy/tools/task_expansion.py +0 -591
- gobby/prompts/defaults/expansion/system.md +0 -119
- gobby/prompts/defaults/expansion/user.md +0 -48
- gobby/prompts/defaults/external_validation/agent.md +0 -72
- gobby/prompts/defaults/external_validation/external.md +0 -63
- gobby/prompts/defaults/external_validation/spawn.md +0 -83
- gobby/prompts/defaults/external_validation/system.md +0 -6
- gobby/prompts/defaults/features/import_mcp.md +0 -22
- gobby/prompts/defaults/features/import_mcp_github.md +0 -17
- gobby/prompts/defaults/features/import_mcp_search.md +0 -16
- gobby/prompts/defaults/features/recommend_tools.md +0 -32
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +0 -35
- gobby/prompts/defaults/features/recommend_tools_llm.md +0 -30
- gobby/prompts/defaults/features/server_description.md +0 -20
- gobby/prompts/defaults/features/server_description_system.md +0 -6
- gobby/prompts/defaults/features/task_description.md +0 -31
- gobby/prompts/defaults/features/task_description_system.md +0 -6
- gobby/prompts/defaults/features/tool_summary.md +0 -17
- gobby/prompts/defaults/features/tool_summary_system.md +0 -6
- gobby/prompts/defaults/research/step.md +0 -58
- gobby/prompts/defaults/validation/criteria.md +0 -47
- gobby/prompts/defaults/validation/validate.md +0 -38
- gobby/storage/migrations_legacy.py +0 -1359
- gobby/tasks/context.py +0 -747
- gobby/tasks/criteria.py +0 -342
- gobby/tasks/expansion.py +0 -626
- gobby/tasks/prompts/expand.py +0 -327
- gobby/tasks/research.py +0 -421
- gobby/tasks/tdd.py +0 -352
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.5.dist-info → gobby-0.2.7.dist-info}/top_level.txt +0 -0
gobby/workflows/actions.py
CHANGED
|
@@ -6,8 +6,10 @@ from typing import Any, Protocol
|
|
|
6
6
|
|
|
7
7
|
from gobby.storage.database import DatabaseProtocol
|
|
8
8
|
from gobby.storage.sessions import LocalSessionManager
|
|
9
|
-
from gobby.
|
|
10
|
-
|
|
9
|
+
from gobby.workflows.artifact_actions import (
|
|
10
|
+
handle_capture_artifact,
|
|
11
|
+
handle_read_artifact,
|
|
12
|
+
)
|
|
11
13
|
from gobby.workflows.autonomous_actions import (
|
|
12
14
|
detect_stuck,
|
|
13
15
|
detect_task_loop,
|
|
@@ -18,33 +20,41 @@ from gobby.workflows.autonomous_actions import (
|
|
|
18
20
|
stop_progress_tracking,
|
|
19
21
|
)
|
|
20
22
|
from gobby.workflows.context_actions import (
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
inject_message,
|
|
23
|
+
handle_extract_handoff_context,
|
|
24
|
+
handle_inject_context,
|
|
25
|
+
handle_inject_message,
|
|
25
26
|
)
|
|
26
27
|
from gobby.workflows.definitions import WorkflowState
|
|
27
|
-
from gobby.workflows.
|
|
28
|
-
|
|
29
|
-
|
|
28
|
+
from gobby.workflows.enforcement import (
|
|
29
|
+
handle_block_tools,
|
|
30
|
+
handle_capture_baseline_dirty_files,
|
|
31
|
+
handle_require_active_task,
|
|
32
|
+
handle_require_commit_before_stop,
|
|
33
|
+
handle_require_task_complete,
|
|
34
|
+
handle_require_task_review_or_close_before_stop,
|
|
35
|
+
handle_validate_session_task_scope,
|
|
36
|
+
)
|
|
37
|
+
from gobby.workflows.llm_actions import handle_call_llm
|
|
38
|
+
from gobby.workflows.mcp_actions import handle_call_mcp_tool
|
|
30
39
|
from gobby.workflows.memory_actions import (
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
40
|
+
handle_memory_extract,
|
|
41
|
+
handle_memory_recall_relevant,
|
|
42
|
+
handle_memory_save,
|
|
43
|
+
handle_memory_sync_export,
|
|
44
|
+
handle_memory_sync_import,
|
|
45
|
+
handle_reset_memory_injection_tracking,
|
|
36
46
|
)
|
|
37
47
|
from gobby.workflows.session_actions import (
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
48
|
+
handle_mark_session_status,
|
|
49
|
+
handle_start_new_session,
|
|
50
|
+
handle_switch_mode,
|
|
41
51
|
)
|
|
42
52
|
from gobby.workflows.state_actions import (
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
53
|
+
handle_increment_variable,
|
|
54
|
+
handle_load_workflow_state,
|
|
55
|
+
handle_mark_loop_complete,
|
|
56
|
+
handle_save_workflow_state,
|
|
57
|
+
handle_set_variable,
|
|
48
58
|
)
|
|
49
59
|
from gobby.workflows.stop_signal_actions import (
|
|
50
60
|
check_stop_signal,
|
|
@@ -52,23 +62,23 @@ from gobby.workflows.stop_signal_actions import (
|
|
|
52
62
|
request_stop,
|
|
53
63
|
)
|
|
54
64
|
from gobby.workflows.summary_actions import (
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
synthesize_title,
|
|
65
|
+
handle_generate_handoff,
|
|
66
|
+
handle_generate_summary,
|
|
67
|
+
handle_synthesize_title,
|
|
59
68
|
)
|
|
60
|
-
from gobby.workflows.
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
validate_session_task_scope,
|
|
69
|
+
from gobby.workflows.task_sync_actions import (
|
|
70
|
+
handle_get_workflow_tasks,
|
|
71
|
+
handle_persist_tasks,
|
|
72
|
+
handle_task_sync_export,
|
|
73
|
+
handle_task_sync_import,
|
|
74
|
+
handle_update_workflow_task,
|
|
67
75
|
)
|
|
68
76
|
from gobby.workflows.templates import TemplateEngine
|
|
69
|
-
from gobby.workflows.todo_actions import
|
|
70
|
-
|
|
71
|
-
|
|
77
|
+
from gobby.workflows.todo_actions import (
|
|
78
|
+
handle_mark_todo_complete,
|
|
79
|
+
handle_write_todos,
|
|
80
|
+
)
|
|
81
|
+
from gobby.workflows.webhook_actions import handle_webhook
|
|
72
82
|
|
|
73
83
|
logger = logging.getLogger(__name__)
|
|
74
84
|
|
|
@@ -146,17 +156,7 @@ class ActionExecutor:
|
|
|
146
156
|
self._handlers[name] = handler
|
|
147
157
|
|
|
148
158
|
def register_plugin_actions(self, plugin_registry: Any) -> None:
|
|
149
|
-
"""
|
|
150
|
-
Register actions from loaded plugins.
|
|
151
|
-
|
|
152
|
-
Actions are registered with the naming convention:
|
|
153
|
-
plugin:<plugin-name>:<action-name>
|
|
154
|
-
|
|
155
|
-
Plugin actions with schemas will have their inputs validated before execution.
|
|
156
|
-
|
|
157
|
-
Args:
|
|
158
|
-
plugin_registry: PluginRegistry instance containing loaded plugins.
|
|
159
|
-
"""
|
|
159
|
+
"""Register actions from loaded plugins."""
|
|
160
160
|
if plugin_registry is None:
|
|
161
161
|
return
|
|
162
162
|
|
|
@@ -164,1147 +164,233 @@ class ActionExecutor:
|
|
|
164
164
|
for action_name, plugin_action in plugin._actions.items():
|
|
165
165
|
full_name = f"plugin:{plugin_name}:{action_name}"
|
|
166
166
|
|
|
167
|
-
# Create wrapper that validates schema before calling handler
|
|
168
167
|
if plugin_action.schema:
|
|
169
168
|
wrapper = self._create_validating_wrapper(plugin_action)
|
|
170
169
|
self._handlers[full_name] = wrapper
|
|
171
170
|
else:
|
|
172
|
-
# No schema, use handler directly
|
|
173
171
|
self._handlers[full_name] = plugin_action.handler
|
|
174
172
|
|
|
175
173
|
logger.debug(f"Registered plugin action: {full_name}")
|
|
176
174
|
|
|
177
175
|
def _create_validating_wrapper(self, plugin_action: Any) -> ActionHandler:
|
|
178
|
-
"""Create a wrapper handler that validates input against schema.
|
|
179
|
-
|
|
180
|
-
Args:
|
|
181
|
-
plugin_action: PluginAction with schema and handler.
|
|
182
|
-
|
|
183
|
-
Returns:
|
|
184
|
-
Wrapper handler that validates before calling the real handler.
|
|
185
|
-
"""
|
|
176
|
+
"""Create a wrapper handler that validates input against schema."""
|
|
186
177
|
|
|
187
178
|
async def validating_handler(
|
|
188
179
|
context: ActionContext, **kwargs: Any
|
|
189
180
|
) -> dict[str, Any] | None:
|
|
190
|
-
# Validate input against schema
|
|
191
181
|
is_valid, error = plugin_action.validate_input(kwargs)
|
|
192
182
|
if not is_valid:
|
|
193
183
|
logger.warning(f"Plugin action '{plugin_action.name}' validation failed: {error}")
|
|
194
184
|
return {"error": f"Schema validation failed: {error}"}
|
|
195
185
|
|
|
196
|
-
# Call the actual handler
|
|
197
186
|
result = await plugin_action.handler(context, **kwargs)
|
|
198
187
|
return dict(result) if isinstance(result, dict) else None
|
|
199
188
|
|
|
200
189
|
return validating_handler
|
|
201
190
|
|
|
202
191
|
def _register_defaults(self) -> None:
|
|
203
|
-
"""Register built-in actions."""
|
|
204
|
-
|
|
205
|
-
self.register("
|
|
206
|
-
self.register("
|
|
207
|
-
self.register("
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
self.register("
|
|
211
|
-
self.register("read_artifact",
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
self.register("
|
|
215
|
-
self.register("
|
|
216
|
-
self.register("
|
|
217
|
-
self.register("
|
|
218
|
-
self.register("
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
self.register("
|
|
222
|
-
self.register("
|
|
223
|
-
self.register("
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
self.register("
|
|
227
|
-
self.register("
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
#
|
|
233
|
-
self.register("
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
self.register("
|
|
237
|
-
self.register("
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
self.register(
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
)
|
|
245
|
-
self.register("
|
|
246
|
-
self.register("
|
|
247
|
-
|
|
248
|
-
#
|
|
249
|
-
self.register("
|
|
250
|
-
|
|
251
|
-
self.register("
|
|
252
|
-
self.register("
|
|
253
|
-
self.register("
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
self.
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
self.
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
return await
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
return
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
self, context: ActionContext, **kwargs: Any
|
|
282
|
-
) -> dict[str, Any] | None:
|
|
283
|
-
"""Inject context from a source."""
|
|
284
|
-
return inject_context(
|
|
285
|
-
session_manager=context.session_manager,
|
|
286
|
-
session_id=context.session_id,
|
|
287
|
-
state=context.state,
|
|
288
|
-
template_engine=context.template_engine,
|
|
289
|
-
source=kwargs.get("source"),
|
|
290
|
-
template=kwargs.get("template"),
|
|
291
|
-
require=kwargs.get("require", False),
|
|
292
|
-
)
|
|
293
|
-
|
|
294
|
-
async def _handle_inject_message(
|
|
295
|
-
self, context: ActionContext, **kwargs: Any
|
|
296
|
-
) -> dict[str, Any] | None:
|
|
297
|
-
"""Inject a message to the user/assistant, rendering it as a template."""
|
|
298
|
-
return inject_message(
|
|
299
|
-
session_manager=context.session_manager,
|
|
300
|
-
session_id=context.session_id,
|
|
301
|
-
state=context.state,
|
|
302
|
-
template_engine=context.template_engine,
|
|
303
|
-
content=kwargs.get("content"),
|
|
304
|
-
**{k: v for k, v in kwargs.items() if k != "content"},
|
|
305
|
-
)
|
|
306
|
-
|
|
307
|
-
async def _handle_capture_artifact(
|
|
308
|
-
self, context: ActionContext, **kwargs: Any
|
|
309
|
-
) -> dict[str, Any] | None:
|
|
310
|
-
"""Capture an artifact (file) and store its path in state."""
|
|
311
|
-
return capture_artifact(
|
|
312
|
-
state=context.state,
|
|
313
|
-
pattern=kwargs.get("pattern"),
|
|
314
|
-
save_as=kwargs.get("as"),
|
|
315
|
-
)
|
|
316
|
-
|
|
317
|
-
async def _handle_read_artifact(
|
|
318
|
-
self, context: ActionContext, **kwargs: Any
|
|
319
|
-
) -> dict[str, Any] | None:
|
|
320
|
-
"""Read an artifact's content into a workflow variable."""
|
|
321
|
-
return read_artifact(
|
|
322
|
-
state=context.state,
|
|
323
|
-
pattern=kwargs.get("pattern"),
|
|
324
|
-
variable_name=kwargs.get("as"),
|
|
325
|
-
)
|
|
326
|
-
|
|
327
|
-
async def _handle_load_workflow_state(
|
|
328
|
-
self, context: ActionContext, **kwargs: Any
|
|
329
|
-
) -> dict[str, Any] | None:
|
|
330
|
-
"""Load workflow state from DB."""
|
|
331
|
-
return load_workflow_state(context.db, context.session_id, context.state)
|
|
332
|
-
|
|
333
|
-
async def _handle_save_workflow_state(
|
|
334
|
-
self, context: ActionContext, **kwargs: Any
|
|
335
|
-
) -> dict[str, Any] | None:
|
|
336
|
-
"""Save workflow state to DB."""
|
|
337
|
-
return save_workflow_state(context.db, context.state)
|
|
338
|
-
|
|
339
|
-
async def _handle_set_variable(
|
|
340
|
-
self, context: ActionContext, **kwargs: Any
|
|
341
|
-
) -> dict[str, Any] | None:
|
|
342
|
-
"""Set a workflow variable.
|
|
343
|
-
|
|
344
|
-
Values containing Jinja2 templates ({{ ... }}) are rendered before setting.
|
|
345
|
-
"""
|
|
346
|
-
value = kwargs.get("value")
|
|
347
|
-
|
|
348
|
-
# Render template if value contains Jinja2 syntax
|
|
349
|
-
if isinstance(value, str) and "{{" in value:
|
|
350
|
-
template_context = {
|
|
351
|
-
"variables": context.state.variables or {},
|
|
352
|
-
"state": context.state,
|
|
353
|
-
}
|
|
354
|
-
value = context.template_engine.render(value, template_context)
|
|
355
|
-
|
|
356
|
-
return set_variable(context.state, kwargs.get("name"), value)
|
|
357
|
-
|
|
358
|
-
async def _handle_increment_variable(
|
|
359
|
-
self, context: ActionContext, **kwargs: Any
|
|
360
|
-
) -> dict[str, Any] | None:
|
|
361
|
-
"""Increment a numeric workflow variable."""
|
|
362
|
-
return increment_variable(context.state, kwargs.get("name"), kwargs.get("amount", 1))
|
|
363
|
-
|
|
364
|
-
async def _handle_call_llm(
|
|
365
|
-
self, context: ActionContext, **kwargs: Any
|
|
366
|
-
) -> dict[str, Any] | None:
|
|
367
|
-
"""Call LLM with a prompt template and store result in variable."""
|
|
368
|
-
return await call_llm(
|
|
369
|
-
llm_service=context.llm_service,
|
|
370
|
-
template_engine=context.template_engine,
|
|
371
|
-
state=context.state,
|
|
372
|
-
session=context.session_manager.get(context.session_id),
|
|
373
|
-
prompt=kwargs.get("prompt"),
|
|
374
|
-
output_as=kwargs.get("output_as"),
|
|
375
|
-
**{k: v for k, v in kwargs.items() if k not in ("prompt", "output_as")},
|
|
376
|
-
)
|
|
377
|
-
|
|
378
|
-
async def _handle_synthesize_title(
|
|
379
|
-
self, context: ActionContext, **kwargs: Any
|
|
380
|
-
) -> dict[str, Any] | None:
|
|
381
|
-
"""Synthesize and set a session title."""
|
|
382
|
-
# Extract prompt from event data (UserPromptSubmit hook)
|
|
383
|
-
prompt = None
|
|
384
|
-
if context.event_data:
|
|
385
|
-
prompt = context.event_data.get("prompt")
|
|
386
|
-
|
|
387
|
-
return await synthesize_title(
|
|
388
|
-
session_manager=context.session_manager,
|
|
389
|
-
session_id=context.session_id,
|
|
390
|
-
llm_service=context.llm_service,
|
|
391
|
-
transcript_processor=context.transcript_processor,
|
|
392
|
-
template_engine=context.template_engine,
|
|
393
|
-
template=kwargs.get("template"),
|
|
394
|
-
prompt=prompt,
|
|
395
|
-
)
|
|
396
|
-
|
|
397
|
-
async def _handle_write_todos(
|
|
398
|
-
self, context: ActionContext, **kwargs: Any
|
|
399
|
-
) -> dict[str, Any] | None:
|
|
400
|
-
"""Write todos to a file (default TODO.md)."""
|
|
401
|
-
return write_todos(
|
|
402
|
-
todos=kwargs.get("todos", []),
|
|
403
|
-
filename=kwargs.get("filename", "TODO.md"),
|
|
404
|
-
mode=kwargs.get("mode", "w"),
|
|
405
|
-
)
|
|
406
|
-
|
|
407
|
-
async def _handle_mark_todo_complete(
|
|
408
|
-
self, context: ActionContext, **kwargs: Any
|
|
409
|
-
) -> dict[str, Any] | None:
|
|
410
|
-
"""Mark a todo as complete in TODO.md."""
|
|
411
|
-
return mark_todo_complete(
|
|
412
|
-
todo_text=kwargs.get("todo_text", ""),
|
|
413
|
-
filename=kwargs.get("filename", "TODO.md"),
|
|
414
|
-
)
|
|
415
|
-
|
|
416
|
-
async def _handle_memory_sync_import(
|
|
417
|
-
self, context: ActionContext, **kwargs: Any
|
|
418
|
-
) -> dict[str, Any] | None:
|
|
419
|
-
"""Import memories from filesystem."""
|
|
420
|
-
return await memory_sync_import(context.memory_sync_manager)
|
|
421
|
-
|
|
422
|
-
async def _handle_memory_sync_export(
|
|
423
|
-
self, context: ActionContext, **kwargs: Any
|
|
424
|
-
) -> dict[str, Any] | None:
|
|
425
|
-
"""Export memories to filesystem."""
|
|
426
|
-
return await memory_sync_export(context.memory_sync_manager)
|
|
427
|
-
|
|
428
|
-
async def _handle_task_sync_import(
|
|
429
|
-
self, context: ActionContext, **kwargs: Any
|
|
430
|
-
) -> dict[str, Any] | None:
|
|
431
|
-
"""Import tasks from JSONL file.
|
|
432
|
-
|
|
433
|
-
Reads .gobby/tasks.jsonl and imports tasks into SQLite using
|
|
434
|
-
Last-Write-Wins conflict resolution based on updated_at.
|
|
435
|
-
"""
|
|
436
|
-
if not context.task_sync_manager:
|
|
437
|
-
logger.debug("task_sync_import: No task_sync_manager available")
|
|
438
|
-
return {"error": "Task Sync Manager not available"}
|
|
439
|
-
|
|
440
|
-
try:
|
|
441
|
-
# Get project_id from session for project-scoped sync
|
|
442
|
-
project_id = None
|
|
443
|
-
session = context.session_manager.get(context.session_id)
|
|
444
|
-
if session:
|
|
445
|
-
project_id = session.project_id
|
|
446
|
-
|
|
447
|
-
context.task_sync_manager.import_from_jsonl(project_id=project_id)
|
|
448
|
-
logger.info("Task sync import completed")
|
|
449
|
-
return {"imported": True}
|
|
450
|
-
except Exception as e:
|
|
451
|
-
logger.error(f"task_sync_import failed: {e}", exc_info=True)
|
|
452
|
-
return {"error": str(e)}
|
|
453
|
-
|
|
454
|
-
async def _handle_task_sync_export(
|
|
455
|
-
self, context: ActionContext, **kwargs: Any
|
|
456
|
-
) -> dict[str, Any] | None:
|
|
457
|
-
"""Export tasks to JSONL file.
|
|
458
|
-
|
|
459
|
-
Writes tasks and dependencies to .gobby/tasks.jsonl for Git persistence.
|
|
460
|
-
Uses content hashing to skip writes if nothing changed.
|
|
461
|
-
"""
|
|
462
|
-
if not context.task_sync_manager:
|
|
463
|
-
logger.debug("task_sync_export: No task_sync_manager available")
|
|
464
|
-
return {"error": "Task Sync Manager not available"}
|
|
465
|
-
|
|
466
|
-
try:
|
|
467
|
-
# Get project_id from session for project-scoped sync
|
|
468
|
-
project_id = None
|
|
469
|
-
session = context.session_manager.get(context.session_id)
|
|
470
|
-
if session:
|
|
471
|
-
project_id = session.project_id
|
|
472
|
-
|
|
473
|
-
context.task_sync_manager.export_to_jsonl(project_id=project_id)
|
|
474
|
-
logger.info("Task sync export completed")
|
|
475
|
-
return {"exported": True}
|
|
476
|
-
except Exception as e:
|
|
477
|
-
logger.error(f"task_sync_export failed: {e}", exc_info=True)
|
|
478
|
-
return {"error": str(e)}
|
|
479
|
-
|
|
480
|
-
async def _handle_persist_tasks(
|
|
481
|
-
self, context: ActionContext, **kwargs: Any
|
|
482
|
-
) -> dict[str, Any] | None:
|
|
483
|
-
"""Persist a list of task dicts to Gobby task system.
|
|
484
|
-
|
|
485
|
-
Enhanced to support workflow integration with ID mapping.
|
|
486
|
-
|
|
487
|
-
Args (via kwargs):
|
|
488
|
-
tasks: List of task dicts (or source variable name)
|
|
489
|
-
source: Variable name containing task list (alternative to tasks)
|
|
490
|
-
workflow_name: Associate tasks with this workflow
|
|
491
|
-
parent_task_id: Optional parent task for all created tasks
|
|
492
|
-
|
|
493
|
-
Returns:
|
|
494
|
-
Dict with tasks_persisted count, ids list, and id_mapping dict
|
|
495
|
-
"""
|
|
496
|
-
# Get tasks from either 'tasks' kwarg or 'source' variable
|
|
497
|
-
tasks = kwargs.get("tasks", [])
|
|
498
|
-
source = kwargs.get("source")
|
|
499
|
-
|
|
500
|
-
if source and context.state.variables:
|
|
501
|
-
source_data = context.state.variables.get(source)
|
|
502
|
-
if source_data:
|
|
503
|
-
# Handle nested structure like task_list.tasks
|
|
504
|
-
if isinstance(source_data, dict) and "tasks" in source_data:
|
|
505
|
-
tasks = source_data["tasks"]
|
|
506
|
-
elif isinstance(source_data, list):
|
|
507
|
-
tasks = source_data
|
|
508
|
-
|
|
509
|
-
if not tasks:
|
|
510
|
-
return {"tasks_persisted": 0, "ids": [], "id_mapping": {}}
|
|
511
|
-
|
|
512
|
-
try:
|
|
513
|
-
from gobby.workflows.task_actions import persist_decomposed_tasks
|
|
514
|
-
|
|
515
|
-
current_session = context.session_manager.get(context.session_id)
|
|
516
|
-
project_id = current_session.project_id if current_session else "default"
|
|
517
|
-
|
|
518
|
-
# Get workflow name from kwargs or state
|
|
519
|
-
workflow_name = kwargs.get("workflow_name")
|
|
520
|
-
if not workflow_name and context.state.workflow_name:
|
|
521
|
-
workflow_name = context.state.workflow_name
|
|
522
|
-
|
|
523
|
-
parent_task_id = kwargs.get("parent_task_id")
|
|
524
|
-
|
|
525
|
-
id_mapping = persist_decomposed_tasks(
|
|
526
|
-
db=context.db,
|
|
527
|
-
project_id=project_id,
|
|
528
|
-
tasks_data=tasks,
|
|
529
|
-
workflow_name=workflow_name or "unnamed",
|
|
530
|
-
parent_task_id=parent_task_id,
|
|
531
|
-
created_in_session_id=context.session_id,
|
|
192
|
+
"""Register built-in actions using external handlers."""
|
|
193
|
+
# --- Context/injection actions ---
|
|
194
|
+
self.register("inject_context", handle_inject_context)
|
|
195
|
+
self.register("inject_message", handle_inject_message)
|
|
196
|
+
self.register("extract_handoff_context", handle_extract_handoff_context)
|
|
197
|
+
|
|
198
|
+
# --- Artifact actions ---
|
|
199
|
+
self.register("capture_artifact", handle_capture_artifact)
|
|
200
|
+
self.register("read_artifact", handle_read_artifact)
|
|
201
|
+
|
|
202
|
+
# --- State actions ---
|
|
203
|
+
self.register("load_workflow_state", handle_load_workflow_state)
|
|
204
|
+
self.register("save_workflow_state", handle_save_workflow_state)
|
|
205
|
+
self.register("set_variable", handle_set_variable)
|
|
206
|
+
self.register("increment_variable", handle_increment_variable)
|
|
207
|
+
self.register("mark_loop_complete", handle_mark_loop_complete)
|
|
208
|
+
|
|
209
|
+
# --- Session actions ---
|
|
210
|
+
self.register("start_new_session", handle_start_new_session)
|
|
211
|
+
self.register("mark_session_status", handle_mark_session_status)
|
|
212
|
+
self.register("switch_mode", handle_switch_mode)
|
|
213
|
+
|
|
214
|
+
# --- Todo actions ---
|
|
215
|
+
self.register("write_todos", handle_write_todos)
|
|
216
|
+
self.register("mark_todo_complete", handle_mark_todo_complete)
|
|
217
|
+
|
|
218
|
+
# --- LLM actions ---
|
|
219
|
+
self.register("call_llm", handle_call_llm)
|
|
220
|
+
|
|
221
|
+
# --- MCP actions ---
|
|
222
|
+
self.register("call_mcp_tool", handle_call_mcp_tool)
|
|
223
|
+
|
|
224
|
+
# --- Summary actions ---
|
|
225
|
+
self.register("synthesize_title", handle_synthesize_title)
|
|
226
|
+
self.register("generate_summary", handle_generate_summary)
|
|
227
|
+
self.register("generate_handoff", handle_generate_handoff)
|
|
228
|
+
|
|
229
|
+
# --- Memory actions ---
|
|
230
|
+
self.register("memory_save", handle_memory_save)
|
|
231
|
+
self.register("memory_recall_relevant", handle_memory_recall_relevant)
|
|
232
|
+
self.register("memory_sync_import", handle_memory_sync_import)
|
|
233
|
+
self.register("memory_sync_export", handle_memory_sync_export)
|
|
234
|
+
self.register("memory_extract", handle_memory_extract)
|
|
235
|
+
self.register("reset_memory_injection_tracking", handle_reset_memory_injection_tracking)
|
|
236
|
+
|
|
237
|
+
# --- Task sync actions ---
|
|
238
|
+
self.register("task_sync_import", handle_task_sync_import)
|
|
239
|
+
self.register("task_sync_export", handle_task_sync_export)
|
|
240
|
+
self.register("persist_tasks", handle_persist_tasks)
|
|
241
|
+
self.register("get_workflow_tasks", handle_get_workflow_tasks)
|
|
242
|
+
self.register("update_workflow_task", handle_update_workflow_task)
|
|
243
|
+
|
|
244
|
+
# --- Task enforcement actions (closures for task_manager access) ---
|
|
245
|
+
self._register_task_enforcement_actions()
|
|
246
|
+
|
|
247
|
+
# --- Webhook (closure for config access) ---
|
|
248
|
+
self._register_webhook_action()
|
|
249
|
+
|
|
250
|
+
# --- Stop signal actions (closures for stop_registry access) ---
|
|
251
|
+
self._register_stop_signal_actions()
|
|
252
|
+
|
|
253
|
+
# --- Autonomous execution actions (closures for progress_tracker/stuck_detector) ---
|
|
254
|
+
self._register_autonomous_actions()
|
|
255
|
+
|
|
256
|
+
def _register_task_enforcement_actions(self) -> None:
|
|
257
|
+
"""Register task enforcement actions with task_manager closure."""
|
|
258
|
+
tm = self.task_manager
|
|
259
|
+
te = self.template_engine
|
|
260
|
+
|
|
261
|
+
async def block_tools(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
262
|
+
return await handle_block_tools(context, task_manager=tm, **kw)
|
|
263
|
+
|
|
264
|
+
async def require_active(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
265
|
+
return await handle_require_active_task(context, task_manager=tm, **kw)
|
|
266
|
+
|
|
267
|
+
async def require_complete(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
268
|
+
return await handle_require_task_complete(
|
|
269
|
+
context, task_manager=tm, template_engine=te, **kw
|
|
532
270
|
)
|
|
533
271
|
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
context.state.variables = {}
|
|
537
|
-
context.state.variables["task_id_mapping"] = id_mapping
|
|
538
|
-
|
|
539
|
-
return {
|
|
540
|
-
"tasks_persisted": len(id_mapping),
|
|
541
|
-
"ids": list(id_mapping.values()),
|
|
542
|
-
"id_mapping": id_mapping,
|
|
543
|
-
}
|
|
544
|
-
except Exception as e:
|
|
545
|
-
logger.error(f"persist_tasks: Failed: {e}")
|
|
546
|
-
return {"error": str(e)}
|
|
547
|
-
|
|
548
|
-
async def _handle_get_workflow_tasks(
|
|
549
|
-
self, context: ActionContext, **kwargs: Any
|
|
550
|
-
) -> dict[str, Any] | None:
|
|
551
|
-
"""Get tasks associated with the current workflow.
|
|
552
|
-
|
|
553
|
-
Args (via kwargs):
|
|
554
|
-
workflow_name: Override workflow name (defaults to current)
|
|
555
|
-
include_closed: Include closed tasks (default: False)
|
|
556
|
-
as: Variable name to store result in
|
|
272
|
+
async def require_commit(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
273
|
+
return await handle_require_commit_before_stop(context, task_manager=tm, **kw)
|
|
557
274
|
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
workflow_name = kwargs.get("workflow_name")
|
|
564
|
-
if not workflow_name and context.state.workflow_name:
|
|
565
|
-
workflow_name = context.state.workflow_name
|
|
566
|
-
|
|
567
|
-
if not workflow_name:
|
|
568
|
-
return {"error": "No workflow name specified"}
|
|
569
|
-
|
|
570
|
-
current_session = context.session_manager.get(context.session_id)
|
|
571
|
-
project_id = current_session.project_id if current_session else None
|
|
275
|
+
async def require_review(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
276
|
+
return await handle_require_task_review_or_close_before_stop(
|
|
277
|
+
context, task_manager=tm, **kw
|
|
278
|
+
)
|
|
572
279
|
|
|
573
|
-
|
|
280
|
+
async def validate_scope(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
281
|
+
return await handle_validate_session_task_scope(context, task_manager=tm, **kw)
|
|
574
282
|
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
workflow_name=workflow_name,
|
|
578
|
-
project_id=project_id,
|
|
579
|
-
include_closed=include_closed,
|
|
580
|
-
)
|
|
283
|
+
async def capture_baseline(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
284
|
+
return await handle_capture_baseline_dirty_files(context, task_manager=tm, **kw)
|
|
581
285
|
|
|
582
|
-
|
|
583
|
-
|
|
286
|
+
self.register("block_tools", block_tools)
|
|
287
|
+
self.register("require_active_task", require_active)
|
|
288
|
+
self.register("require_task_complete", require_complete)
|
|
289
|
+
self.register("require_commit_before_stop", require_commit)
|
|
290
|
+
self.register("require_task_review_or_close_before_stop", require_review)
|
|
291
|
+
self.register("validate_session_task_scope", validate_scope)
|
|
292
|
+
self.register("capture_baseline_dirty_files", capture_baseline)
|
|
584
293
|
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
if not context.state.variables:
|
|
589
|
-
context.state.variables = {}
|
|
590
|
-
context.state.variables[output_as] = tasks_data
|
|
294
|
+
def _register_webhook_action(self) -> None:
|
|
295
|
+
"""Register webhook action with config closure."""
|
|
296
|
+
cfg = self.config
|
|
591
297
|
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
{"id": t.id, "title": t.title, "status": t.status} for t in tasks
|
|
595
|
-
]
|
|
298
|
+
async def webhook(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
299
|
+
return await handle_webhook(context, config=cfg, **kw)
|
|
596
300
|
|
|
597
|
-
|
|
301
|
+
self.register("webhook", webhook)
|
|
598
302
|
|
|
599
|
-
|
|
600
|
-
self
|
|
601
|
-
|
|
602
|
-
"""Update a task from workflow context.
|
|
603
|
-
|
|
604
|
-
Args (via kwargs):
|
|
605
|
-
task_id: ID of task to update (required)
|
|
606
|
-
status: New status
|
|
607
|
-
verification: Verification result
|
|
608
|
-
validation_status: Validation status
|
|
609
|
-
|
|
610
|
-
Returns:
|
|
611
|
-
Dict with updated task data
|
|
612
|
-
"""
|
|
613
|
-
from gobby.workflows.task_actions import update_task_from_workflow
|
|
614
|
-
|
|
615
|
-
task_id = kwargs.get("task_id")
|
|
616
|
-
if not task_id:
|
|
617
|
-
# Try to get from current_task_index in state
|
|
618
|
-
if context.state.task_list and context.state.current_task_index is not None:
|
|
619
|
-
idx = context.state.current_task_index
|
|
620
|
-
if 0 <= idx < len(context.state.task_list):
|
|
621
|
-
task_id = context.state.task_list[idx].get("id")
|
|
622
|
-
|
|
623
|
-
if not task_id:
|
|
624
|
-
return {"error": "No task_id specified"}
|
|
625
|
-
|
|
626
|
-
task = update_task_from_workflow(
|
|
627
|
-
db=context.db,
|
|
628
|
-
task_id=task_id,
|
|
629
|
-
status=kwargs.get("status"),
|
|
630
|
-
verification=kwargs.get("verification"),
|
|
631
|
-
validation_status=kwargs.get("validation_status"),
|
|
632
|
-
validation_feedback=kwargs.get("validation_feedback"),
|
|
633
|
-
)
|
|
634
|
-
|
|
635
|
-
if task:
|
|
636
|
-
return {"updated": True, "task": task.to_dict()}
|
|
637
|
-
return {"updated": False, "error": "Task not found"}
|
|
638
|
-
|
|
639
|
-
async def _handle_call_mcp_tool(
|
|
640
|
-
self,
|
|
641
|
-
context: ActionContext,
|
|
642
|
-
**kwargs: Any,
|
|
643
|
-
) -> dict[str, Any] | None:
|
|
644
|
-
"""Call an MCP tool on a connected server."""
|
|
645
|
-
return await call_mcp_tool(
|
|
646
|
-
mcp_manager=context.mcp_manager,
|
|
647
|
-
state=context.state,
|
|
648
|
-
server_name=kwargs.get("server_name"),
|
|
649
|
-
tool_name=kwargs.get("tool_name"),
|
|
650
|
-
arguments=kwargs.get("arguments"),
|
|
651
|
-
output_as=kwargs.get("as"),
|
|
652
|
-
)
|
|
653
|
-
|
|
654
|
-
async def _handle_generate_handoff(
|
|
655
|
-
self, context: ActionContext, **kwargs: Any
|
|
656
|
-
) -> dict[str, Any] | None:
|
|
657
|
-
"""Generate a handoff record (summary + mark status).
|
|
658
|
-
|
|
659
|
-
For compact mode, fetches the current session's existing summary_markdown
|
|
660
|
-
as previous_summary for cumulative compression.
|
|
661
|
-
"""
|
|
662
|
-
# Detect mode from kwargs or event data
|
|
663
|
-
mode = kwargs.get("mode", "clear")
|
|
664
|
-
|
|
665
|
-
# Check if this is a compact event based on event_data
|
|
666
|
-
# Use precise matching against known compact event types to avoid false positives
|
|
667
|
-
COMPACT_EVENT_TYPES = {"pre_compact", "compact"}
|
|
668
|
-
if context.event_data:
|
|
669
|
-
raw_event_type = context.event_data.get("event_type") or ""
|
|
670
|
-
normalized_event_type = str(raw_event_type).strip().lower()
|
|
671
|
-
if normalized_event_type in COMPACT_EVENT_TYPES:
|
|
672
|
-
mode = "compact"
|
|
673
|
-
|
|
674
|
-
# For compact mode, fetch previous summary for cumulative compression
|
|
675
|
-
previous_summary = None
|
|
676
|
-
if mode == "compact":
|
|
677
|
-
current_session = context.session_manager.get(context.session_id)
|
|
678
|
-
if current_session:
|
|
679
|
-
previous_summary = getattr(current_session, "summary_markdown", None)
|
|
680
|
-
if previous_summary:
|
|
681
|
-
logger.debug(
|
|
682
|
-
f"Compact mode: using previous summary ({len(previous_summary)} chars) "
|
|
683
|
-
f"for cumulative compression"
|
|
684
|
-
)
|
|
685
|
-
|
|
686
|
-
return await generate_handoff(
|
|
687
|
-
session_manager=context.session_manager,
|
|
688
|
-
session_id=context.session_id,
|
|
689
|
-
llm_service=context.llm_service,
|
|
690
|
-
transcript_processor=context.transcript_processor,
|
|
691
|
-
template=kwargs.get("template"),
|
|
692
|
-
previous_summary=previous_summary,
|
|
693
|
-
mode=mode,
|
|
694
|
-
)
|
|
695
|
-
|
|
696
|
-
async def _handle_generate_summary(
|
|
697
|
-
self, context: ActionContext, **kwargs: Any
|
|
698
|
-
) -> dict[str, Any] | None:
|
|
699
|
-
"""Generate a session summary using LLM."""
|
|
700
|
-
return await generate_summary(
|
|
701
|
-
session_manager=context.session_manager,
|
|
702
|
-
session_id=context.session_id,
|
|
703
|
-
llm_service=context.llm_service,
|
|
704
|
-
transcript_processor=context.transcript_processor,
|
|
705
|
-
template=kwargs.get("template"),
|
|
706
|
-
)
|
|
707
|
-
|
|
708
|
-
async def _handle_start_new_session(
|
|
709
|
-
self, context: ActionContext, **kwargs: Any
|
|
710
|
-
) -> dict[str, Any] | None:
|
|
711
|
-
"""Start a new CLI session (chaining)."""
|
|
712
|
-
return start_new_session(
|
|
713
|
-
session_manager=context.session_manager,
|
|
714
|
-
session_id=context.session_id,
|
|
715
|
-
command=kwargs.get("command"),
|
|
716
|
-
args=kwargs.get("args"),
|
|
717
|
-
prompt=kwargs.get("prompt"),
|
|
718
|
-
cwd=kwargs.get("cwd"),
|
|
719
|
-
)
|
|
720
|
-
|
|
721
|
-
async def _handle_mark_loop_complete(
|
|
722
|
-
self, context: ActionContext, **kwargs: Any
|
|
723
|
-
) -> dict[str, Any] | None:
|
|
724
|
-
"""Mark the autonomous loop as complete."""
|
|
725
|
-
return mark_loop_complete(context.state)
|
|
726
|
-
|
|
727
|
-
async def _handle_extract_handoff_context(
|
|
728
|
-
self, context: ActionContext, **kwargs: Any
|
|
729
|
-
) -> dict[str, Any] | None:
|
|
730
|
-
"""Extract handoff context from transcript and save to session.compact_markdown."""
|
|
731
|
-
return extract_handoff_context(
|
|
732
|
-
session_manager=context.session_manager,
|
|
733
|
-
session_id=context.session_id,
|
|
734
|
-
config=context.config,
|
|
735
|
-
db=self.db,
|
|
736
|
-
)
|
|
737
|
-
|
|
738
|
-
def _format_handoff_as_markdown(self, ctx: Any, prompt_template: str | None = None) -> str:
|
|
739
|
-
"""Format HandoffContext as markdown for injection."""
|
|
740
|
-
return format_handoff_as_markdown(ctx, prompt_template)
|
|
741
|
-
|
|
742
|
-
async def _handle_save_memory(
|
|
743
|
-
self, context: ActionContext, **kwargs: Any
|
|
744
|
-
) -> dict[str, Any] | None:
|
|
745
|
-
"""Save a memory directly from workflow context."""
|
|
746
|
-
return await memory_save(
|
|
747
|
-
memory_manager=context.memory_manager,
|
|
748
|
-
session_manager=context.session_manager,
|
|
749
|
-
session_id=context.session_id,
|
|
750
|
-
content=kwargs.get("content"),
|
|
751
|
-
memory_type=kwargs.get("memory_type", "fact"),
|
|
752
|
-
importance=kwargs.get("importance", 0.5),
|
|
753
|
-
tags=kwargs.get("tags"),
|
|
754
|
-
project_id=kwargs.get("project_id"),
|
|
755
|
-
)
|
|
756
|
-
|
|
757
|
-
async def _handle_memory_recall_relevant(
|
|
758
|
-
self, context: ActionContext, **kwargs: Any
|
|
759
|
-
) -> dict[str, Any] | None:
|
|
760
|
-
"""Recall memories relevant to the current user prompt."""
|
|
761
|
-
prompt_text = None
|
|
762
|
-
if context.event_data:
|
|
763
|
-
# Check both "prompt" (from hook event) and "prompt_text" (legacy/alternative)
|
|
764
|
-
prompt_text = context.event_data.get("prompt") or context.event_data.get("prompt_text")
|
|
765
|
-
|
|
766
|
-
return await memory_recall_relevant(
|
|
767
|
-
memory_manager=context.memory_manager,
|
|
768
|
-
session_manager=context.session_manager,
|
|
769
|
-
session_id=context.session_id,
|
|
770
|
-
prompt_text=prompt_text,
|
|
771
|
-
project_id=kwargs.get("project_id"),
|
|
772
|
-
limit=kwargs.get("limit", 5),
|
|
773
|
-
min_importance=kwargs.get("min_importance", 0.3),
|
|
774
|
-
state=context.state,
|
|
775
|
-
)
|
|
776
|
-
|
|
777
|
-
async def _handle_reset_memory_injection_tracking(
|
|
778
|
-
self, context: ActionContext, **kwargs: Any
|
|
779
|
-
) -> dict[str, Any] | None:
|
|
780
|
-
"""Reset memory injection tracking to allow re-injection after context loss."""
|
|
781
|
-
return reset_memory_injection_tracking(state=context.state)
|
|
782
|
-
|
|
783
|
-
async def _handle_mark_session_status(
|
|
784
|
-
self, context: ActionContext, **kwargs: Any
|
|
785
|
-
) -> dict[str, Any] | None:
|
|
786
|
-
"""Mark a session status (current or parent)."""
|
|
787
|
-
return mark_session_status(
|
|
788
|
-
session_manager=context.session_manager,
|
|
789
|
-
session_id=context.session_id,
|
|
790
|
-
status=kwargs.get("status"),
|
|
791
|
-
target=kwargs.get("target", "current_session"),
|
|
792
|
-
)
|
|
793
|
-
|
|
794
|
-
async def _handle_switch_mode(
|
|
795
|
-
self, context: ActionContext, **kwargs: Any
|
|
796
|
-
) -> dict[str, Any] | None:
|
|
797
|
-
"""Signal the agent to switch modes (e.g., PLAN, ACT)."""
|
|
798
|
-
return switch_mode(kwargs.get("mode"))
|
|
303
|
+
def _register_stop_signal_actions(self) -> None:
|
|
304
|
+
"""Register stop signal actions accessing self at call time."""
|
|
305
|
+
executor = self
|
|
799
306
|
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
return get_git_status()
|
|
807
|
-
|
|
808
|
-
def _get_recent_git_commits(self, max_commits: int = 10) -> list[dict[str, str]]:
|
|
809
|
-
"""Get recent git commits with hash and message."""
|
|
810
|
-
return get_recent_git_commits(max_commits)
|
|
811
|
-
|
|
812
|
-
def _get_file_changes(self) -> str:
|
|
813
|
-
"""Get detailed file changes from git."""
|
|
814
|
-
return get_file_changes()
|
|
815
|
-
|
|
816
|
-
async def _handle_capture_baseline_dirty_files(
|
|
817
|
-
self, context: ActionContext, **kwargs: Any
|
|
818
|
-
) -> dict[str, Any] | None:
|
|
819
|
-
"""Capture baseline dirty files at session start."""
|
|
820
|
-
# Get project path - prioritize session lookup over hook payload
|
|
821
|
-
project_path = None
|
|
822
|
-
|
|
823
|
-
# 1. Get from session's project (most reliable - session exists by now)
|
|
824
|
-
if context.session_id and context.session_manager:
|
|
825
|
-
session = context.session_manager.get(context.session_id)
|
|
826
|
-
if session and session.project_id:
|
|
827
|
-
from gobby.storage.projects import LocalProjectManager
|
|
828
|
-
|
|
829
|
-
project_mgr = LocalProjectManager(context.db)
|
|
830
|
-
project = project_mgr.get(session.project_id)
|
|
831
|
-
if project and project.repo_path:
|
|
832
|
-
project_path = project.repo_path
|
|
833
|
-
|
|
834
|
-
# 2. Fallback to event_data.cwd (from hook payload)
|
|
835
|
-
if not project_path and context.event_data:
|
|
836
|
-
project_path = context.event_data.get("cwd")
|
|
837
|
-
|
|
838
|
-
return await capture_baseline_dirty_files(
|
|
839
|
-
workflow_state=context.state,
|
|
840
|
-
project_path=project_path,
|
|
841
|
-
)
|
|
842
|
-
|
|
843
|
-
async def _handle_require_active_task(
|
|
844
|
-
self, context: ActionContext, **kwargs: Any
|
|
845
|
-
) -> dict[str, Any] | None:
|
|
846
|
-
"""Check for active task before allowing protected tools."""
|
|
847
|
-
# Get project_id from session for project-scoped task filtering
|
|
848
|
-
current_session = context.session_manager.get(context.session_id)
|
|
849
|
-
project_id = current_session.project_id if current_session else None
|
|
850
|
-
|
|
851
|
-
return await require_active_task(
|
|
852
|
-
task_manager=self.task_manager,
|
|
853
|
-
session_id=context.session_id,
|
|
854
|
-
config=context.config,
|
|
855
|
-
event_data=context.event_data,
|
|
856
|
-
project_id=project_id,
|
|
857
|
-
workflow_state=context.state,
|
|
858
|
-
session_manager=context.session_manager,
|
|
859
|
-
session_task_manager=context.session_task_manager,
|
|
860
|
-
)
|
|
861
|
-
|
|
862
|
-
async def _handle_require_commit_before_stop(
|
|
863
|
-
self, context: ActionContext, **kwargs: Any
|
|
864
|
-
) -> dict[str, Any] | None:
|
|
865
|
-
"""Block stop if task has uncommitted changes."""
|
|
866
|
-
# Get project path - prioritize session lookup over hook payload
|
|
867
|
-
project_path = None
|
|
868
|
-
|
|
869
|
-
# 1. Get from session's project (most reliable - session exists by now)
|
|
870
|
-
if context.session_id and context.session_manager:
|
|
871
|
-
session = context.session_manager.get(context.session_id)
|
|
872
|
-
if session and session.project_id:
|
|
873
|
-
from gobby.storage.projects import LocalProjectManager
|
|
874
|
-
|
|
875
|
-
project_mgr = LocalProjectManager(context.db)
|
|
876
|
-
project = project_mgr.get(session.project_id)
|
|
877
|
-
if project and project.repo_path:
|
|
878
|
-
project_path = project.repo_path
|
|
879
|
-
|
|
880
|
-
# 2. Fallback to event_data.cwd (from hook payload)
|
|
881
|
-
if not project_path and context.event_data:
|
|
882
|
-
project_path = context.event_data.get("cwd")
|
|
883
|
-
|
|
884
|
-
return await require_commit_before_stop(
|
|
885
|
-
workflow_state=context.state,
|
|
886
|
-
project_path=project_path,
|
|
887
|
-
task_manager=self.task_manager,
|
|
888
|
-
)
|
|
889
|
-
|
|
890
|
-
async def _handle_require_task_review_or_close_before_stop(
|
|
891
|
-
self, context: ActionContext, **kwargs: Any
|
|
892
|
-
) -> dict[str, Any] | None:
|
|
893
|
-
"""Block stop if task is still in_progress (regardless of dirty files)."""
|
|
894
|
-
# Get project_id from session for task reference resolution
|
|
895
|
-
project_id = None
|
|
896
|
-
session = context.session_manager.get(context.session_id)
|
|
897
|
-
if session:
|
|
898
|
-
project_id = session.project_id
|
|
899
|
-
|
|
900
|
-
return await require_task_review_or_close_before_stop(
|
|
901
|
-
workflow_state=context.state,
|
|
902
|
-
task_manager=self.task_manager,
|
|
903
|
-
project_id=project_id,
|
|
904
|
-
)
|
|
905
|
-
|
|
906
|
-
async def _handle_require_task_complete(
|
|
907
|
-
self, context: ActionContext, **kwargs: Any
|
|
908
|
-
) -> dict[str, Any] | None:
|
|
909
|
-
"""Check that a task (and its subtasks) are complete before allowing stop.
|
|
910
|
-
|
|
911
|
-
Supports:
|
|
912
|
-
- Single task ID: "#47"
|
|
913
|
-
- List of task IDs: ["#47", "#48"]
|
|
914
|
-
- Wildcard: "*" - work until no ready tasks remain
|
|
915
|
-
"""
|
|
916
|
-
current_session = context.session_manager.get(context.session_id)
|
|
917
|
-
project_id = current_session.project_id if current_session else None
|
|
918
|
-
|
|
919
|
-
# Get task_id from kwargs - may be a template that needs resolving
|
|
920
|
-
task_spec = kwargs.get("task_id")
|
|
921
|
-
|
|
922
|
-
# If it's a template reference like "{{ variables.session_task }}", resolve it
|
|
923
|
-
if task_spec and "{{" in str(task_spec):
|
|
924
|
-
task_spec = context.template_engine.render(
|
|
925
|
-
str(task_spec),
|
|
926
|
-
{"variables": context.state.variables or {}},
|
|
307
|
+
async def check_stop(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
|
|
308
|
+
return check_stop_signal(
|
|
309
|
+
executor.stop_registry,
|
|
310
|
+
context.session_id,
|
|
311
|
+
context.state,
|
|
312
|
+
kw.get("acknowledge", False),
|
|
927
313
|
)
|
|
928
314
|
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
if not task_spec:
|
|
937
|
-
return None
|
|
938
|
-
elif task_spec == "*":
|
|
939
|
-
# Wildcard: get all ready tasks for this project
|
|
940
|
-
if self.task_manager:
|
|
941
|
-
ready_tasks = self.task_manager.list_ready_tasks(
|
|
942
|
-
project_id=project_id,
|
|
943
|
-
limit=100,
|
|
944
|
-
)
|
|
945
|
-
task_ids = [t.id for t in ready_tasks]
|
|
946
|
-
if not task_ids:
|
|
947
|
-
# No ready tasks - allow stop
|
|
948
|
-
logger.debug("require_task_complete: Wildcard mode, no ready tasks")
|
|
949
|
-
return None
|
|
950
|
-
elif isinstance(task_spec, list):
|
|
951
|
-
task_ids = task_spec
|
|
952
|
-
else:
|
|
953
|
-
task_ids = [str(task_spec)]
|
|
954
|
-
|
|
955
|
-
return await require_task_complete(
|
|
956
|
-
task_manager=self.task_manager,
|
|
957
|
-
session_id=context.session_id,
|
|
958
|
-
task_ids=task_ids,
|
|
959
|
-
event_data=context.event_data,
|
|
960
|
-
project_id=project_id,
|
|
961
|
-
workflow_state=context.state,
|
|
962
|
-
-        )
-
-    async def _handle_validate_session_task_scope(
-        self, context: ActionContext, **kwargs: Any
-    ) -> dict[str, Any] | None:
-        """Validate that claimed task is within session_task scope.
-
-        When session_task is set in workflow state, this blocks claiming
-        tasks that are not descendants of session_task.
-        """
-        return await validate_session_task_scope(
-            task_manager=self.task_manager,
-            workflow_state=context.state,
-            event_data=context.event_data,
-        )
-
-    async def _handle_webhook(self, context: ActionContext, **kwargs: Any) -> dict[str, Any] | None:
-        """Execute a webhook HTTP request.
-
-        Args (via kwargs):
-            url: Target URL for the request (required unless webhook_id provided)
-            webhook_id: ID of a pre-configured webhook (alternative to url)
-            method: HTTP method (GET, POST, PUT, PATCH, DELETE), default: POST
-            headers: Request headers dict (supports ${secrets.VAR} interpolation)
-            payload: Request body as dict or string (supports template interpolation)
-            timeout: Request timeout in seconds (1-300), default: 30
-            retry: Retry configuration dict with:
-                - max_attempts: Max retry attempts (1-10), default: 3
-                - backoff_seconds: Initial backoff delay, default: 1
-                - retry_on_status: HTTP status codes to retry on
-            capture_response: Response capture config with:
-                - status_var: Variable name for status code
-                - body_var: Variable name for response body
-                - headers_var: Variable name for response headers
-            on_success: Step to transition to on success (2xx)
-            on_failure: Step to transition to on failure
-
-        Returns:
-            Dict with success status, status_code, and captured response data.
-        """
-        try:
-            # Parse WebhookAction from kwargs to validate config
-            webhook_action = WebhookAction.from_dict(kwargs)
-        except ValueError as e:
-            logger.error(f"Invalid webhook action config: {e}")
-            return {"success": False, "error": str(e)}
-
-        # Build context for variable interpolation
-        interpolation_context: dict[str, Any] = {}
-        if context.state.variables:
-            interpolation_context["state"] = {"variables": context.state.variables}
-        if context.state.artifacts:
-            interpolation_context["artifacts"] = context.state.artifacts
-
-        # Get secrets from config if available
-        secrets: dict[str, str] = {}
-        if self.config:
-            secrets = getattr(self.config, "webhook_secrets", {})
-
-        # Create executor with template engine for payload interpolation
-        executor = WebhookExecutor(
-            template_engine=context.template_engine,
-            secrets=secrets,
-        )
-
-        # Execute the webhook
-        if webhook_action.url:
-            result = await executor.execute(
-                url=webhook_action.url,
-                method=webhook_action.method,
-                headers=webhook_action.headers,
-                payload=webhook_action.payload,
-                timeout=webhook_action.timeout,
-                retry_config=webhook_action.retry.to_dict() if webhook_action.retry else None,
-                context=interpolation_context,
-            )
-        elif webhook_action.webhook_id:
-            # webhook_id execution requires a registry which would be configured
-            # at the daemon level - for now we return an error if no registry
-            logger.warning("webhook_id execution not yet supported without registry")
-            return {"success": False, "error": "webhook_id requires configured webhook registry"}
-        else:
-            return {"success": False, "error": "Either url or webhook_id is required"}
-
-        # Capture response into workflow variables if configured
-        if webhook_action.capture_response:
-            if not context.state.variables:
-                context.state.variables = {}
-
-            capture = webhook_action.capture_response
-            if capture.status_var and result.status_code is not None:
-                context.state.variables[capture.status_var] = result.status_code
-            if capture.body_var and result.body is not None:
-                # Try to parse as JSON, fall back to raw string
-                json_body = result.json_body()
-                context.state.variables[capture.body_var] = json_body if json_body else result.body
-            if capture.headers_var and result.headers is not None:
-                context.state.variables[capture.headers_var] = result.headers
-
-        # Log outcome
-        if result.success:
-            logger.info(
-                f"Webhook {webhook_action.method} {webhook_action.url} succeeded: {result.status_code}"
+        async def req_stop(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return request_stop(
+                executor.stop_registry,
+                context.session_id,
+                kw.get("source", "workflow"),
+                kw.get("reason"),
             )
-        else:
-            logger.warning(
-                f"Webhook {webhook_action.method} {webhook_action.url} failed: "
-                f"{result.error or result.status_code}"
-            )
-
-        return {
-            "success": result.success,
-            "status_code": result.status_code,
-            "error": result.error,
-            "body": result.body if result.success else None,
-        }
-
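For orientation, the removed `_handle_webhook` docstring above lists the kwargs a webhook action accepts. A minimal sketch of such an action config built only from those documented fields; the URL, secret name, payload text, and variable names are hypothetical:

```python
# Hypothetical kwargs for a webhook action; keys mirror the removed docstring.
webhook_kwargs = {
    "url": "https://example.com/hooks/notify",      # required unless webhook_id is set
    "method": "POST",                                # GET, POST, PUT, PATCH, DELETE
    "headers": {"Authorization": "Bearer ${secrets.HOOK_TOKEN}"},  # ${secrets.VAR} interpolation
    "payload": {"text": "workflow step finished"},   # dict or string, template-interpolated
    "timeout": 30,                                   # seconds, 1-300
    "retry": {"max_attempts": 3, "backoff_seconds": 1, "retry_on_status": [502, 503]},
    "capture_response": {"status_var": "hook_status", "body_var": "hook_body"},
}
```

Per the removed handler, a 2xx response would then be exposed to later steps as the `hook_status` and `hook_body` workflow variables.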
-    # --- Stop Signal Actions ---
-
-    async def _handle_check_stop_signal(
-        self, context: ActionContext, **kwargs: Any
-    ) -> dict[str, Any] | None:
-        """Check if a stop signal has been sent for this session.
-
-        Args (via kwargs):
-            acknowledge: If True, acknowledge the signal (session will stop)
-
-        Returns:
-            Dict with has_signal, signal details, and optional inject_context
-        """
-        return check_stop_signal(
-            stop_registry=self.stop_registry,
-            session_id=context.session_id,
-            state=context.state,
-            acknowledge=kwargs.get("acknowledge", False),
-        )
-
-    async def _handle_request_stop(
-        self, context: ActionContext, **kwargs: Any
-    ) -> dict[str, Any] | None:
-        """Request a session to stop (used by stuck detection, etc.).
-
-        Args (via kwargs):
-            session_id: The session to signal (defaults to current session)
-            source: Source of the request (default: "workflow")
-            reason: Optional reason for the stop request
-
-        Returns:
-            Dict with success status and signal details
-        """
-        target_session = kwargs.get("session_id", context.session_id)
-        return request_stop(
-            stop_registry=self.stop_registry,
-            session_id=target_session,
-            source=kwargs.get("source", "workflow"),
-            reason=kwargs.get("reason"),
-        )
-
-    async def _handle_clear_stop_signal(
-        self, context: ActionContext, **kwargs: Any
-    ) -> dict[str, Any] | None:
-        """Clear any stop signal for a session.
-
-        Args (via kwargs):
-            session_id: The session to clear (defaults to current session)
-
-        Returns:
-            Dict with success status
-        """
-        target_session = kwargs.get("session_id", context.session_id)
-        return clear_stop_signal(
-            stop_registry=self.stop_registry,
-            session_id=target_session,
-        )

-
-
-
-
+        async def clear_stop(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return clear_stop_signal(
+                executor.stop_registry, kw.get("session_id") or context.session_id
+            )

-
+        self.register("check_stop_signal", check_stop)
+        self.register("request_stop", req_stop)
+        self.register("clear_stop_signal", clear_stop)

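Both the removed `_handle_*` stop methods and the new closures delegate to the same `check_stop_signal` / `request_stop` / `clear_stop_signal` helpers. A rough sketch of a peek-then-acknowledge flow, using only the keyword signature and the `has_signal` / `acknowledge` semantics documented above; the surrounding control flow is illustrative:

```python
# Peek at any pending stop signal without acknowledging it.
result = check_stop_signal(
    stop_registry=executor.stop_registry,
    session_id=context.session_id,
    state=context.state,
    acknowledge=False,
)

if result and result.get("has_signal"):
    # Acknowledging is what actually lets the session stop,
    # per the "acknowledge: If True, ..." note in the removed docstring.
    check_stop_signal(
        stop_registry=executor.stop_registry,
        session_id=context.session_id,
        state=context.state,
        acknowledge=True,
    )
```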
-
-
-
-            **kwargs: Additional event data
-        """
-        import asyncio
+    def _register_autonomous_actions(self) -> None:
+        """Register autonomous actions accessing self at call time."""
+        executor = self

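The added `_register_autonomous_actions` (like the stop-action closures above it) swaps bound `_handle_*` methods for nested closures that capture `executor = self` and are registered under a string name. A stripped-down illustration of that pattern, independent of gobby's real classes and signatures:

```python
from typing import Any, Awaitable, Callable

Handler = Callable[..., Awaitable[dict[str, Any] | None]]


class ActionRegistry:
    """Toy registry showing the closure-based registration pattern."""

    def __init__(self) -> None:
        self._handlers: dict[str, Handler] = {}

    def register(self, name: str, handler: Handler) -> None:
        self._handlers[name] = handler

    def _register_actions(self) -> None:
        executor = self  # captured by the closures; state is read at call time

        async def ping(context: Any, **kw: Any) -> dict[str, Any] | None:
            return {"registered": len(executor._handlers)}

        self.register("ping", ping)
```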
-
-            return
-
-        try:
-            # Create non-blocking task for broadcast
-            task = asyncio.create_task(
-                self.websocket_server.broadcast_autonomous_event(
-                    event=event,
-                    session_id=session_id,
-                    **kwargs,
-                )
+        async def start_tracking(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return start_progress_tracking(
+                executor.progress_tracker, context.session_id, context.state
             )
-            # Add callback to log errors silently
-            task.add_done_callback(
-                lambda t: (
-                    logger.debug(f"Broadcast {event} failed: {t.exception()}")
-                    if t.exception()
-                    else None
-                )
-            )
-        except Exception as e:
-            logger.debug(f"Failed to schedule broadcast for {event}: {e}")

-
-
-
-
-
-
-            session_id=context.session_id,
-            state=context.state,
-        )
-
-        # Broadcast loop_started event
-        if result and result.get("success"):
-            await self._broadcast_autonomous_event(
-                event="loop_started",
-                session_id=context.session_id,
+        async def stop_tracking(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return stop_progress_tracking(
+                executor.progress_tracker,
+                context.session_id,
+                context.state,
+                kw.get("keep_data", False),
             )

-
-
-
-
-
-
-
-            progress_tracker=self.progress_tracker,
-            session_id=context.session_id,
-            state=context.state,
-            keep_data=kwargs.get("keep_data", False),
-        )
-
-        # Broadcast loop_stopped event
-        if result and result.get("success"):
-            await self._broadcast_autonomous_event(
-                event="loop_stopped",
-                session_id=context.session_id,
-                final_summary=result.get("final_summary"),
+        async def record_prog(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return record_progress(
+                executor.progress_tracker,
+                context.session_id,
+                kw.get("progress_type", "tool_call"),
+                kw.get("tool_name"),
+                kw.get("details"),
             )

-
+        async def detect_loop(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return detect_task_loop(executor.stuck_detector, context.session_id, context.state)

-
-
-    ) -> dict[str, Any] | None:
-        """Record a progress event."""
-        result = record_progress(
-            progress_tracker=self.progress_tracker,
-            session_id=context.session_id,
-            progress_type=kwargs.get("progress_type", "tool_call"),
-            tool_name=kwargs.get("tool_name"),
-            details=kwargs.get("details"),
-        )
-
-        # Broadcast progress_recorded event for high-value events
-        if result and result.get("success") and result.get("event", {}).get("is_high_value"):
-            await self._broadcast_autonomous_event(
-                event="progress_recorded",
-                session_id=context.session_id,
-                progress_type=result.get("event", {}).get("type"),
-                is_high_value=True,
-            )
+        async def detect_stk(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return detect_stuck(executor.stuck_detector, context.session_id, context.state)

-
-
-
-
-
-
-        result = detect_task_loop(
-            stuck_detector=self.stuck_detector,
-            session_id=context.session_id,
-            state=context.state,
-        )
-
-        # Broadcast stuck_detected if stuck
-        if result and result.get("is_stuck"):
-            await self._broadcast_autonomous_event(
-                event="stuck_detected",
-                session_id=context.session_id,
-                layer="task_loop",
-                reason=result.get("reason"),
-                details=result.get("details"),
+        async def record_sel(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return record_task_selection(
+                executor.stuck_detector,
+                context.session_id,
+                kw.get("task_id", ""),
+                kw.get("context"),
             )

-
+        async def get_summary(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return get_progress_summary(executor.progress_tracker, context.session_id)

-
-        self,
-
-        ""
-
-
-
-            state=context.state,
-        )
-
-        # Broadcast stuck_detected if stuck
-        if result and result.get("is_stuck"):
-            await self._broadcast_autonomous_event(
-                event="stuck_detected",
-                session_id=context.session_id,
-                layer=result.get("layer"),
-                reason=result.get("reason"),
-                suggested_action=result.get("suggested_action"),
-            )
-
-        return result
+        self.register("start_progress_tracking", start_tracking)
+        self.register("stop_progress_tracking", stop_tracking)
+        self.register("record_progress", record_prog)
+        self.register("detect_task_loop", detect_loop)
+        self.register("detect_stuck", detect_stk)
+        self.register("record_task_selection", record_sel)
+        self.register("get_progress_summary", get_summary)

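The progress and stuck-detection closures call their helpers positionally with the same values the removed keyword-style handlers passed, minus the event broadcasts. A sketch of the equivalent direct calls; the tool name and `details` payload are illustrative only:

```python
# Record a progress event for the current session.
record_progress(
    executor.progress_tracker,
    context.session_id,
    "tool_call",              # progress_type (the closure's default)
    "Edit",                   # tool_name -- illustrative
    {"path": "example.py"},   # details -- illustrative
)

# Ask the stuck detector whether the session is looping on the same task.
detect_task_loop(executor.stuck_detector, context.session_id, context.state)
```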
-    async def
-        self, context: ActionContext, **kwargs: Any
+    async def execute(
+        self, action_type: str, context: ActionContext, **kwargs: Any
     ) -> dict[str, Any] | None:
-        """
-
-
-
-
-            task_id=task_id,
-            context=kwargs.get("context"),
-        )
-
-        # Broadcast task_started event
-        if result and result.get("success"):
-            await self._broadcast_autonomous_event(
-                event="task_started",
-                session_id=context.session_id,
-                task_id=task_id,
-            )
-
-        return result
+        """Execute an action."""
+        handler = self._handlers.get(action_type)
+        if not handler:
+            logger.warning(f"Unknown action type: {action_type}")
+            return None

-
-
-
-
-
-            progress_tracker=self.progress_tracker,
-            session_id=context.session_id,
-        )
+        try:
+            return await handler(context, **kwargs)
+        except Exception as e:
+            logger.error(f"Error executing action {action_type}: {e}", exc_info=True)
+            return {"error": str(e)}
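Combined with the `self.register(...)` calls above, the new `execute` method turns the action layer into a name-based dispatcher. A usage sketch of the behaviour shown here; the action name and kwargs are examples, not a fixed API:

```python
# Dispatch a registered action by name; extra kwargs are forwarded to the handler.
result = await executor.execute("check_stop_signal", context, acknowledge=True)

# Unknown action types log a warning and return None;
# handler exceptions are caught and returned as {"error": "..."}.
missing = await executor.execute("no_such_action", context)
assert missing is None
```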