gobby-0.2.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +3 -0
- gobby/adapters/__init__.py +30 -0
- gobby/adapters/base.py +93 -0
- gobby/adapters/claude_code.py +276 -0
- gobby/adapters/codex.py +1292 -0
- gobby/adapters/gemini.py +343 -0
- gobby/agents/__init__.py +37 -0
- gobby/agents/codex_session.py +120 -0
- gobby/agents/constants.py +112 -0
- gobby/agents/context.py +362 -0
- gobby/agents/definitions.py +133 -0
- gobby/agents/gemini_session.py +111 -0
- gobby/agents/registry.py +618 -0
- gobby/agents/runner.py +968 -0
- gobby/agents/session.py +259 -0
- gobby/agents/spawn.py +916 -0
- gobby/agents/spawners/__init__.py +77 -0
- gobby/agents/spawners/base.py +142 -0
- gobby/agents/spawners/cross_platform.py +266 -0
- gobby/agents/spawners/embedded.py +225 -0
- gobby/agents/spawners/headless.py +226 -0
- gobby/agents/spawners/linux.py +125 -0
- gobby/agents/spawners/macos.py +277 -0
- gobby/agents/spawners/windows.py +308 -0
- gobby/agents/tty_config.py +319 -0
- gobby/autonomous/__init__.py +32 -0
- gobby/autonomous/progress_tracker.py +447 -0
- gobby/autonomous/stop_registry.py +269 -0
- gobby/autonomous/stuck_detector.py +383 -0
- gobby/cli/__init__.py +67 -0
- gobby/cli/__main__.py +8 -0
- gobby/cli/agents.py +529 -0
- gobby/cli/artifacts.py +266 -0
- gobby/cli/daemon.py +329 -0
- gobby/cli/extensions.py +526 -0
- gobby/cli/github.py +263 -0
- gobby/cli/init.py +53 -0
- gobby/cli/install.py +614 -0
- gobby/cli/installers/__init__.py +37 -0
- gobby/cli/installers/antigravity.py +65 -0
- gobby/cli/installers/claude.py +363 -0
- gobby/cli/installers/codex.py +192 -0
- gobby/cli/installers/gemini.py +294 -0
- gobby/cli/installers/git_hooks.py +377 -0
- gobby/cli/installers/shared.py +737 -0
- gobby/cli/linear.py +250 -0
- gobby/cli/mcp.py +30 -0
- gobby/cli/mcp_proxy.py +698 -0
- gobby/cli/memory.py +304 -0
- gobby/cli/merge.py +384 -0
- gobby/cli/projects.py +79 -0
- gobby/cli/sessions.py +622 -0
- gobby/cli/tasks/__init__.py +30 -0
- gobby/cli/tasks/_utils.py +658 -0
- gobby/cli/tasks/ai.py +1025 -0
- gobby/cli/tasks/commits.py +169 -0
- gobby/cli/tasks/crud.py +685 -0
- gobby/cli/tasks/deps.py +135 -0
- gobby/cli/tasks/labels.py +63 -0
- gobby/cli/tasks/main.py +273 -0
- gobby/cli/tasks/search.py +178 -0
- gobby/cli/tui.py +34 -0
- gobby/cli/utils.py +513 -0
- gobby/cli/workflows.py +927 -0
- gobby/cli/worktrees.py +481 -0
- gobby/config/__init__.py +129 -0
- gobby/config/app.py +551 -0
- gobby/config/extensions.py +167 -0
- gobby/config/features.py +472 -0
- gobby/config/llm_providers.py +98 -0
- gobby/config/logging.py +66 -0
- gobby/config/mcp.py +346 -0
- gobby/config/persistence.py +247 -0
- gobby/config/servers.py +141 -0
- gobby/config/sessions.py +250 -0
- gobby/config/tasks.py +784 -0
- gobby/hooks/__init__.py +104 -0
- gobby/hooks/artifact_capture.py +213 -0
- gobby/hooks/broadcaster.py +243 -0
- gobby/hooks/event_handlers.py +723 -0
- gobby/hooks/events.py +218 -0
- gobby/hooks/git.py +169 -0
- gobby/hooks/health_monitor.py +171 -0
- gobby/hooks/hook_manager.py +856 -0
- gobby/hooks/hook_types.py +575 -0
- gobby/hooks/plugins.py +813 -0
- gobby/hooks/session_coordinator.py +396 -0
- gobby/hooks/verification_runner.py +268 -0
- gobby/hooks/webhooks.py +339 -0
- gobby/install/claude/commands/gobby/bug.md +51 -0
- gobby/install/claude/commands/gobby/chore.md +51 -0
- gobby/install/claude/commands/gobby/epic.md +52 -0
- gobby/install/claude/commands/gobby/eval.md +235 -0
- gobby/install/claude/commands/gobby/feat.md +49 -0
- gobby/install/claude/commands/gobby/nit.md +52 -0
- gobby/install/claude/commands/gobby/ref.md +52 -0
- gobby/install/claude/hooks/HOOK_SCHEMAS.md +632 -0
- gobby/install/claude/hooks/hook_dispatcher.py +364 -0
- gobby/install/claude/hooks/validate_settings.py +102 -0
- gobby/install/claude/hooks-template.json +118 -0
- gobby/install/codex/hooks/hook_dispatcher.py +153 -0
- gobby/install/codex/prompts/forget.md +7 -0
- gobby/install/codex/prompts/memories.md +7 -0
- gobby/install/codex/prompts/recall.md +7 -0
- gobby/install/codex/prompts/remember.md +13 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +268 -0
- gobby/install/gemini/hooks-template.json +138 -0
- gobby/install/shared/plugins/code_guardian.py +456 -0
- gobby/install/shared/plugins/example_notify.py +331 -0
- gobby/integrations/__init__.py +10 -0
- gobby/integrations/github.py +145 -0
- gobby/integrations/linear.py +145 -0
- gobby/llm/__init__.py +40 -0
- gobby/llm/base.py +120 -0
- gobby/llm/claude.py +578 -0
- gobby/llm/claude_executor.py +503 -0
- gobby/llm/codex.py +322 -0
- gobby/llm/codex_executor.py +513 -0
- gobby/llm/executor.py +316 -0
- gobby/llm/factory.py +34 -0
- gobby/llm/gemini.py +258 -0
- gobby/llm/gemini_executor.py +339 -0
- gobby/llm/litellm.py +287 -0
- gobby/llm/litellm_executor.py +303 -0
- gobby/llm/resolver.py +499 -0
- gobby/llm/service.py +236 -0
- gobby/mcp_proxy/__init__.py +29 -0
- gobby/mcp_proxy/actions.py +175 -0
- gobby/mcp_proxy/daemon_control.py +198 -0
- gobby/mcp_proxy/importer.py +436 -0
- gobby/mcp_proxy/lazy.py +325 -0
- gobby/mcp_proxy/manager.py +798 -0
- gobby/mcp_proxy/metrics.py +609 -0
- gobby/mcp_proxy/models.py +139 -0
- gobby/mcp_proxy/registries.py +215 -0
- gobby/mcp_proxy/schema_hash.py +381 -0
- gobby/mcp_proxy/semantic_search.py +706 -0
- gobby/mcp_proxy/server.py +549 -0
- gobby/mcp_proxy/services/__init__.py +0 -0
- gobby/mcp_proxy/services/fallback.py +306 -0
- gobby/mcp_proxy/services/recommendation.py +224 -0
- gobby/mcp_proxy/services/server_mgmt.py +214 -0
- gobby/mcp_proxy/services/system.py +72 -0
- gobby/mcp_proxy/services/tool_filter.py +231 -0
- gobby/mcp_proxy/services/tool_proxy.py +309 -0
- gobby/mcp_proxy/stdio.py +565 -0
- gobby/mcp_proxy/tools/__init__.py +27 -0
- gobby/mcp_proxy/tools/agents.py +1103 -0
- gobby/mcp_proxy/tools/artifacts.py +207 -0
- gobby/mcp_proxy/tools/hub.py +335 -0
- gobby/mcp_proxy/tools/internal.py +337 -0
- gobby/mcp_proxy/tools/memory.py +543 -0
- gobby/mcp_proxy/tools/merge.py +422 -0
- gobby/mcp_proxy/tools/metrics.py +283 -0
- gobby/mcp_proxy/tools/orchestration/__init__.py +23 -0
- gobby/mcp_proxy/tools/orchestration/cleanup.py +619 -0
- gobby/mcp_proxy/tools/orchestration/monitor.py +380 -0
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +746 -0
- gobby/mcp_proxy/tools/orchestration/review.py +736 -0
- gobby/mcp_proxy/tools/orchestration/utils.py +16 -0
- gobby/mcp_proxy/tools/session_messages.py +1056 -0
- gobby/mcp_proxy/tools/task_dependencies.py +219 -0
- gobby/mcp_proxy/tools/task_expansion.py +591 -0
- gobby/mcp_proxy/tools/task_github.py +393 -0
- gobby/mcp_proxy/tools/task_linear.py +379 -0
- gobby/mcp_proxy/tools/task_orchestration.py +77 -0
- gobby/mcp_proxy/tools/task_readiness.py +522 -0
- gobby/mcp_proxy/tools/task_sync.py +351 -0
- gobby/mcp_proxy/tools/task_validation.py +843 -0
- gobby/mcp_proxy/tools/tasks/__init__.py +25 -0
- gobby/mcp_proxy/tools/tasks/_context.py +112 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +516 -0
- gobby/mcp_proxy/tools/tasks/_factory.py +176 -0
- gobby/mcp_proxy/tools/tasks/_helpers.py +129 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +517 -0
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +301 -0
- gobby/mcp_proxy/tools/tasks/_resolution.py +55 -0
- gobby/mcp_proxy/tools/tasks/_search.py +215 -0
- gobby/mcp_proxy/tools/tasks/_session.py +125 -0
- gobby/mcp_proxy/tools/workflows.py +973 -0
- gobby/mcp_proxy/tools/worktrees.py +1264 -0
- gobby/mcp_proxy/transports/__init__.py +0 -0
- gobby/mcp_proxy/transports/base.py +95 -0
- gobby/mcp_proxy/transports/factory.py +44 -0
- gobby/mcp_proxy/transports/http.py +139 -0
- gobby/mcp_proxy/transports/stdio.py +213 -0
- gobby/mcp_proxy/transports/websocket.py +136 -0
- gobby/memory/backends/__init__.py +116 -0
- gobby/memory/backends/mem0.py +408 -0
- gobby/memory/backends/memu.py +485 -0
- gobby/memory/backends/null.py +111 -0
- gobby/memory/backends/openmemory.py +537 -0
- gobby/memory/backends/sqlite.py +304 -0
- gobby/memory/context.py +87 -0
- gobby/memory/manager.py +1001 -0
- gobby/memory/protocol.py +451 -0
- gobby/memory/search/__init__.py +66 -0
- gobby/memory/search/text.py +127 -0
- gobby/memory/viz.py +258 -0
- gobby/prompts/__init__.py +13 -0
- gobby/prompts/defaults/expansion/system.md +119 -0
- gobby/prompts/defaults/expansion/user.md +48 -0
- gobby/prompts/defaults/external_validation/agent.md +72 -0
- gobby/prompts/defaults/external_validation/external.md +63 -0
- gobby/prompts/defaults/external_validation/spawn.md +83 -0
- gobby/prompts/defaults/external_validation/system.md +6 -0
- gobby/prompts/defaults/features/import_mcp.md +22 -0
- gobby/prompts/defaults/features/import_mcp_github.md +17 -0
- gobby/prompts/defaults/features/import_mcp_search.md +16 -0
- gobby/prompts/defaults/features/recommend_tools.md +32 -0
- gobby/prompts/defaults/features/recommend_tools_hybrid.md +35 -0
- gobby/prompts/defaults/features/recommend_tools_llm.md +30 -0
- gobby/prompts/defaults/features/server_description.md +20 -0
- gobby/prompts/defaults/features/server_description_system.md +6 -0
- gobby/prompts/defaults/features/task_description.md +31 -0
- gobby/prompts/defaults/features/task_description_system.md +6 -0
- gobby/prompts/defaults/features/tool_summary.md +17 -0
- gobby/prompts/defaults/features/tool_summary_system.md +6 -0
- gobby/prompts/defaults/research/step.md +58 -0
- gobby/prompts/defaults/validation/criteria.md +47 -0
- gobby/prompts/defaults/validation/validate.md +38 -0
- gobby/prompts/loader.py +346 -0
- gobby/prompts/models.py +113 -0
- gobby/py.typed +0 -0
- gobby/runner.py +488 -0
- gobby/search/__init__.py +23 -0
- gobby/search/protocol.py +104 -0
- gobby/search/tfidf.py +232 -0
- gobby/servers/__init__.py +7 -0
- gobby/servers/http.py +636 -0
- gobby/servers/models.py +31 -0
- gobby/servers/routes/__init__.py +23 -0
- gobby/servers/routes/admin.py +416 -0
- gobby/servers/routes/dependencies.py +118 -0
- gobby/servers/routes/mcp/__init__.py +24 -0
- gobby/servers/routes/mcp/hooks.py +135 -0
- gobby/servers/routes/mcp/plugins.py +121 -0
- gobby/servers/routes/mcp/tools.py +1337 -0
- gobby/servers/routes/mcp/webhooks.py +159 -0
- gobby/servers/routes/sessions.py +582 -0
- gobby/servers/websocket.py +766 -0
- gobby/sessions/__init__.py +13 -0
- gobby/sessions/analyzer.py +322 -0
- gobby/sessions/lifecycle.py +240 -0
- gobby/sessions/manager.py +563 -0
- gobby/sessions/processor.py +225 -0
- gobby/sessions/summary.py +532 -0
- gobby/sessions/transcripts/__init__.py +41 -0
- gobby/sessions/transcripts/base.py +125 -0
- gobby/sessions/transcripts/claude.py +386 -0
- gobby/sessions/transcripts/codex.py +143 -0
- gobby/sessions/transcripts/gemini.py +195 -0
- gobby/storage/__init__.py +21 -0
- gobby/storage/agents.py +409 -0
- gobby/storage/artifact_classifier.py +341 -0
- gobby/storage/artifacts.py +285 -0
- gobby/storage/compaction.py +67 -0
- gobby/storage/database.py +357 -0
- gobby/storage/inter_session_messages.py +194 -0
- gobby/storage/mcp.py +680 -0
- gobby/storage/memories.py +562 -0
- gobby/storage/merge_resolutions.py +550 -0
- gobby/storage/migrations.py +860 -0
- gobby/storage/migrations_legacy.py +1359 -0
- gobby/storage/projects.py +166 -0
- gobby/storage/session_messages.py +251 -0
- gobby/storage/session_tasks.py +97 -0
- gobby/storage/sessions.py +817 -0
- gobby/storage/task_dependencies.py +223 -0
- gobby/storage/tasks/__init__.py +42 -0
- gobby/storage/tasks/_aggregates.py +180 -0
- gobby/storage/tasks/_crud.py +449 -0
- gobby/storage/tasks/_id.py +104 -0
- gobby/storage/tasks/_lifecycle.py +311 -0
- gobby/storage/tasks/_manager.py +889 -0
- gobby/storage/tasks/_models.py +300 -0
- gobby/storage/tasks/_ordering.py +119 -0
- gobby/storage/tasks/_path_cache.py +110 -0
- gobby/storage/tasks/_queries.py +343 -0
- gobby/storage/tasks/_search.py +143 -0
- gobby/storage/workflow_audit.py +393 -0
- gobby/storage/worktrees.py +547 -0
- gobby/sync/__init__.py +29 -0
- gobby/sync/github.py +333 -0
- gobby/sync/linear.py +304 -0
- gobby/sync/memories.py +284 -0
- gobby/sync/tasks.py +641 -0
- gobby/tasks/__init__.py +8 -0
- gobby/tasks/build_verification.py +193 -0
- gobby/tasks/commits.py +633 -0
- gobby/tasks/context.py +747 -0
- gobby/tasks/criteria.py +342 -0
- gobby/tasks/enhanced_validator.py +226 -0
- gobby/tasks/escalation.py +263 -0
- gobby/tasks/expansion.py +626 -0
- gobby/tasks/external_validator.py +764 -0
- gobby/tasks/issue_extraction.py +171 -0
- gobby/tasks/prompts/expand.py +327 -0
- gobby/tasks/research.py +421 -0
- gobby/tasks/tdd.py +352 -0
- gobby/tasks/tree_builder.py +263 -0
- gobby/tasks/validation.py +712 -0
- gobby/tasks/validation_history.py +357 -0
- gobby/tasks/validation_models.py +89 -0
- gobby/tools/__init__.py +0 -0
- gobby/tools/summarizer.py +170 -0
- gobby/tui/__init__.py +5 -0
- gobby/tui/api_client.py +281 -0
- gobby/tui/app.py +327 -0
- gobby/tui/screens/__init__.py +25 -0
- gobby/tui/screens/agents.py +333 -0
- gobby/tui/screens/chat.py +450 -0
- gobby/tui/screens/dashboard.py +377 -0
- gobby/tui/screens/memory.py +305 -0
- gobby/tui/screens/metrics.py +231 -0
- gobby/tui/screens/orchestrator.py +904 -0
- gobby/tui/screens/sessions.py +412 -0
- gobby/tui/screens/tasks.py +442 -0
- gobby/tui/screens/workflows.py +289 -0
- gobby/tui/screens/worktrees.py +174 -0
- gobby/tui/widgets/__init__.py +21 -0
- gobby/tui/widgets/chat.py +210 -0
- gobby/tui/widgets/conductor.py +104 -0
- gobby/tui/widgets/menu.py +132 -0
- gobby/tui/widgets/message_panel.py +160 -0
- gobby/tui/widgets/review_gate.py +224 -0
- gobby/tui/widgets/task_tree.py +99 -0
- gobby/tui/widgets/token_budget.py +166 -0
- gobby/tui/ws_client.py +258 -0
- gobby/utils/__init__.py +3 -0
- gobby/utils/daemon_client.py +235 -0
- gobby/utils/git.py +222 -0
- gobby/utils/id.py +38 -0
- gobby/utils/json_helpers.py +161 -0
- gobby/utils/logging.py +376 -0
- gobby/utils/machine_id.py +135 -0
- gobby/utils/metrics.py +589 -0
- gobby/utils/project_context.py +182 -0
- gobby/utils/project_init.py +263 -0
- gobby/utils/status.py +256 -0
- gobby/utils/validation.py +80 -0
- gobby/utils/version.py +23 -0
- gobby/workflows/__init__.py +4 -0
- gobby/workflows/actions.py +1310 -0
- gobby/workflows/approval_flow.py +138 -0
- gobby/workflows/artifact_actions.py +103 -0
- gobby/workflows/audit_helpers.py +110 -0
- gobby/workflows/autonomous_actions.py +286 -0
- gobby/workflows/context_actions.py +394 -0
- gobby/workflows/definitions.py +130 -0
- gobby/workflows/detection_helpers.py +208 -0
- gobby/workflows/engine.py +485 -0
- gobby/workflows/evaluator.py +669 -0
- gobby/workflows/git_utils.py +96 -0
- gobby/workflows/hooks.py +169 -0
- gobby/workflows/lifecycle_evaluator.py +613 -0
- gobby/workflows/llm_actions.py +70 -0
- gobby/workflows/loader.py +333 -0
- gobby/workflows/mcp_actions.py +60 -0
- gobby/workflows/memory_actions.py +272 -0
- gobby/workflows/premature_stop.py +164 -0
- gobby/workflows/session_actions.py +139 -0
- gobby/workflows/state_actions.py +123 -0
- gobby/workflows/state_manager.py +104 -0
- gobby/workflows/stop_signal_actions.py +163 -0
- gobby/workflows/summary_actions.py +344 -0
- gobby/workflows/task_actions.py +249 -0
- gobby/workflows/task_enforcement_actions.py +901 -0
- gobby/workflows/templates.py +52 -0
- gobby/workflows/todo_actions.py +84 -0
- gobby/workflows/webhook.py +223 -0
- gobby/workflows/webhook_executor.py +399 -0
- gobby/worktrees/__init__.py +5 -0
- gobby/worktrees/git.py +690 -0
- gobby/worktrees/merge/__init__.py +20 -0
- gobby/worktrees/merge/conflict_parser.py +177 -0
- gobby/worktrees/merge/resolver.py +485 -0
- gobby-0.2.5.dist-info/METADATA +351 -0
- gobby-0.2.5.dist-info/RECORD +383 -0
- gobby-0.2.5.dist-info/WHEEL +5 -0
- gobby-0.2.5.dist-info/entry_points.txt +2 -0
- gobby-0.2.5.dist-info/licenses/LICENSE.md +193 -0
- gobby-0.2.5.dist-info/top_level.txt +1 -0
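The listing above mirrors the wheel's RECORD. As a cross-check, a wheel is a plain zip archive, so a few lines of Python can reproduce the member list from the artifact itself. This is only an illustrative sketch and assumes the wheel has been downloaded locally under its canonical filename:

# Sketch: list the wheel's members to cross-check the file listing above.
# Assumes a local download named gobby-0.2.5-py3-none-any.whl (adjust the path as needed).
import zipfile

with zipfile.ZipFile("gobby-0.2.5-py3-none-any.whl") as whl:
    for info in sorted(whl.infolist(), key=lambda i: i.filename):
        print(f"{info.filename}  ({info.file_size} bytes)")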
gobby/workflows/lifecycle_evaluator.py
@@ -0,0 +1,613 @@
"""
Lifecycle workflow evaluation for workflow engine.

Extracted from engine.py to reduce complexity.
Handles discovery and evaluation of lifecycle workflows and their triggers.
"""

import logging
from datetime import UTC, datetime
from typing import TYPE_CHECKING, Any, Literal

from gobby.hooks.events import HookEvent, HookEventType, HookResponse

if TYPE_CHECKING:
    from .actions import ActionExecutor
    from .definitions import WorkflowDefinition, WorkflowState
    from .evaluator import ConditionEvaluator
    from .loader import WorkflowLoader
    from .state_manager import WorkflowStateManager

logger = logging.getLogger(__name__)

# Maximum iterations to prevent infinite loops in trigger evaluation
MAX_TRIGGER_ITERATIONS = 10

# Variables to inherit from parent session
VARS_TO_INHERIT = ["plan_mode"]

# Maps canonical trigger names to their legacy aliases for backward compatibility
TRIGGER_ALIASES: dict[str, list[str]] = {
    "on_before_agent": ["on_prompt_submit"],
    "on_before_tool": ["on_tool_call"],
    "on_after_tool": ["on_tool_result"],
}


def process_action_result(
    result: dict[str, Any],
    context_data: dict[str, Any],
    state: "WorkflowState",
    injected_context: list[str],
) -> str | None:
    """
    Process action execution result.

    Updates shared context and state variables.
    Handles inject_context, inject_message, and system_message.

    Args:
        result: The action execution result dictionary
        context_data: Shared context to update
        state: Workflow state to update
        injected_context: List to append injected content to

    Returns:
        New system_message if present, None otherwise
    """
    # Update shared context for chaining
    context_data.update(result)
    state.variables.update(result)

    if "inject_context" in result:
        msg = result["inject_context"]
        logger.debug(f"Found inject_context in result, length={len(msg)}")
        injected_context.append(msg)

    if "inject_message" in result:
        msg = result["inject_message"]
        logger.debug(f"Found inject_message in result, length={len(msg)}")
        injected_context.append(msg)

    return result.get("system_message")


async def evaluate_workflow_triggers(
    workflow: "WorkflowDefinition",
    event: HookEvent,
    context_data: dict[str, Any],
    state_manager: "WorkflowStateManager",
    action_executor: "ActionExecutor",
    evaluator: "ConditionEvaluator",
) -> HookResponse:
    """
    Evaluate triggers for a single workflow definition.

    Args:
        workflow: The workflow definition to evaluate
        event: The hook event
        context_data: Shared context for chaining (mutated by actions)
        state_manager: Workflow state manager
        action_executor: Action executor for running actions
        evaluator: Condition evaluator

    Returns:
        HookResponse from this workflow's triggers
    """
    from .actions import ActionContext
    from .definitions import WorkflowState

    # Map hook event to trigger name
    trigger_name = f"on_{event.event_type.name.lower()}"

    # Look up triggers - try canonical name first, then aliases
    triggers = []
    if workflow.triggers:
        triggers = workflow.triggers.get(trigger_name, [])
        if not triggers:
            aliases = TRIGGER_ALIASES.get(trigger_name, [])
            for alias in aliases:
                triggers = workflow.triggers.get(alias, [])
                if triggers:
                    break

    if not triggers:
        return HookResponse(decision="allow")

    logger.debug(
        f"Evaluating {len(triggers)} trigger(s) for '{trigger_name}' in workflow '{workflow.name}'"
    )

    # Get or create persisted state for action execution
    # This ensures variables like _injected_memory_ids persist across hook calls
    session_id = event.metadata.get("_platform_session_id") or "global"

    # Try to load existing state, or create new one
    state = state_manager.get_state(session_id)
    if state is None:
        state = WorkflowState(
            session_id=session_id,
            workflow_name=workflow.name,
            step="global",
            step_entered_at=datetime.now(UTC),
            step_action_count=0,
            total_action_count=0,
            artifacts=event.data.get("artifacts", {}) if event.data else {},
            observations=[],
            reflection_pending=False,
            context_injected=False,
            variables={},
            task_list=None,
            current_task_index=0,
            files_modified_this_task=0,
        )

    # Merge context_data into state variables (context_data has session vars from earlier load)
    if context_data:
        state.variables.update(context_data)

    action_ctx = ActionContext(
        session_id=session_id,
        state=state,
        db=action_executor.db,
        session_manager=action_executor.session_manager,
        template_engine=action_executor.template_engine,
        llm_service=action_executor.llm_service,
        transcript_processor=action_executor.transcript_processor,
        config=action_executor.config,
        mcp_manager=action_executor.mcp_manager,
        memory_manager=action_executor.memory_manager,
        memory_sync_manager=action_executor.memory_sync_manager,
        task_sync_manager=action_executor.task_sync_manager,
        session_task_manager=action_executor.session_task_manager,
        event_data=event.data,  # Pass hook event data (prompt_text, etc.)
    )

    injected_context: list[str] = []
    system_message: str | None = None

    # Fetch session for condition evaluation (enables session.title checks)
    session = None
    if action_executor.session_manager:
        session = action_executor.session_manager.get(session_id)

    for trigger in triggers:
        # Check 'when' condition if present
        when_condition = trigger.get("when")
        if when_condition:
            eval_ctx = {
                "event": event,
                "workflow_state": state,
                "handoff": context_data,
                "variables": state.variables,
                "session": session,
            }
            eval_ctx.update(context_data)
            eval_result = evaluator.evaluate(when_condition, eval_ctx)
            logger.debug(
                f"When condition '{when_condition}' evaluated to {eval_result}, "
                f"event.data.source={event.data.get('source') if event.data else None}"
            )
            if not eval_result:
                continue

        # Execute action
        action_type = trigger.get("action")
        if not action_type:
            continue

        logger.debug(f"Executing action '{action_type}' in workflow '{workflow.name}'")
        try:
            kwargs = trigger.copy()
            kwargs.pop("action", None)
            kwargs.pop("when", None)

            # Debug: log kwargs being passed to action
            if action_type == "inject_context":
                template_val = kwargs.get("template")
                logger.debug(
                    f"inject_context kwargs: source={kwargs.get('source')!r}, "
                    f"template_present={template_val is not None}, "
                    f"template_len={len(template_val) if template_val else 0}"
                )

            result = await action_executor.execute(action_type, action_ctx, **kwargs)
            logger.debug(
                f"Action '{action_type}' result: {type(result)}, keys={list(result.keys()) if isinstance(result, dict) else 'N/A'}"
            )

            if result and isinstance(result, dict):
                sys_msg = process_action_result(result, context_data, state, injected_context)
                if sys_msg:
                    system_message = sys_msg

                # Check for blocking decision from action
                if result.get("decision") == "block":
                    return HookResponse(
                        decision="block",
                        reason=result.get("reason", "Blocked by action"),
                        context="\n\n".join(injected_context) if injected_context else None,
                        system_message=system_message,
                    )

        except Exception as e:
            logger.error(
                f"Failed to execute action '{action_type}' in '{workflow.name}': {e}",
                exc_info=True,
            )

    # Persist state changes (e.g., _injected_memory_ids from memory_recall_relevant)
    # Only save if we have a real session ID (not "global" fallback)
    # The workflow_states table has a FK to sessions, so we can't save for non-existent sessions
    if session_id != "global":
        state_manager.save_state(state)

    final_context = "\n\n".join(injected_context) if injected_context else None
    logger.debug(
        f"_evaluate_workflow_triggers returning: context_len={len(final_context) if final_context else 0}, system_message={system_message is not None}"
    )
    return HookResponse(
        decision="allow",
        context=final_context,
        system_message=system_message,
    )


async def evaluate_lifecycle_triggers(
    workflow_name: str,
    event: HookEvent,
    loader: "WorkflowLoader",
    action_executor: "ActionExecutor",
    evaluator: "ConditionEvaluator",
    context_data: dict[str, Any] | None = None,
) -> HookResponse:
    """
    Evaluate triggers for a specific lifecycle workflow (e.g. session-handoff).
    Does not require an active session state.

    Args:
        workflow_name: Name of the workflow to evaluate
        event: The hook event
        loader: Workflow loader
        action_executor: Action executor for running actions
        evaluator: Condition evaluator
        context_data: Optional context data

    Returns:
        HookResponse from the workflow's triggers
    """
    from .actions import ActionContext
    from .definitions import WorkflowState

    # Get project path from event for project-specific workflow lookup
    project_path = event.data.get("cwd") if event.data else None
    logger.debug(
        f"evaluate_lifecycle_triggers: workflow={workflow_name}, project_path={project_path}"
    )

    workflow = loader.load_workflow(workflow_name, project_path=project_path)
    if not workflow:
        logger.warning(f"Workflow '{workflow_name}' not found in project_path={project_path}")
        return HookResponse(decision="allow")

    logger.debug(
        f"Workflow '{workflow_name}' loaded, triggers={list(workflow.triggers.keys()) if workflow.triggers else []}"
    )

    # Map hook event to trigger name (canonical name based on HookEventType)
    trigger_name = f"on_{event.event_type.name.lower()}"  # e.g. on_session_start, on_before_agent

    # Look up triggers - try canonical name first, then aliases
    triggers = []
    if workflow.triggers:
        triggers = workflow.triggers.get(trigger_name, [])
        # If no triggers found, check aliases (e.g., on_prompt_submit for on_before_agent)
        if not triggers:
            aliases = TRIGGER_ALIASES.get(trigger_name, [])
            for alias in aliases:
                triggers = workflow.triggers.get(alias, [])
                if triggers:
                    logger.debug(f"Using alias '{alias}' for trigger '{trigger_name}'")
                    break

    if not triggers:
        logger.debug(f"No triggers for '{trigger_name}' in workflow '{workflow_name}'")
        return HookResponse(decision="allow")

    logger.info(
        f"Executing lifecycle triggers for '{workflow_name}' on '{trigger_name}', count={len(triggers)}"
    )

    # Create a temporary/ephemeral context for execution
    # Create a dummy state for context - lifecycle workflows shouldn't depend on step state
    # but actions might need access to 'state.artifacts' or similar if provided
    session_id = event.metadata.get("_platform_session_id") or "global"

    state = WorkflowState(
        session_id=session_id,
        workflow_name=workflow_name,
        step="global",
        step_entered_at=datetime.now(UTC),
        step_action_count=0,
        total_action_count=0,
        artifacts=event.data.get("artifacts", {}),  # Pass artifacts if available
        observations=[],
        reflection_pending=False,
        context_injected=False,
        variables=context_data or {},  # Pass extra context as variables
        task_list=None,
        current_task_index=0,
        files_modified_this_task=0,
    )

    action_ctx = ActionContext(
        session_id=session_id,
        state=state,
        db=action_executor.db,
        session_manager=action_executor.session_manager,
        template_engine=action_executor.template_engine,
        llm_service=action_executor.llm_service,
        transcript_processor=action_executor.transcript_processor,
        config=action_executor.config,
        mcp_manager=action_executor.mcp_manager,
        memory_manager=action_executor.memory_manager,
        memory_sync_manager=action_executor.memory_sync_manager,
        task_sync_manager=action_executor.task_sync_manager,
        session_task_manager=action_executor.session_task_manager,
        event_data=event.data,  # Pass hook event data (prompt_text, etc.)
    )

    injected_context: list[str] = []
    system_message: str | None = None

    # Fetch session for condition evaluation (enables session.title checks)
    session = None
    if action_executor.session_manager:
        session = action_executor.session_manager.get(session_id)

    for trigger in triggers:
        # Check 'when' condition if present
        when_condition = trigger.get("when")
        if when_condition:
            eval_ctx = {
                "event": event,
                "workflow_state": state,
                "handoff": context_data or {},
                "variables": state.variables,
                "session": session,
            }
            if context_data:
                eval_ctx.update(context_data)
            eval_result = evaluator.evaluate(when_condition, eval_ctx)
            logger.debug(
                f"When condition '{when_condition}' evaluated to {eval_result}, event.data.reason={event.data.get('reason') if event.data else None}"
            )
            if not eval_result:
                continue

        # Execute action
        action_type = trigger.get("action")
        if not action_type:
            continue

        logger.info(f"Executing action '{action_type}' for trigger")
        try:
            # Pass triggers definition as kwargs
            kwargs = trigger.copy()
            kwargs.pop("action", None)
            kwargs.pop("when", None)

            result = await action_executor.execute(action_type, action_ctx, **kwargs)
            logger.debug(
                f"Action '{action_type}' returned: {type(result).__name__}, keys={list(result.keys()) if isinstance(result, dict) else 'N/A'}"
            )

            if result and isinstance(result, dict):
                if context_data is None:
                    context_data = {}

                sys_msg = process_action_result(result, context_data, state, injected_context)
                if sys_msg:
                    system_message = sys_msg

                # Check for blocking decision from action
                if result.get("decision") == "block":
                    return HookResponse(
                        decision="block",
                        reason=result.get("reason", "Blocked by action"),
                        context="\n\n".join(injected_context) if injected_context else None,
                        system_message=system_message,
                    )

        except Exception as e:
            logger.error(f"Failed to execute lifecycle action '{action_type}': {e}", exc_info=True)

    return HookResponse(
        decision="allow",
        context="\n\n".join(injected_context) if injected_context else None,
        system_message=system_message,
    )


async def evaluate_all_lifecycle_workflows(
    event: HookEvent,
    loader: "WorkflowLoader",
    state_manager: "WorkflowStateManager",
    action_executor: "ActionExecutor",
    evaluator: "ConditionEvaluator",
    detect_task_claim_fn: Any,
    detect_plan_mode_fn: Any,
    check_premature_stop_fn: Any,
    context_data: dict[str, Any] | None = None,
) -> HookResponse:
    """
    Discover and evaluate all lifecycle workflows for the given event.

    Workflows are evaluated in order (project first by priority/alpha, then global).
    Loops until no more triggers fire (up to MAX_TRIGGER_ITERATIONS).

    Args:
        event: The hook event to evaluate
        loader: Workflow loader for discovering workflows
        state_manager: Workflow state manager
        action_executor: Action executor for running actions
        evaluator: Condition evaluator
        detect_task_claim_fn: Function to detect task claims
        detect_plan_mode_fn: Function to detect plan mode
        check_premature_stop_fn: Async function to check premature stop
        context_data: Optional context data passed between actions

    Returns:
        Merged HookResponse with combined context and first non-allow decision.
    """
    from .definitions import WorkflowState

    # Use event.cwd (top-level attribute set by adapter) with fallback to event.data
    # This ensures consistent project_path across all calls, preventing duplicate
    # workflow discovery when cwd is in data but not on the event object
    project_path = event.cwd or (event.data.get("cwd") if event.data else None)

    # Discover all lifecycle workflows
    workflows = loader.discover_lifecycle_workflows(project_path)

    if not workflows:
        logger.debug("No lifecycle workflows discovered")
        return HookResponse(decision="allow")

    logger.debug(
        f"Discovered {len(workflows)} lifecycle workflow(s): {[w.name for w in workflows]}"
    )

    # Accumulate context from all workflows
    all_context: list[str] = []
    final_decision: Literal["allow", "deny", "ask", "block", "modify"] = "allow"
    final_reason: str | None = None
    final_system_message: str | None = None

    # Initialize shared context for chaining between workflows
    if context_data is None:
        context_data = {}

    # Load all session variables from persistent state
    # This enables:
    # - require_task_before_edit (task_claimed variable)
    # - require_task_complete (session_task variable)
    # - worktree detection (is_worktree variable)
    # - any other session-scoped variables set via gobby-workflows MCP tools
    session_id = event.metadata.get("_platform_session_id")
    if session_id:
        lifecycle_state = state_manager.get_state(session_id)
        if lifecycle_state and lifecycle_state.variables:
            context_data.update(lifecycle_state.variables)
            logger.debug(
                f"Loaded {len(lifecycle_state.variables)} session variable(s) "
                f"for {session_id}: {list(lifecycle_state.variables.keys())}"
            )
        elif event.event_type == HookEventType.SESSION_START:
            # New session - check if we should inherit from parent
            parent_id = event.metadata.get("_parent_session_id")
            if parent_id:
                parent_state = state_manager.get_state(parent_id)
                if parent_state and parent_state.variables:
                    # Inherit specific variables
                    inherited = {
                        k: v for k, v in parent_state.variables.items() if k in VARS_TO_INHERIT
                    }
                    if inherited:
                        context_data.update(inherited)
                        logger.info(
                            f"Session {session_id} inherited variables from {parent_id}: {inherited}"
                        )

    # Track which workflow+trigger combinations have already been processed
    # to prevent duplicate execution of the same trigger
    processed_triggers: set[tuple[str, str]] = set()
    trigger_name = f"on_{event.event_type.name.lower()}"

    # Loop until no triggers fire (or max iterations)
    for iteration in range(MAX_TRIGGER_ITERATIONS):
        triggers_fired = False

        for discovered in workflows:
            workflow = discovered.definition

            # Skip if this workflow+trigger has already been processed
            key = (workflow.name, trigger_name)
            if key in processed_triggers:
                continue

            # Merge workflow definition's default variables (lower priority than session state)
            # Precedence: session state > workflow YAML defaults
            workflow_context = {**workflow.variables, **context_data}

            response = await evaluate_workflow_triggers(
                workflow, event, workflow_context, state_manager, action_executor, evaluator
            )

            # Accumulate context
            if response.context:
                all_context.append(response.context)
                triggers_fired = True
                # Mark this workflow+trigger as processed
                processed_triggers.add(key)

            # Capture system_message (last one wins)
            if response.system_message:
                final_system_message = response.system_message

            # First non-allow decision wins
            if response.decision != "allow" and final_decision == "allow":
                final_decision = response.decision
                final_reason = response.reason

            # If blocked, stop immediately
            if response.decision == "block":
                logger.info(f"Workflow '{discovered.name}' blocked event: {response.reason}")
                return HookResponse(
                    decision="block",
                    reason=response.reason,
                    context="\n\n".join(all_context) if all_context else None,
                    system_message=final_system_message,
                )

        # If no triggers fired this iteration, we're done
        if not triggers_fired:
            logger.debug(f"No triggers fired in iteration {iteration + 1}, stopping")
            break

        logger.debug(f"Triggers fired in iteration {iteration + 1}, continuing")

    # Detect task claims for AFTER_TOOL events (session-scoped enforcement)
    # This enables require_task_before_edit to work with lifecycle workflows
    if event.event_type == HookEventType.AFTER_TOOL:
        session_id = event.metadata.get("_platform_session_id")
        if session_id:
            # Get or create a minimal state for tracking task_claimed
            state = state_manager.get_state(session_id)
            if state is None:
                state = WorkflowState(
                    session_id=session_id,
                    workflow_name="__lifecycle__",
                    step="",
                )
            detect_task_claim_fn(event, state)
            detect_plan_mode_fn(event, state)
            state_manager.save_state(state)

    # Check for premature stop in active step workflows on STOP events
    if event.event_type == HookEventType.STOP:
        premature_response = await check_premature_stop_fn(event, context_data)
        if premature_response:
            # Merge premature stop response with lifecycle response
            if premature_response.context:
                all_context.append(premature_response.context)
            if premature_response.decision != "allow":
                final_decision = premature_response.decision
                final_reason = premature_response.reason

    return HookResponse(
        decision=final_decision,
        reason=final_reason,
        context="\n\n".join(all_context) if all_context else None,
        system_message=final_system_message,
    )
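The detail in the file above most likely to trip up workflow authors is the alias lookup: triggers written against the legacy names (on_prompt_submit, on_tool_call, on_tool_result) still fire under the canonical on_before_agent / on_before_tool / on_after_tool names. The following is a minimal, self-contained sketch of that resolution; the triggers mapping is hypothetical, standing in for WorkflowDefinition.triggers as loaded by WorkflowLoader.

# Mirrors TRIGGER_ALIASES from lifecycle_evaluator.py above.
TRIGGER_ALIASES: dict[str, list[str]] = {
    "on_before_agent": ["on_prompt_submit"],
    "on_before_tool": ["on_tool_call"],
    "on_after_tool": ["on_tool_result"],
}


def resolve_triggers(triggers_map: dict[str, list[dict]], event_name: str) -> list[dict]:
    """Return triggers for the canonical name, falling back to legacy aliases."""
    trigger_name = f"on_{event_name.lower()}"
    found = triggers_map.get(trigger_name, [])
    if not found:
        for alias in TRIGGER_ALIASES.get(trigger_name, []):
            found = triggers_map.get(alias, [])
            if found:
                break
    return found


# Hypothetical workflow still keyed on the legacy alias for prompt submission;
# each trigger is a dict with an optional "when" condition plus "action" and its kwargs.
legacy_triggers = {
    "on_prompt_submit": [
        {"when": "variables.plan_mode", "action": "inject_context", "template": "..."},
    ],
}

assert resolve_triggers(legacy_triggers, "BEFORE_AGENT")  # resolved via the alias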
gobby/workflows/llm_actions.py
@@ -0,0 +1,70 @@
"""LLM invocation workflow actions.

Extracted from actions.py as part of strangler fig decomposition.
These functions handle direct LLM calls from workflows.
"""

import logging
from typing import Any

logger = logging.getLogger(__name__)


async def call_llm(
    llm_service: Any,
    template_engine: Any,
    state: Any,
    session: Any,
    prompt: str | None,
    output_as: str | None,
    **extra_context: Any,
) -> dict[str, Any]:
    """Call LLM with a prompt template and store result in variable.

    Args:
        llm_service: LLM service instance
        template_engine: Template engine for rendering
        state: WorkflowState object
        session: Current session object
        prompt: Prompt template string
        output_as: Variable name to store result
        **extra_context: Additional context for template rendering

    Returns:
        Dict with llm_called boolean and output_variable, or error
    """
    if not prompt or not output_as:
        return {"error": "Missing prompt or output_as"}

    if not llm_service:
        logger.warning("call_llm: Missing LLM service")
        return {"error": "Missing LLM service"}

    # Render prompt template
    render_context = {
        "session": session,
        "state": state,
        "variables": state.variables or {},
    }
    # Add extra context
    render_context.update(extra_context)

    try:
        rendered_prompt = template_engine.render(prompt, render_context)
    except Exception as e:
        logger.error(f"call_llm: Template rendering failed for prompt '{prompt[:50]}...': {e}")
        return {"error": f"Template rendering failed: {e}"}

    try:
        provider = llm_service.get_default_provider()
        response = await provider.generate_text(rendered_prompt)

        # Store result
        if not state.variables:
            state.variables = {}
        state.variables[output_as] = response

        return {"llm_called": True, "output_variable": output_as}
    except Exception as e:
        logger.error(f"call_llm: Failed: {e}")
        return {"error": str(e)}
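call_llm above relies on only three behaviours from its collaborators: template_engine.render(prompt, context), llm_service.get_default_provider(), and an awaitable provider.generate_text(rendered_prompt). A minimal usage sketch with illustrative stand-in stubs follows; the stub classes are assumptions for demonstration, not the package's real services.

import asyncio
from types import SimpleNamespace

from gobby.workflows.llm_actions import call_llm


class StubTemplateEngine:
    # Only the render() call that call_llm uses; the real engine is richer.
    def render(self, prompt: str, context: dict) -> str:
        return prompt.format(**context.get("variables", {}))


class StubProvider:
    async def generate_text(self, prompt: str) -> str:
        return f"echo: {prompt}"


class StubLLMService:
    def get_default_provider(self) -> StubProvider:
        return StubProvider()


async def main() -> None:
    # SimpleNamespace stands in for a WorkflowState; call_llm only touches .variables.
    state = SimpleNamespace(variables={"topic": "lifecycle workflows"})
    result = await call_llm(
        llm_service=StubLLMService(),
        template_engine=StubTemplateEngine(),
        state=state,
        session=None,
        prompt="Summarize {topic}",
        output_as="summary",
    )
    print(result)                      # {'llm_called': True, 'output_variable': 'summary'}
    print(state.variables["summary"])  # echo: Summarize lifecycle workflows


asyncio.run(main())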