gobby 0.2.8__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +1 -1
- gobby/adapters/__init__.py +6 -0
- gobby/adapters/base.py +11 -2
- gobby/adapters/claude_code.py +5 -28
- gobby/adapters/codex_impl/adapter.py +38 -43
- gobby/adapters/copilot.py +324 -0
- gobby/adapters/cursor.py +373 -0
- gobby/adapters/gemini.py +2 -26
- gobby/adapters/windsurf.py +359 -0
- gobby/agents/definitions.py +162 -2
- gobby/agents/isolation.py +33 -1
- gobby/agents/pty_reader.py +192 -0
- gobby/agents/registry.py +10 -1
- gobby/agents/runner.py +24 -8
- gobby/agents/sandbox.py +8 -3
- gobby/agents/session.py +4 -0
- gobby/agents/spawn.py +9 -2
- gobby/agents/spawn_executor.py +49 -61
- gobby/agents/spawners/command_builder.py +4 -4
- gobby/app_context.py +64 -0
- gobby/cli/__init__.py +4 -0
- gobby/cli/install.py +259 -4
- gobby/cli/installers/__init__.py +12 -0
- gobby/cli/installers/copilot.py +242 -0
- gobby/cli/installers/cursor.py +244 -0
- gobby/cli/installers/shared.py +3 -0
- gobby/cli/installers/windsurf.py +242 -0
- gobby/cli/pipelines.py +639 -0
- gobby/cli/sessions.py +3 -1
- gobby/cli/skills.py +209 -0
- gobby/cli/tasks/crud.py +6 -5
- gobby/cli/tasks/search.py +1 -1
- gobby/cli/ui.py +116 -0
- gobby/cli/utils.py +5 -17
- gobby/cli/workflows.py +38 -17
- gobby/config/app.py +5 -0
- gobby/config/features.py +0 -20
- gobby/config/skills.py +23 -2
- gobby/config/tasks.py +4 -0
- gobby/hooks/broadcaster.py +9 -0
- gobby/hooks/event_handlers/__init__.py +155 -0
- gobby/hooks/event_handlers/_agent.py +175 -0
- gobby/hooks/event_handlers/_base.py +92 -0
- gobby/hooks/event_handlers/_misc.py +66 -0
- gobby/hooks/event_handlers/_session.py +487 -0
- gobby/hooks/event_handlers/_tool.py +196 -0
- gobby/hooks/events.py +48 -0
- gobby/hooks/hook_manager.py +27 -3
- gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
- gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
- gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
- gobby/llm/__init__.py +14 -1
- gobby/llm/claude.py +594 -43
- gobby/llm/service.py +149 -0
- gobby/mcp_proxy/importer.py +4 -41
- gobby/mcp_proxy/instructions.py +9 -27
- gobby/mcp_proxy/manager.py +13 -3
- gobby/mcp_proxy/models.py +1 -0
- gobby/mcp_proxy/registries.py +66 -5
- gobby/mcp_proxy/server.py +6 -2
- gobby/mcp_proxy/services/recommendation.py +2 -28
- gobby/mcp_proxy/services/tool_filter.py +7 -0
- gobby/mcp_proxy/services/tool_proxy.py +19 -1
- gobby/mcp_proxy/stdio.py +37 -21
- gobby/mcp_proxy/tools/agents.py +7 -0
- gobby/mcp_proxy/tools/artifacts.py +3 -3
- gobby/mcp_proxy/tools/hub.py +30 -1
- gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
- gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
- gobby/mcp_proxy/tools/orchestration/review.py +17 -4
- gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
- gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
- gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
- gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
- gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
- gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
- gobby/mcp_proxy/tools/skills/__init__.py +184 -30
- gobby/mcp_proxy/tools/spawn_agent.py +229 -14
- gobby/mcp_proxy/tools/task_readiness.py +27 -4
- gobby/mcp_proxy/tools/tasks/_context.py +8 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
- gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
- gobby/mcp_proxy/tools/tasks/_search.py +1 -1
- gobby/mcp_proxy/tools/workflows/__init__.py +273 -0
- gobby/mcp_proxy/tools/workflows/_artifacts.py +225 -0
- gobby/mcp_proxy/tools/workflows/_import.py +112 -0
- gobby/mcp_proxy/tools/workflows/_lifecycle.py +332 -0
- gobby/mcp_proxy/tools/workflows/_query.py +226 -0
- gobby/mcp_proxy/tools/workflows/_resolution.py +78 -0
- gobby/mcp_proxy/tools/workflows/_terminal.py +175 -0
- gobby/mcp_proxy/tools/worktrees.py +54 -15
- gobby/memory/components/__init__.py +0 -0
- gobby/memory/components/ingestion.py +98 -0
- gobby/memory/components/search.py +108 -0
- gobby/memory/context.py +5 -5
- gobby/memory/manager.py +16 -25
- gobby/paths.py +51 -0
- gobby/prompts/loader.py +1 -35
- gobby/runner.py +131 -16
- gobby/servers/http.py +193 -150
- gobby/servers/routes/__init__.py +2 -0
- gobby/servers/routes/admin.py +56 -0
- gobby/servers/routes/mcp/endpoints/execution.py +33 -32
- gobby/servers/routes/mcp/endpoints/registry.py +8 -8
- gobby/servers/routes/mcp/hooks.py +10 -1
- gobby/servers/routes/pipelines.py +227 -0
- gobby/servers/websocket.py +314 -1
- gobby/sessions/analyzer.py +89 -3
- gobby/sessions/manager.py +5 -5
- gobby/sessions/transcripts/__init__.py +3 -0
- gobby/sessions/transcripts/claude.py +5 -0
- gobby/sessions/transcripts/codex.py +5 -0
- gobby/sessions/transcripts/gemini.py +5 -0
- gobby/skills/hubs/__init__.py +25 -0
- gobby/skills/hubs/base.py +234 -0
- gobby/skills/hubs/claude_plugins.py +328 -0
- gobby/skills/hubs/clawdhub.py +289 -0
- gobby/skills/hubs/github_collection.py +465 -0
- gobby/skills/hubs/manager.py +263 -0
- gobby/skills/hubs/skillhub.py +342 -0
- gobby/skills/parser.py +23 -0
- gobby/skills/sync.py +5 -4
- gobby/storage/artifacts.py +19 -0
- gobby/storage/memories.py +4 -4
- gobby/storage/migrations.py +118 -3
- gobby/storage/pipelines.py +367 -0
- gobby/storage/sessions.py +23 -4
- gobby/storage/skills.py +48 -8
- gobby/storage/tasks/_aggregates.py +2 -2
- gobby/storage/tasks/_lifecycle.py +4 -4
- gobby/storage/tasks/_models.py +7 -1
- gobby/storage/tasks/_queries.py +3 -3
- gobby/sync/memories.py +4 -3
- gobby/tasks/commits.py +48 -17
- gobby/tasks/external_validator.py +4 -17
- gobby/tasks/validation.py +13 -87
- gobby/tools/summarizer.py +18 -51
- gobby/utils/status.py +13 -0
- gobby/workflows/actions.py +80 -0
- gobby/workflows/context_actions.py +265 -27
- gobby/workflows/definitions.py +119 -1
- gobby/workflows/detection_helpers.py +23 -11
- gobby/workflows/enforcement/__init__.py +11 -1
- gobby/workflows/enforcement/blocking.py +96 -0
- gobby/workflows/enforcement/handlers.py +35 -1
- gobby/workflows/enforcement/task_policy.py +18 -0
- gobby/workflows/engine.py +26 -4
- gobby/workflows/evaluator.py +8 -5
- gobby/workflows/lifecycle_evaluator.py +59 -27
- gobby/workflows/loader.py +567 -30
- gobby/workflows/lobster_compat.py +147 -0
- gobby/workflows/pipeline_executor.py +801 -0
- gobby/workflows/pipeline_state.py +172 -0
- gobby/workflows/pipeline_webhooks.py +206 -0
- gobby/workflows/premature_stop.py +5 -0
- gobby/worktrees/git.py +135 -20
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/RECORD +166 -122
- gobby/hooks/event_handlers.py +0 -1008
- gobby/mcp_proxy/tools/workflows.py +0 -1023
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,487 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import TYPE_CHECKING, Any
|
|
4
|
+
|
|
5
|
+
from gobby.hooks.event_handlers._base import EventHandlersBase
|
|
6
|
+
from gobby.hooks.events import HookEvent, HookResponse
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from gobby.storage.sessions import Session
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class SessionEventHandlerMixin(EventHandlersBase):
|
|
13
|
+
"""Mixin for handling session-related events."""
|
|
14
|
+
|
|
15
|
+
    def handle_session_start(self, event: HookEvent) -> HookResponse:
        """
        Handle SESSION_START event.

        Register session and execute session-handoff workflow.

        Flow: (0) detect a pre-created session (terminal-mode agent) and
        delegate to ``_handle_pre_created_session``; (1) find a parent session
        for handoff; (2) register the new session; (3-5) wire up tracking,
        event metadata, and the message processor; (6) run lifecycle workflows
        and compose the final response.

        Args:
            event: SESSION_START hook event. ``event.session_id`` is the
                CLI-native (external) session ID; ``event.data`` carries
                transcript_path, cwd, source, and optional spawn parameters.

        Returns:
            HookResponse built by ``_compose_session_response``.
        """
        external_id = event.session_id
        input_data = event.data
        transcript_path = input_data.get("transcript_path")
        cli_source = event.source.value
        cwd = input_data.get("cwd")
        # "startup" is the default; "clear" indicates a handoff restart.
        session_source = input_data.get("source", "startup")

        # Resolve project_id (auto-creates if needed)
        project_id = self._resolve_project_id(input_data.get("project_id"), cwd)
        # Always use Gobby's machine_id for cross-CLI consistency
        machine_id = self._get_machine_id()

        self.logger.debug(
            f"SESSION_START: cli={cli_source}, project={project_id}, source={session_source}"
        )

        # Step 0: Check if this is a pre-created session (terminal mode agent)
        # Two cases:
        # 1. Claude: We pass --session-id <internal_id>, so external_id IS our internal ID
        # 2. Gemini: We pass GOBBY_SESSION_ID env var, hook_dispatcher includes it in terminal_context
        existing_session = None
        terminal_context = input_data.get("terminal_context")
        gobby_session_id_from_env = (
            terminal_context.get("gobby_session_id") if terminal_context else None
        )

        if self._session_storage:
            try:
                # Try to find by internal ID first (Claude case - external_id IS internal_id)
                existing_session = self._session_storage.get(external_id)
                if existing_session:
                    return self._handle_pre_created_session(
                        existing_session=existing_session,
                        external_id=external_id,
                        transcript_path=transcript_path,
                        cli_source=cli_source,
                        event=event,
                        cwd=cwd,
                    )
            except Exception as e:
                self.logger.debug(f"No pre-created session found by external_id: {e}")

            # Gemini case: Look up by gobby_session_id from terminal_context
            if gobby_session_id_from_env and not existing_session:
                try:
                    existing_session = self._session_storage.get(gobby_session_id_from_env)
                    if existing_session:
                        self.logger.info(
                            f"Found pre-created session {gobby_session_id_from_env} via "
                            f"terminal_context, updating external_id to {external_id}"
                        )
                        # Update the session's external_id with CLI's native session_id
                        self._session_storage.update(
                            gobby_session_id_from_env,
                            external_id=external_id,
                        )
                        return self._handle_pre_created_session(
                            existing_session=existing_session,
                            external_id=external_id,
                            transcript_path=transcript_path,
                            cli_source=cli_source,
                            event=event,
                            cwd=cwd,
                        )
                except Exception as e:
                    self.logger.debug(f"No pre-created session found by gobby_session_id: {e}")

        # Step 1: Find parent session
        # Check env vars first (spawned agent case), then handoff (source='clear')
        parent_session_id = input_data.get("parent_session_id")
        workflow_name = input_data.get("workflow_name")
        agent_depth = input_data.get("agent_depth")

        if not parent_session_id and session_source == "clear" and self._session_storage:
            try:
                parent = self._session_storage.find_parent(
                    machine_id=machine_id,
                    project_id=project_id,
                    source=cli_source,
                    status="handoff_ready",
                )
                if parent:
                    parent_session_id = parent.id
                    self.logger.debug(f"Found parent session: {parent_session_id}")
            except Exception as e:
                self.logger.warning(f"Error finding parent session: {e}")

        # Step 2: Register new session with parent if found
        # terminal_context already extracted in Step 0
        # Parse agent_depth as int if provided; malformed values fall back to 0.
        agent_depth_val = 0
        if agent_depth:
            try:
                agent_depth_val = int(agent_depth)
            except (ValueError, TypeError):
                pass

        session_id = None
        if self._session_manager:
            session_id = self._session_manager.register_session(
                external_id=external_id,
                machine_id=machine_id,
                project_id=project_id,
                parent_session_id=parent_session_id,
                jsonl_path=transcript_path,
                source=cli_source,
                project_path=cwd,
                terminal_context=terminal_context,
                workflow_name=workflow_name,
                agent_depth=agent_depth_val,
            )

        # Step 2b: Mark parent session as expired after successful handoff
        if parent_session_id and self._session_manager:
            try:
                self._session_manager.mark_session_expired(parent_session_id)
                self.logger.debug(f"Marked parent session {parent_session_id} as expired")
            except Exception as e:
                self.logger.warning(f"Failed to mark parent session as expired: {e}")

        # Step 2c: Auto-activate workflow if specified (for spawned agents)
        if workflow_name and session_id:
            self._auto_activate_workflow(workflow_name, session_id, cwd)

        # Step 3: Track registered session
        if transcript_path and self._session_coordinator:
            try:
                self._session_coordinator.register_session(external_id)
            except Exception as e:
                self.logger.error(f"Failed to setup session tracking: {e}", exc_info=True)

        # Step 4: Update event metadata with the newly registered session_id
        event.metadata["_platform_session_id"] = session_id
        if parent_session_id:
            event.metadata["_parent_session_id"] = parent_session_id

        # Step 5: Register with Message Processor
        if self._message_processor and transcript_path and session_id:
            try:
                self._message_processor.register_session(
                    session_id, transcript_path, source=cli_source
                )
            except Exception as e:
                self.logger.warning(f"Failed to register session with message processor: {e}")

        # Step 6: Execute lifecycle workflows
        wf_response = HookResponse(decision="allow", context="")
        if self._workflow_handler:
            try:
                wf_response = self._workflow_handler.handle_all_lifecycles(event)
            except Exception as e:
                self.logger.warning(f"Workflow error: {e}")

        # Build additional context (task context)
        # Note: Skill injection is now handled by workflows via inject_context action
        additional_context: list[str] = []
        if event.task_id:
            task_title = event.metadata.get("_task_title", "Unknown Task")
            additional_context.append("\n## Active Task Context\n")
            additional_context.append(f"You are working on task: {task_title} ({event.task_id})")

        # Fetch session to get seq_num for #N display
        session_obj = None
        if session_id and self._session_storage:
            session_obj = self._session_storage.get(session_id)

        return self._compose_session_response(
            session=session_obj,
            wf_response=wf_response,
            session_id=session_id,
            external_id=external_id,
            parent_session_id=parent_session_id,
            machine_id=machine_id,
            project_id=project_id,
            task_id=event.task_id,
            additional_context=additional_context,
            terminal_context=terminal_context,
        )
+
|
|
200
|
+
    def handle_session_end(self, event: HookEvent) -> HookResponse:
        """Handle SESSION_END event.

        Best-effort teardown: each step below is independently wrapped in
        try/except so a failure in one does not prevent the others from
        running (workflows, commit auto-linking, agent-run completion,
        summary generation, message-processor unregistration).

        Args:
            event: SESSION_END hook event; ``event.session_id`` is the
                CLI-native (external) session ID.

        Returns:
            HookResponse with decision "allow" (session end is never blocked).
        """
        from gobby.tasks.commits import auto_link_commits

        external_id = event.session_id
        session_id = event.metadata.get("_platform_session_id")

        if session_id:
            self.logger.debug(f"SESSION_END: session {session_id}")
        else:
            self.logger.warning(f"SESSION_END: session_id not found for external_id={external_id}")

        # If not in mapping, query database
        if not session_id and external_id and self._session_manager:
            self.logger.debug(f"external_id {external_id} not in mapping, querying database")
            # Resolve context for lookup
            machine_id = self._get_machine_id()
            cwd = event.data.get("cwd")
            project_id = self._resolve_project_id(event.data.get("project_id"), cwd)
            # Lookup with full composite key
            session_id = self._session_manager.lookup_session_id(
                external_id,
                source=event.source.value,
                machine_id=machine_id,
                project_id=project_id,
            )

        # Ensure session_id is available in event metadata for workflow actions
        if session_id and not event.metadata.get("_platform_session_id"):
            event.metadata["_platform_session_id"] = session_id

        # Execute lifecycle workflow triggers
        if self._workflow_handler:
            try:
                self._workflow_handler.handle_all_lifecycles(event)
            except Exception as e:
                self.logger.error(f"Failed to execute lifecycle workflows: {e}", exc_info=True)

        # Auto-link commits made during this session to tasks
        # (commits since session.created_at in the event's cwd).
        if session_id and self._session_storage and self._task_manager:
            try:
                session = self._session_storage.get(session_id)
                if session:
                    cwd = event.data.get("cwd")
                    link_result = auto_link_commits(
                        task_manager=self._task_manager,
                        since=session.created_at,
                        cwd=cwd,
                    )
                    if link_result.total_linked > 0:
                        self.logger.info(
                            f"Auto-linked {link_result.total_linked} commits to tasks: "
                            f"{list(link_result.linked_tasks.keys())}"
                        )
            except Exception as e:
                self.logger.warning(f"Failed to auto-link session commits: {e}")

        # Complete agent run if this is a terminal-mode agent session
        # (identified by a non-empty session.agent_run_id).
        if session_id and self._session_storage and self._session_coordinator:
            try:
                session = self._session_storage.get(session_id)
                if session and session.agent_run_id:
                    self._session_coordinator.complete_agent_run(session)
            except Exception as e:
                self.logger.warning(f"Failed to complete agent run: {e}")

        # Generate independent session summary file
        if self._summary_file_generator:
            try:
                summary_input = {
                    "session_id": external_id,
                    "transcript_path": event.data.get("transcript_path"),
                }
                self._summary_file_generator.generate_session_summary(
                    # Fall back to external_id when no internal ID was resolved.
                    session_id=session_id or external_id,
                    input_data=summary_input,
                )
            except Exception as e:
                self.logger.error(f"Failed to generate failover summary: {e}")

        # Unregister from message processor
        if self._message_processor and (session_id or external_id):
            try:
                target_id = session_id or external_id
                self._message_processor.unregister_session(target_id)
            except Exception as e:
                self.logger.warning(f"Failed to unregister session from message processor: {e}")

        return HookResponse(decision="allow")
|
|
290
|
+
    def _handle_pre_created_session(
        self,
        existing_session: Session,
        external_id: str,
        transcript_path: str | None,
        cli_source: str,
        event: HookEvent,
        cwd: str | None,
    ) -> HookResponse:
        """Handle session start for a pre-created session (terminal mode agent).

        Instead of registering a brand-new session, updates the pre-created
        record with runtime info (transcript path, active status), starts its
        agent run if present, activates its workflow, and composes the same
        response shape as the normal path.

        Args:
            existing_session: Pre-created session object
            external_id: External (CLI-native) session ID
            transcript_path: Path to transcript file
            cli_source: CLI source (e.g., "claude-code")
            event: Hook event
            cwd: Current working directory

        Returns:
            HookResponse for the pre-created session
        """
        self.logger.info(f"Found pre-created session {external_id}, updating instead of creating")

        # Update the session with actual runtime info
        if self._session_storage:
            self._session_storage.update(
                session_id=existing_session.id,
                jsonl_path=transcript_path,
                status="active",
            )

        session_id = existing_session.id
        parent_session_id = existing_session.parent_session_id
        machine_id = self._get_machine_id()

        # Track registered session
        if transcript_path and self._session_coordinator:
            try:
                self._session_coordinator.register_session(external_id)
            except Exception as e:
                self.logger.error(f"Failed to setup session tracking: {e}")

        # Start the agent run if this is a terminal-mode agent session
        if existing_session.agent_run_id and self._session_coordinator:
            try:
                self._session_coordinator.start_agent_run(existing_session.agent_run_id)
            except Exception as e:
                self.logger.warning(f"Failed to start agent run: {e}")

        # Auto-activate workflow if specified for this session, carrying over
        # any step variables stored on the pre-created record.
        if existing_session.workflow_name and session_id:
            self._auto_activate_workflow(
                existing_session.workflow_name,
                session_id,
                cwd,
                variables=existing_session.step_variables,
            )

        # Update event metadata
        event.metadata["_platform_session_id"] = session_id

        # Register with Message Processor
        if self._message_processor and transcript_path:
            try:
                self._message_processor.register_session(
                    session_id, transcript_path, source=cli_source
                )
            except Exception as e:
                self.logger.warning(f"Failed to register with message processor: {e}")

        # Execute lifecycle workflows
        wf_response = HookResponse(decision="allow", context="")
        if self._workflow_handler:
            try:
                wf_response = self._workflow_handler.handle_all_lifecycles(event)
            except Exception as e:
                self.logger.warning(f"Workflow error: {e}")

        return self._compose_session_response(
            session=existing_session,
            wf_response=wf_response,
            session_id=session_id,
            external_id=external_id,
            parent_session_id=parent_session_id,
            machine_id=machine_id,
            project_id=existing_session.project_id,
            task_id=event.task_id,
            is_pre_created=True,
        )
|
381
|
+
def _compose_session_response(
|
|
382
|
+
self,
|
|
383
|
+
session: Session | None,
|
|
384
|
+
wf_response: HookResponse,
|
|
385
|
+
session_id: str | None,
|
|
386
|
+
external_id: str,
|
|
387
|
+
parent_session_id: str | None,
|
|
388
|
+
machine_id: str,
|
|
389
|
+
project_id: str | None = None,
|
|
390
|
+
task_id: str | None = None,
|
|
391
|
+
additional_context: list[str] | None = None,
|
|
392
|
+
is_pre_created: bool = False,
|
|
393
|
+
terminal_context: dict[str, Any] | None = None,
|
|
394
|
+
) -> HookResponse:
|
|
395
|
+
"""Build HookResponse for session start.
|
|
396
|
+
|
|
397
|
+
Shared helper that builds the system message, context, and metadata
|
|
398
|
+
for both pre-created and newly-created sessions.
|
|
399
|
+
|
|
400
|
+
Args:
|
|
401
|
+
session: Session object (used for seq_num)
|
|
402
|
+
wf_response: Response from workflow handler
|
|
403
|
+
session_id: Session ID
|
|
404
|
+
external_id: External (CLI-native) session ID
|
|
405
|
+
parent_session_id: Parent session ID if any
|
|
406
|
+
machine_id: Machine ID
|
|
407
|
+
project_id: Project ID
|
|
408
|
+
task_id: Task ID if any
|
|
409
|
+
additional_context: Additional context strings to append (e.g., task/skill context)
|
|
410
|
+
is_pre_created: Whether this is a pre-created session
|
|
411
|
+
terminal_context: Terminal context dict to add to metadata
|
|
412
|
+
|
|
413
|
+
Returns:
|
|
414
|
+
HookResponse with system_message, context, and metadata
|
|
415
|
+
"""
|
|
416
|
+
# Build context_parts
|
|
417
|
+
context_parts: list[str] = []
|
|
418
|
+
if wf_response.context:
|
|
419
|
+
context_parts.append(wf_response.context)
|
|
420
|
+
if parent_session_id:
|
|
421
|
+
context_parts.append(f"Parent session: {parent_session_id}")
|
|
422
|
+
if additional_context:
|
|
423
|
+
context_parts.extend(additional_context)
|
|
424
|
+
|
|
425
|
+
# Compute session_ref from session object or fallback to session_id
|
|
426
|
+
session_ref = session_id
|
|
427
|
+
if session and session.seq_num:
|
|
428
|
+
session_ref = f"#{session.seq_num}"
|
|
429
|
+
|
|
430
|
+
# Build system message (terminal display only)
|
|
431
|
+
if session_ref and session_ref != session_id:
|
|
432
|
+
system_message = f"\nGobby Session ID: {session_ref}"
|
|
433
|
+
else:
|
|
434
|
+
system_message = f"\nGobby Session ID: {session_id}"
|
|
435
|
+
system_message += " <- Use this for MCP tool calls (session_id parameter)"
|
|
436
|
+
system_message += f"\nExternal ID: {external_id} (CLI-native, rarely needed)"
|
|
437
|
+
|
|
438
|
+
# Add active lifecycle workflows
|
|
439
|
+
if wf_response.metadata and "discovered_workflows" in wf_response.metadata:
|
|
440
|
+
wf_list = wf_response.metadata["discovered_workflows"]
|
|
441
|
+
if wf_list:
|
|
442
|
+
system_message += "\nActive workflows:"
|
|
443
|
+
for w in wf_list:
|
|
444
|
+
source = "project" if w["is_project"] else "global"
|
|
445
|
+
system_message += f"\n - {w['name']} ({source}, priority={w['priority']})"
|
|
446
|
+
|
|
447
|
+
if wf_response.system_message:
|
|
448
|
+
system_message += f"\n\n{wf_response.system_message}"
|
|
449
|
+
|
|
450
|
+
# Build metadata
|
|
451
|
+
metadata: dict[str, Any] = {
|
|
452
|
+
"session_id": session_id,
|
|
453
|
+
"session_ref": session_ref,
|
|
454
|
+
"parent_session_id": parent_session_id,
|
|
455
|
+
"machine_id": machine_id,
|
|
456
|
+
"project_id": project_id,
|
|
457
|
+
"external_id": external_id,
|
|
458
|
+
"task_id": task_id,
|
|
459
|
+
}
|
|
460
|
+
if is_pre_created:
|
|
461
|
+
metadata["is_pre_created"] = True
|
|
462
|
+
if terminal_context:
|
|
463
|
+
# Only include non-null terminal values
|
|
464
|
+
for key, value in terminal_context.items():
|
|
465
|
+
if value is not None:
|
|
466
|
+
metadata[f"terminal_{key}"] = value
|
|
467
|
+
|
|
468
|
+
final_context = "\n".join(context_parts) if context_parts else None
|
|
469
|
+
|
|
470
|
+
# Debug: echo additionalContext to system_message if enabled
|
|
471
|
+
# Workflow variable takes precedence over config
|
|
472
|
+
debug_echo = False
|
|
473
|
+
workflow_vars = (wf_response.metadata or {}).get("workflow_variables", {})
|
|
474
|
+
if workflow_vars.get("debug_echo_context") is not None:
|
|
475
|
+
debug_echo = bool(workflow_vars.get("debug_echo_context"))
|
|
476
|
+
elif self._workflow_config and self._workflow_config.debug_echo_context:
|
|
477
|
+
debug_echo = True
|
|
478
|
+
|
|
479
|
+
if debug_echo and final_context:
|
|
480
|
+
system_message += f"\n\n[DEBUG additionalContext]\n{final_context}"
|
|
481
|
+
|
|
482
|
+
return HookResponse(
|
|
483
|
+
decision="allow",
|
|
484
|
+
context=final_context,
|
|
485
|
+
system_message=system_message,
|
|
486
|
+
metadata=metadata,
|
|
487
|
+
)
|
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from gobby.hooks.event_handlers._base import EventHandlersBase
|
|
6
|
+
from gobby.hooks.events import HookEvent, HookResponse
|
|
7
|
+
|
|
8
|
+
# Tool names treated as file-edit operations across supported CLIs.
# Membership tests lower-case the tool name first, so entries here must be
# lowercase.
EDIT_TOOLS = {
    "write_file",
    "replace",
    "edit_file",
    "notebook_edit",
    "edit",
    "write",
}
|
|
17
|
+
|
|
18
|
+
class ToolEventHandlerMixin(EventHandlersBase):
    """Mixin for handling tool-related events.

    Provides handlers for BEFORE_TOOL, AFTER_TOOL, and (Gemini-only)
    BEFORE_TOOL_SELECTION hook events, plus artifact capture for edit/write
    tools.  Relies on attributes supplied by the host class:
    ``logger``, ``_workflow_handler``, ``_artifact_capture_hook``,
    ``_session_storage``, and ``_task_manager``.
    """

    # File-extension -> markdown code-fence language tag, used when wrapping
    # captured tool output for the artifact-capture hook.  Class-level so the
    # mapping is built once, not on every _capture_tool_artifact call.
    # Insertion order matters only in that matching scans in this order.
    _EXT_LANGUAGE: dict[str, str] = {
        ".py": "python",
        ".js": "javascript",
        ".ts": "typescript",
        ".tsx": "tsx",
        ".jsx": "jsx",
        ".rs": "rust",
        ".go": "go",
        ".java": "java",
        ".rb": "ruby",
        ".sh": "bash",
        ".yaml": "yaml",
        ".yml": "yaml",
        ".json": "json",
        ".md": "markdown",
        ".sql": "sql",
        ".html": "html",
        ".css": "css",
    }

    @staticmethod
    def _extract_file_path(tool_input: dict[str, Any]) -> Any:
        """Return the target file path from a tool input dict, if any.

        Different platforms name the parameter differently ("file_path",
        "target_file", or "path"); the first truthy one wins.  Returns None
        (or another falsy value) when no path key is present.
        """
        return (
            tool_input.get("file_path")
            or tool_input.get("target_file")
            or tool_input.get("path")
        )

    def handle_before_tool(self, event: HookEvent) -> HookResponse:
        """Handle BEFORE_TOOL event.

        Runs lifecycle workflow triggers.  A non-"allow" workflow decision is
        propagated to the caller unchanged; otherwise any workflow context is
        attached to an "allow" response.
        """
        input_data = event.data
        tool_name = input_data.get("tool_name", "unknown")
        session_id = event.metadata.get("_platform_session_id")

        if session_id:
            self.logger.debug(f"BEFORE_TOOL: {tool_name}, session {session_id}")
        else:
            self.logger.debug(f"BEFORE_TOOL: {tool_name}")

        context_parts: list[str] = []

        # Execute lifecycle workflow triggers.  Workflow failures are logged
        # but never block the tool call.
        if self._workflow_handler:
            try:
                wf_response = self._workflow_handler.handle_all_lifecycles(event)
                if wf_response.context:
                    context_parts.append(wf_response.context)
                if wf_response.decision != "allow":
                    # Deny/ask decisions short-circuit; return as-is.
                    return wf_response
            except Exception as e:
                self.logger.error(f"Failed to execute lifecycle workflows: {e}", exc_info=True)

        return HookResponse(
            decision="allow",
            context="\n\n".join(context_parts) if context_parts else None,
        )

    def handle_after_tool(self, event: HookEvent) -> HookResponse:
        """Handle AFTER_TOOL event.

        When a platform session ID is known, captures artifacts from
        successful edit/write tools and records the session's "had edits"
        high-water mark (only when the session has a claimed task and the
        file is not an internal .gobby/ file).  Lifecycle workflow triggers
        run afterwards regardless of session state.
        """
        input_data = event.data
        tool_name = input_data.get("tool_name", "unknown")
        session_id = event.metadata.get("_platform_session_id")
        is_failure = event.metadata.get("is_failure", False)

        status = "FAIL" if is_failure else "OK"
        if session_id:
            self.logger.debug(f"AFTER_TOOL [{status}]: {tool_name}, session {session_id}")

            # Track edits for session high-water mark
            # Only if tool succeeded, matches edit tools, and session has claimed a task
            # Skip .gobby/ internal files (tasks.jsonl, memories.jsonl, etc.)
            tool_input = input_data.get("tool_input", {})

            # Capture artifacts from edit tools
            if not is_failure and self._artifact_capture_hook:
                self._capture_tool_artifact(session_id, tool_name, tool_input)

            # Simple check for edit tools (case-insensitive)
            is_edit = tool_name.lower() in EDIT_TOOLS

            # For complex tools (multi_replace, etc), check if they modify files
            # This logic could be expanded, but for now stick to the basic set

            if not is_failure and is_edit and self._session_storage:
                try:
                    # Check if file is internal .gobby file
                    file_path = self._extract_file_path(tool_input)
                    is_internal = file_path and ".gobby/" in str(file_path)

                    if not is_internal:
                        # Check if session has any claimed tasks before marking had_edits
                        has_claimed_task = False
                        if self._task_manager:
                            try:
                                claimed_tasks = self._task_manager.list_tasks(assignee=session_id)
                                has_claimed_task = len(claimed_tasks) > 0
                            except Exception as e:
                                self.logger.debug(
                                    f"Failed to check claimed tasks for session {session_id}: {e}"
                                )

                        if has_claimed_task:
                            self._session_storage.mark_had_edits(session_id)
                except Exception as e:
                    # Don't fail the event if tracking fails
                    self.logger.warning(f"Failed to process file edit: {e}")

        else:
            self.logger.debug(f"AFTER_TOOL [{status}]: {tool_name}")

        # Execute lifecycle workflow triggers
        if self._workflow_handler:
            try:
                wf_response = self._workflow_handler.handle_all_lifecycles(event)
                # Propagate the workflow response either on a non-allow
                # decision or when it carries context for the caller.
                if wf_response.decision != "allow" or wf_response.context:
                    return wf_response
            except Exception as e:
                self.logger.error(f"Failed to execute lifecycle workflows: {e}", exc_info=True)

        return HookResponse(decision="allow")

    def handle_before_tool_selection(self, event: HookEvent) -> HookResponse:
        """Handle BEFORE_TOOL_SELECTION event (Gemini only).

        Currently log-only; always allows.
        """
        session_id = event.metadata.get("_platform_session_id")

        if session_id:
            self.logger.debug(f"BEFORE_TOOL_SELECTION: session {session_id}")
        else:
            self.logger.debug("BEFORE_TOOL_SELECTION")

        return HookResponse(decision="allow")

    def _capture_tool_artifact(
        self, session_id: str, tool_name: str, tool_input: dict[str, Any]
    ) -> None:
        """Capture artifacts from tool inputs for edit/write tools.

        Wraps the written content in a markdown code fence (tagged with a
        language inferred from the file extension) and feeds it to the
        artifact-capture hook's process_message, reusing its deduplication
        and classification logic.  No-op when there is no hook, no content,
        or the target is an internal .gobby/ file.

        Args:
            session_id: Platform session ID
            tool_name: Name of the tool
            tool_input: Tool input dictionary
        """
        if not self._artifact_capture_hook:
            return

        # Get content and file path from tool input
        content = tool_input.get("content") or tool_input.get("new_string")
        file_path = self._extract_file_path(tool_input)

        if not content:
            return

        # Skip internal .gobby files
        if file_path and ".gobby/" in str(file_path):
            return

        # Detect language from file extension; empty string when unknown.
        language = ""
        if file_path:
            path_str = str(file_path)
            language = next(
                (lang for ext, lang in self._EXT_LANGUAGE.items() if path_str.endswith(ext)),
                "",
            )

        # Wrap content as markdown code block for process_message
        # This reuses the deduplication and classification logic
        markdown_content = f"```{language}\n{content}\n```"

        try:
            self._artifact_capture_hook.process_message(
                session_id=session_id,
                role="assistant",
                content=markdown_content,
            )
            self.logger.debug(f"Captured artifact from {tool_name}: {file_path or 'unknown'}")
        except Exception as e:
            self.logger.warning(f"Failed to capture artifact from {tool_name}: {e}")
|