gobby-0.2.9-py3-none-any.whl → gobby-0.2.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. gobby/__init__.py +1 -1
  2. gobby/adapters/__init__.py +6 -0
  3. gobby/adapters/base.py +11 -2
  4. gobby/adapters/claude_code.py +2 -2
  5. gobby/adapters/codex_impl/adapter.py +38 -43
  6. gobby/adapters/copilot.py +324 -0
  7. gobby/adapters/cursor.py +373 -0
  8. gobby/adapters/gemini.py +2 -26
  9. gobby/adapters/windsurf.py +359 -0
  10. gobby/agents/definitions.py +162 -2
  11. gobby/agents/isolation.py +33 -1
  12. gobby/agents/pty_reader.py +192 -0
  13. gobby/agents/registry.py +10 -1
  14. gobby/agents/runner.py +24 -8
  15. gobby/agents/sandbox.py +8 -3
  16. gobby/agents/session.py +4 -0
  17. gobby/agents/spawn.py +9 -2
  18. gobby/agents/spawn_executor.py +49 -61
  19. gobby/agents/spawners/command_builder.py +4 -4
  20. gobby/app_context.py +5 -0
  21. gobby/cli/__init__.py +4 -0
  22. gobby/cli/install.py +259 -4
  23. gobby/cli/installers/__init__.py +12 -0
  24. gobby/cli/installers/copilot.py +242 -0
  25. gobby/cli/installers/cursor.py +244 -0
  26. gobby/cli/installers/shared.py +3 -0
  27. gobby/cli/installers/windsurf.py +242 -0
  28. gobby/cli/pipelines.py +639 -0
  29. gobby/cli/sessions.py +3 -1
  30. gobby/cli/skills.py +209 -0
  31. gobby/cli/tasks/crud.py +6 -5
  32. gobby/cli/tasks/search.py +1 -1
  33. gobby/cli/ui.py +116 -0
  34. gobby/cli/workflows.py +38 -17
  35. gobby/config/app.py +5 -0
  36. gobby/config/skills.py +23 -2
  37. gobby/hooks/broadcaster.py +9 -0
  38. gobby/hooks/event_handlers/_base.py +6 -1
  39. gobby/hooks/event_handlers/_session.py +44 -130
  40. gobby/hooks/events.py +48 -0
  41. gobby/hooks/hook_manager.py +25 -3
  42. gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
  43. gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
  44. gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
  45. gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
  46. gobby/llm/__init__.py +14 -1
  47. gobby/llm/claude.py +217 -1
  48. gobby/llm/service.py +149 -0
  49. gobby/mcp_proxy/instructions.py +9 -27
  50. gobby/mcp_proxy/models.py +1 -0
  51. gobby/mcp_proxy/registries.py +56 -9
  52. gobby/mcp_proxy/server.py +6 -2
  53. gobby/mcp_proxy/services/tool_filter.py +7 -0
  54. gobby/mcp_proxy/services/tool_proxy.py +19 -1
  55. gobby/mcp_proxy/stdio.py +37 -21
  56. gobby/mcp_proxy/tools/agents.py +7 -0
  57. gobby/mcp_proxy/tools/hub.py +30 -1
  58. gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
  59. gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
  60. gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
  61. gobby/mcp_proxy/tools/orchestration/review.py +17 -4
  62. gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
  63. gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
  64. gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
  65. gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
  66. gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
  67. gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
  68. gobby/mcp_proxy/tools/skills/__init__.py +184 -30
  69. gobby/mcp_proxy/tools/spawn_agent.py +229 -14
  70. gobby/mcp_proxy/tools/tasks/_context.py +8 -0
  71. gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
  72. gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
  73. gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
  74. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
  75. gobby/mcp_proxy/tools/tasks/_search.py +1 -1
  76. gobby/mcp_proxy/tools/workflows/__init__.py +9 -2
  77. gobby/mcp_proxy/tools/workflows/_lifecycle.py +12 -1
  78. gobby/mcp_proxy/tools/workflows/_query.py +45 -26
  79. gobby/mcp_proxy/tools/workflows/_terminal.py +39 -3
  80. gobby/mcp_proxy/tools/worktrees.py +54 -15
  81. gobby/memory/context.py +5 -5
  82. gobby/runner.py +108 -6
  83. gobby/servers/http.py +7 -1
  84. gobby/servers/routes/__init__.py +2 -0
  85. gobby/servers/routes/admin.py +44 -0
  86. gobby/servers/routes/mcp/endpoints/execution.py +18 -25
  87. gobby/servers/routes/mcp/hooks.py +10 -1
  88. gobby/servers/routes/pipelines.py +227 -0
  89. gobby/servers/websocket.py +314 -1
  90. gobby/sessions/analyzer.py +87 -1
  91. gobby/sessions/manager.py +5 -5
  92. gobby/sessions/transcripts/__init__.py +3 -0
  93. gobby/sessions/transcripts/claude.py +5 -0
  94. gobby/sessions/transcripts/codex.py +5 -0
  95. gobby/sessions/transcripts/gemini.py +5 -0
  96. gobby/skills/hubs/__init__.py +25 -0
  97. gobby/skills/hubs/base.py +234 -0
  98. gobby/skills/hubs/claude_plugins.py +328 -0
  99. gobby/skills/hubs/clawdhub.py +289 -0
  100. gobby/skills/hubs/github_collection.py +465 -0
  101. gobby/skills/hubs/manager.py +263 -0
  102. gobby/skills/hubs/skillhub.py +342 -0
  103. gobby/storage/memories.py +4 -4
  104. gobby/storage/migrations.py +95 -3
  105. gobby/storage/pipelines.py +367 -0
  106. gobby/storage/sessions.py +23 -4
  107. gobby/storage/skills.py +1 -1
  108. gobby/storage/tasks/_aggregates.py +2 -2
  109. gobby/storage/tasks/_lifecycle.py +4 -4
  110. gobby/storage/tasks/_models.py +7 -1
  111. gobby/storage/tasks/_queries.py +3 -3
  112. gobby/sync/memories.py +4 -3
  113. gobby/tasks/commits.py +48 -17
  114. gobby/workflows/actions.py +75 -0
  115. gobby/workflows/context_actions.py +246 -5
  116. gobby/workflows/definitions.py +119 -1
  117. gobby/workflows/detection_helpers.py +23 -11
  118. gobby/workflows/enforcement/task_policy.py +18 -0
  119. gobby/workflows/engine.py +20 -1
  120. gobby/workflows/evaluator.py +8 -5
  121. gobby/workflows/lifecycle_evaluator.py +57 -26
  122. gobby/workflows/loader.py +567 -30
  123. gobby/workflows/lobster_compat.py +147 -0
  124. gobby/workflows/pipeline_executor.py +801 -0
  125. gobby/workflows/pipeline_state.py +172 -0
  126. gobby/workflows/pipeline_webhooks.py +206 -0
  127. gobby/workflows/premature_stop.py +5 -0
  128. gobby/worktrees/git.py +135 -20
  129. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
  130. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/RECORD +134 -106
  131. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
  132. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
  133. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
  134. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
gobby/adapters/windsurf.py ADDED
@@ -0,0 +1,359 @@
+"""Windsurf adapter for hook translation.
+
+This adapter translates between Windsurf's Cascade hooks format and the unified
+HookEvent/HookResponse models.
+
+Windsurf Cascade Hook Types:
+- pre_read_code: Before reading a file
+- post_read_code: After reading a file
+- post_write_code: After writing/editing a file
+- post_run_command: After running a shell command
+- post_mcp_tool_use: After using an MCP tool
+- post_cascade_response: After agent response
+
+Key differences from Claude Code:
+- Uses `agent_action_name` instead of `hook_type`
+- Uses nested `tool_info` object for tool details
+- Action-specific field names (file_path, edits, command, etc.)
+- Different response format
+"""
+
+from datetime import UTC, datetime
+from typing import TYPE_CHECKING, Any
+
+from gobby.adapters.base import BaseAdapter
+from gobby.hooks.events import HookEvent, HookEventType, HookResponse, SessionSource
+
+if TYPE_CHECKING:
+    from gobby.hooks.hook_manager import HookManager
+
+
+class WindsurfAdapter(BaseAdapter):
+    """Adapter for Windsurf Cascade hook translation.
+
+    This adapter:
+    1. Translates Windsurf's action-based hook payloads to unified HookEvent
+    2. Extracts tool details from nested `tool_info` structures
+    3. Translates HookResponse back to Windsurf's expected format
+    """
+
+    source = SessionSource.WINDSURF
+
+    # Event type mapping: Windsurf action names -> unified HookEventType
+    # Windsurf uses agent_action_name field with underscore-separated names
+    EVENT_MAP: dict[str, HookEventType] = {
+        # Pre-action hooks (before tool execution)
+        "pre_read_code": HookEventType.BEFORE_TOOL,
+        "pre_write_code": HookEventType.BEFORE_TOOL,
+        "pre_run_command": HookEventType.BEFORE_TOOL,
+        "pre_mcp_tool_use": HookEventType.BEFORE_TOOL,
+        # Post-action hooks (after tool execution)
+        "post_read_code": HookEventType.AFTER_TOOL,
+        "post_write_code": HookEventType.AFTER_TOOL,
+        "post_run_command": HookEventType.AFTER_TOOL,
+        "post_mcp_tool_use": HookEventType.AFTER_TOOL,
+        # Agent lifecycle
+        "post_cascade_response": HookEventType.AFTER_AGENT,
+        "pre_cascade_request": HookEventType.BEFORE_AGENT,
+        # Session lifecycle
+        "session_start": HookEventType.SESSION_START,
+        "session_end": HookEventType.SESSION_END,
+    }
+
+    # Map action names to normalized tool names
+    # This allows workflows to use consistent tool names across CLIs
+    TOOL_MAP: dict[str, str] = {
+        "pre_read_code": "Read",
+        "post_read_code": "Read",
+        "pre_write_code": "Write",
+        "post_write_code": "Write",
+        "pre_run_command": "Bash",
+        "post_run_command": "Bash",
+        "pre_mcp_tool_use": "mcp_call",
+        "post_mcp_tool_use": "mcp_call",
+    }
+
+    # Map unified event types back to Windsurf action names for response
+    HOOK_EVENT_NAME_MAP: dict[str, str] = {
+        "session_start": "SessionStart",
+        "session_end": "SessionEnd",
+        "before_agent": "PreCascadeRequest",
+        "after_agent": "PostCascadeResponse",
+        "before_tool": "PreToolUse",
+        "after_tool": "PostToolUse",
+    }
+
+    def __init__(self, hook_manager: "HookManager | None" = None):
+        """Initialize the Windsurf adapter.
+
+        Args:
+            hook_manager: Reference to HookManager for handling events.
+                If None, the adapter can only translate (not handle events).
+        """
+        self._hook_manager = hook_manager
+
+    def _extract_tool_info(self, action_name: str, tool_info: dict[str, Any]) -> dict[str, Any]:
+        """Extract and normalize tool information based on action type.
+
+        Different actions have different structures in tool_info:
+        - read_code: file_path, content (post only)
+        - write_code: file_path, edits[{old_string, new_string}]
+        - run_command: command, output (post only), exit_code (post only)
+        - mcp_tool_use: server_name, tool_name, arguments, result (post only)
+
+        Args:
+            action_name: The Windsurf action name (e.g., "post_write_code")
+            tool_info: The nested tool_info dict from the payload
+
+        Returns:
+            Normalized dict with tool_name, tool_input, tool_output
+        """
+        result: dict[str, Any] = {}
+
+        # Get normalized tool name
+        result["tool_name"] = self.TOOL_MAP.get(action_name, action_name)
+
+        # Extract action-specific fields
+        if "read_code" in action_name:
+            # Read file action
+            result["tool_input"] = {"file_path": tool_info.get("file_path")}
+            if "content" in tool_info:
+                result["tool_output"] = tool_info["content"]
+
+        elif "write_code" in action_name:
+            # Write/edit file action
+            file_path = tool_info.get("file_path")
+            edits = tool_info.get("edits", [])
+            result["tool_input"] = {
+                "file_path": file_path,
+                "edits": edits,
+            }
+            # For post, indicate success
+            if action_name.startswith("post_"):
+                result["tool_output"] = f"Successfully edited {file_path}"
+
+        elif "run_command" in action_name:
+            # Shell command action
+            result["tool_input"] = {"command": tool_info.get("command")}
+            if "output" in tool_info:
+                result["tool_output"] = tool_info["output"]
+            if "exit_code" in tool_info:
+                result["exit_code"] = tool_info["exit_code"]
+                if tool_info["exit_code"] != 0:
+                    result["is_error"] = True
+
+        elif "mcp_tool_use" in action_name:
+            # MCP tool call
+            result["mcp_server"] = tool_info.get("server_name")
+            result["mcp_tool"] = tool_info.get("tool_name")
+            result["tool_input"] = tool_info.get("arguments", {})
+            if "result" in tool_info:
+                result["tool_output"] = tool_info["result"]
+
+        return result
+
+    def _normalize_event_data(self, action_name: str, input_data: dict[str, Any]) -> dict[str, Any]:
+        """Normalize Windsurf event data for CLI-agnostic processing.
+
+        Windsurf nests tool details in `tool_info`, which needs to be flattened
+        and normalized for unified processing.
+
+        Args:
+            action_name: The Windsurf action name
+            input_data: Raw input data from Windsurf
+
+        Returns:
+            Enriched data dict with normalized fields added
+        """
+        # Start with a copy to avoid mutating the original
+        data = dict(input_data)
+
+        # Extract and normalize tool_info if present
+        tool_info = data.get("tool_info", {})
+        if tool_info:
+            normalized = self._extract_tool_info(action_name, tool_info)
+            # Merge normalized fields (don't overwrite existing)
+            for key, value in normalized.items():
+                if key not in data:
+                    data[key] = value
+
+        # Store original action name for reference
+        data["original_action"] = action_name
+
+        return data
+
+    def translate_to_hook_event(self, native_event: dict[str, Any]) -> HookEvent:
+        """Convert Windsurf native event to unified HookEvent.
+
+        Windsurf payloads have the structure:
+        {
+            "hook_type": "post_write_code",  # or via agent_action_name
+            "input_data": {
+                "session_id": "abc123",
+                "cwd": "/path/to/project",
+                "agent_action_name": "post_write_code",
+                "tool_info": {
+                    "file_path": "/path/to/file.py",
+                    "edits": [{"old_string": "...", "new_string": "..."}]
+                }
+            }
+        }
+
+        Args:
+            native_event: Raw payload from the Windsurf hook dispatcher
+
+        Returns:
+            Unified HookEvent with normalized fields.
+        """
+        # Get hook type - could be in hook_type or agent_action_name
+        hook_type = native_event.get("hook_type", "")
+        input_data = native_event.get("input_data", {})
+
+        # Windsurf might use agent_action_name in input_data
+        if not hook_type:
+            hook_type = input_data.get("agent_action_name", "")
+
+        # Map Windsurf action to unified event type
+        # Fall back to NOTIFICATION for unknown types (fail-open)
+        event_type = self.EVENT_MAP.get(hook_type, HookEventType.NOTIFICATION)
+
+        # Extract session_id
+        session_id = input_data.get("session_id", "")
+
+        # Check for errors
+        tool_info = input_data.get("tool_info", {})
+        is_error = False
+        if isinstance(tool_info, dict):
+            exit_code = tool_info.get("exit_code")
+            if exit_code is not None and exit_code != 0:
+                is_error = True
+
+        metadata = {"is_failure": is_error} if is_error else {}
+
+        # Normalize event data for CLI-agnostic processing
+        normalized_data = self._normalize_event_data(hook_type, input_data)
+
+        return HookEvent(
+            event_type=event_type,
+            session_id=session_id,
+            source=self.source,
+            timestamp=datetime.now(UTC),
+            machine_id=input_data.get("machine_id"),
+            cwd=input_data.get("cwd"),
+            data=normalized_data,
+            metadata=metadata,
+        )
+
+    def translate_from_hook_response(
+        self, response: HookResponse, hook_type: str | None = None
+    ) -> dict[str, Any]:
+        """Convert HookResponse to Windsurf's expected format.
+
+        Windsurf expects responses in this format:
+        {
+            "decision": "allow" | "deny",
+            "reason": "...",
+            "context": "..."  # Context to inject
+        }
+
+        Args:
+            response: Unified HookResponse from HookManager.
+            hook_type: Original Windsurf action name (e.g., "post_write_code")
+
+        Returns:
+            Dict in Windsurf's expected format.
+        """
+        # Map decision - Windsurf uses allow/deny
+        if response.decision in ("deny", "block"):
+            decision = "deny"
+        else:
+            decision = "allow"
+
+        result: dict[str, Any] = {
+            "decision": decision,
+        }
+
+        # Add reason if present
+        if response.reason:
+            result["reason"] = response.reason
+
+        # Add system message if present
+        if response.system_message:
+            result["systemMessage"] = response.system_message
+
+        # Build context for injection
+        context_parts: list[str] = []
+
+        # Add workflow-injected context
+        if response.context:
+            context_parts.append(response.context)
+
+        # Add session identifiers from metadata
+        if response.metadata:
+            gobby_session_id = response.metadata.get("session_id")
+            session_ref = response.metadata.get("session_ref")
+            external_id = response.metadata.get("external_id")
+            is_first_hook = response.metadata.get("_first_hook_for_session", False)
+
+            if gobby_session_id:
+                if is_first_hook:
+                    # First hook: inject full metadata
+                    context_lines = []
+                    if session_ref:
+                        context_lines.append(
+                            f"Gobby Session ID: {session_ref} (or {gobby_session_id})"
+                        )
+                    else:
+                        context_lines.append(f"Gobby Session ID: {gobby_session_id}")
+                    if external_id:
+                        context_lines.append(
+                            f"CLI-Specific Session ID (external_id): {external_id}"
+                        )
+                    if response.metadata.get("parent_session_id"):
+                        context_lines.append(
+                            f"parent_session_id: {response.metadata['parent_session_id']}"
+                        )
+                    if response.metadata.get("machine_id"):
+                        context_lines.append(f"machine_id: {response.metadata['machine_id']}")
+                    if response.metadata.get("project_id"):
+                        context_lines.append(f"project_id: {response.metadata['project_id']}")
+                    context_parts.append("\n".join(context_lines))
+                else:
+                    # Subsequent hooks: inject minimal session ref only
+                    if session_ref:
+                        context_parts.append(f"Gobby Session ID: {session_ref}")
+
+        # Add context if we have any
+        if context_parts:
+            result["context"] = "\n\n".join(context_parts)
+
+        return result
+
+    def handle_native(
+        self, native_event: dict[str, Any], hook_manager: "HookManager"
+    ) -> dict[str, Any]:
+        """Main entry point for HTTP endpoint.
+
+        Translates the native Windsurf event, processes it through HookManager,
+        and returns the response in Windsurf's expected format.
+
+        Args:
+            native_event: Raw payload from the Windsurf hook dispatcher
+            hook_manager: HookManager instance for processing.
+
+        Returns:
+            Response dict in Windsurf's expected format.
+        """
+        # Translate to unified HookEvent
+        hook_event = self.translate_to_hook_event(native_event)
+
+        # Get original hook type for response formatting
+        hook_type = native_event.get("hook_type", "")
+        if not hook_type:
+            hook_type = native_event.get("input_data", {}).get("agent_action_name", "")
+
+        # Process through HookManager
+        hook_response = hook_manager.handle(hook_event)
+
+        # Translate response back to Windsurf format
+        return self.translate_from_hook_response(hook_response, hook_type=hook_type)
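
A minimal sketch of how the new adapter normalizes a Cascade payload, based on the docstrings above. The payload values are hypothetical, and it assumes HookEvent exposes its constructor fields as attributes; this is illustrative, not part of the package.

from gobby.adapters.windsurf import WindsurfAdapter

# Hypothetical Cascade payload shaped like the docstring example above.
native_event = {
    "hook_type": "post_run_command",
    "input_data": {
        "session_id": "abc123",
        "cwd": "/path/to/project",
        "agent_action_name": "post_run_command",
        "tool_info": {"command": "pytest -q", "output": "2 passed", "exit_code": 0},
    },
}

adapter = WindsurfAdapter()  # no HookManager: translate-only mode
event = adapter.translate_to_hook_event(native_event)

# post_run_command maps to AFTER_TOOL and the "Bash" tool name via EVENT_MAP/TOOL_MAP.
print(event.event_type)          # HookEventType.AFTER_TOOL
print(event.data["tool_name"])   # "Bash"
print(event.data["tool_input"])  # {"command": "pytest -q"}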
gobby/agents/definitions.py CHANGED
@@ -19,9 +19,76 @@ from gobby.utils.project_context import get_project_context
 logger = logging.getLogger(__name__)


+class WorkflowSpec(BaseModel):
+    """
+    Workflow specification - either a file reference or inline definition.
+
+    Supports two modes:
+    1. File reference: `file: "workflow-name.yaml"` - loads from workflow search paths
+    2. Inline definition: Full workflow definition embedded in agent YAML
+
+    Examples:
+        # File reference
+        workflows:
+          box:
+            file: meeseeks-box.yaml
+
+        # Inline definition
+        workflows:
+          worker:
+            type: step
+            steps:
+              - name: work
+                description: "Do the work"
+    """
+
+    # File reference mode
+    file: str | None = None
+
+    # Inline workflow fields (subset of WorkflowDefinition)
+    type: Literal["step", "lifecycle", "pipeline"] | None = None
+    name: str | None = None
+    description: str | None = None
+    version: str = "1.0"
+    variables: dict[str, Any] = Field(default_factory=dict)
+    steps: list[dict[str, Any]] = Field(default_factory=list)
+    exit_condition: str | None = None
+    on_premature_stop: dict[str, Any] | None = None
+    settings: dict[str, Any] = Field(default_factory=dict)
+
+    # Pipeline-specific fields
+    inputs: dict[str, Any] = Field(default_factory=dict)
+    outputs: dict[str, Any] = Field(default_factory=dict)
+
+    # Execution mode override for this workflow
+    # Allows per-workflow control over how the workflow is executed
+    mode: Literal["terminal", "embedded", "headless", "self"] | None = None
+
+    def is_file_reference(self) -> bool:
+        """Check if this spec is a file reference vs inline definition."""
+        return self.file is not None
+
+    def is_inline(self) -> bool:
+        """Check if this spec is an inline definition."""
+        return self.file is None and (self.type is not None or len(self.steps) > 0)
+
+
 class AgentDefinition(BaseModel):
     """
     Configuration for a named agent.
+
+    Supports named workflows via the `workflows` map, allowing a single agent
+    definition to contain multiple workflow configurations selectable at spawn time.
+
+    Example:
+        name: meeseeks
+        workflows:
+          box:
+            file: meeseeks-box.yaml
+          worker:
+            type: step
+            steps: [...]
+        default_workflow: box
     """

     name: str
@@ -30,7 +97,7 @@ class AgentDefinition(BaseModel):
     # Execution parameters
     model: str | None = None
     mode: str = "headless"  # Default to headless for stability
-    provider: str = "claude"  # Provider: claude, gemini, codex
+    provider: str = "claude"  # Provider: claude, gemini, codex, cursor, windsurf, copilot

     # Isolation configuration
     isolation: Literal["current", "worktree", "clone"] | None = None
@@ -40,7 +107,14 @@ class AgentDefinition(BaseModel):
     # Sandbox configuration
     sandbox: SandboxConfig | None = None

-    # Workflow configuration
+    # Named workflows map
+    # Keys are workflow names, values are WorkflowSpec (file ref or inline)
+    workflows: dict[str, WorkflowSpec] | None = None
+
+    # Default workflow name (key in workflows map)
+    default_workflow: str | None = None
+
+    # Legacy: single workflow reference (for backwards compatibility)
     workflow: str | None = None

     # Lifecycle variables to override parent's lifecycle settings
@@ -53,6 +127,92 @@ class AgentDefinition(BaseModel):
     timeout: float = 120.0
     max_turns: int = 10

+    def get_workflow_spec(self, workflow_name: str | None = None) -> WorkflowSpec | None:
+        """
+        Get a workflow spec by name, or the default workflow.
+
+        Args:
+            workflow_name: Name of workflow to get. If None, returns default_workflow.
+
+        Returns:
+            WorkflowSpec if found, None otherwise.
+        """
+        if not self.workflows:
+            return None
+
+        name = workflow_name or self.default_workflow
+        if not name:
+            return None
+
+        return self.workflows.get(name)
+
+    def get_effective_workflow(self, workflow_name: str | None = None) -> str | None:
+        """
+        Get the effective workflow name/file for spawning.
+
+        Resolution order:
+        1. If workflow_name specified and in workflows map -> resolve that spec
+        2. If workflow_name specified but NOT in map -> return workflow_name (external ref)
+        3. If no workflow_name -> check default_workflow in workflows map
+        4. Fallback to legacy `workflow` field
+
+        Args:
+            workflow_name: Explicit workflow name parameter
+
+        Returns:
+            Workflow name/file to use, or None if no workflow configured.
+        """
+        # Check if workflow_name matches a named workflow in the map
+        if workflow_name and self.workflows and workflow_name in self.workflows:
+            spec = self.workflows[workflow_name]
+            if spec.is_file_reference():
+                # Return the file reference (without .yaml extension if present)
+                file_name = spec.file or ""
+                return file_name.removesuffix(".yaml")
+            else:
+                # Inline workflow - return qualified name for registration
+                return f"{self.name}:{workflow_name}"
+
+        # If workflow_name specified but not in map, treat as external workflow reference
+        if workflow_name:
+            return workflow_name
+
+        # Try default_workflow from map
+        if self.default_workflow and self.workflows and self.default_workflow in self.workflows:
+            spec = self.workflows[self.default_workflow]
+            if spec.is_file_reference():
+                file_name = spec.file or ""
+                return file_name.removesuffix(".yaml")
+            else:
+                return f"{self.name}:{self.default_workflow}"
+
+        # Fallback to legacy workflow field
+        return self.workflow
+
+    def get_effective_mode(
+        self, workflow_name: str | None = None
+    ) -> Literal["terminal", "embedded", "headless", "self"]:
+        """
+        Get the effective execution mode for a workflow.
+
+        Resolution:
+        1. Check if the specified workflow has a mode in its WorkflowSpec
+        2. Fall back to agent-level mode
+
+        Args:
+            workflow_name: Workflow name to check
+
+        Returns:
+            Execution mode to use
+        """
+        # Check workflow-specific mode
+        spec = self.get_workflow_spec(workflow_name)
+        if spec and spec.mode:
+            return spec.mode
+
+        # Fall back to agent-level mode
+        return self.mode  # type: ignore[return-value]
+

 class AgentDefinitionLoader:
     """
gobby/agents/isolation.py CHANGED
@@ -154,6 +154,8 @@ class WorktreeIsolationHandler(IsolationHandler):

        - Generate branch name if not provided
        - Check for existing worktree for the branch
+       - Determine base branch (use parent's current branch if not specified)
+       - Check for unpushed commits and use local ref if needed
        - Create new worktree if needed
        - Return IsolationContext with worktree info
        """
@@ -171,6 +173,29 @@ class WorktreeIsolationHandler(IsolationHandler):
            extra={"main_repo_path": self._git_manager.repo_path},
        )

+        # Determine base branch - use parent's current branch if default "main" was passed
+        base_branch = config.base_branch
+        use_local = False
+
+        # If base_branch is the default "main", check if parent is on a different branch
+        current_branch = self._git_manager.get_current_branch()
+        if current_branch and base_branch == "main" and current_branch != "main":
+            # Use parent's current branch instead
+            base_branch = current_branch
+
+        # Check for unpushed commits on the base branch
+        has_unpushed, unpushed_count = self._git_manager.has_unpushed_commits(base_branch)
+        if has_unpushed:
+            # Use local branch ref to preserve unpushed commits
+            use_local = True
+            import logging
+
+            logger = logging.getLogger(__name__)
+            logger.info(
+                f"Using local branch '{base_branch}' for worktree "
+                f"({unpushed_count} unpushed commits)"
+            )
+
        # Generate worktree path
        from pathlib import Path

@@ -181,8 +206,9 @@ class WorktreeIsolationHandler(IsolationHandler):
        result = self._git_manager.create_worktree(
            worktree_path=worktree_path,
            branch_name=branch_name,
-           base_branch=config.base_branch,
+           base_branch=base_branch,
            create_branch=True,
+           use_local=use_local,
        )

        if not result.success:
@@ -271,6 +297,9 @@ Commit your changes to the worktree branch when done.
            "gemini": ".gemini",
            "claude": ".claude",
            "codex": ".codex",
+           "cursor": ".claude",
+           "windsurf": ".claude",
+           "copilot": ".claude",
        }

        cli_dir = cli_dirs.get(provider)
@@ -449,6 +478,9 @@ Push your changes when ready to share with the original.
            "gemini": ".gemini",
            "claude": ".claude",
            "codex": ".codex",
+           "cursor": ".claude",
+           "windsurf": ".claude",
+           "copilot": ".claude",
        }

        cli_dir = cli_dirs.get(provider)
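
The base-branch decision added above can be summarized as a tiny standalone helper. The function below is hypothetical and only restates the logic from the diff for illustration; the actual checks live in WorktreeIsolationHandler and GitWorktreeManager.

def resolve_worktree_base(
    config_base: str, current_branch: str | None, has_unpushed: bool
) -> tuple[str, bool]:
    """Illustrative restatement of the base-branch decision added above."""
    base = config_base
    # Only override the default "main" when the parent sits on another branch.
    if current_branch and config_base == "main" and current_branch != "main":
        base = current_branch
    # Branch the worktree from the local ref when the base has unpushed commits.
    return base, has_unpushed

# Parent checked out on "feature/x" with unpushed commits -> the worktree is created
# from the local "feature/x" ref (use_local=True is passed to create_worktree).
assert resolve_worktree_base("main", "feature/x", True) == ("feature/x", True)
# An explicitly configured, non-default base branch is respected as-is.
assert resolve_worktree_base("develop", "feature/x", False) == ("develop", False)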