gobby 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. gobby/__init__.py +1 -1
  2. gobby/adapters/__init__.py +6 -0
  3. gobby/adapters/base.py +11 -2
  4. gobby/adapters/claude_code.py +2 -2
  5. gobby/adapters/codex_impl/adapter.py +38 -43
  6. gobby/adapters/copilot.py +324 -0
  7. gobby/adapters/cursor.py +373 -0
  8. gobby/adapters/gemini.py +2 -26
  9. gobby/adapters/windsurf.py +359 -0
  10. gobby/agents/definitions.py +162 -2
  11. gobby/agents/isolation.py +33 -1
  12. gobby/agents/pty_reader.py +192 -0
  13. gobby/agents/registry.py +10 -1
  14. gobby/agents/runner.py +24 -8
  15. gobby/agents/sandbox.py +8 -3
  16. gobby/agents/session.py +4 -0
  17. gobby/agents/spawn.py +9 -2
  18. gobby/agents/spawn_executor.py +49 -61
  19. gobby/agents/spawners/command_builder.py +4 -4
  20. gobby/app_context.py +5 -0
  21. gobby/cli/__init__.py +4 -0
  22. gobby/cli/install.py +259 -4
  23. gobby/cli/installers/__init__.py +12 -0
  24. gobby/cli/installers/copilot.py +242 -0
  25. gobby/cli/installers/cursor.py +244 -0
  26. gobby/cli/installers/shared.py +3 -0
  27. gobby/cli/installers/windsurf.py +242 -0
  28. gobby/cli/pipelines.py +639 -0
  29. gobby/cli/sessions.py +3 -1
  30. gobby/cli/skills.py +209 -0
  31. gobby/cli/tasks/crud.py +6 -5
  32. gobby/cli/tasks/search.py +1 -1
  33. gobby/cli/ui.py +116 -0
  34. gobby/cli/workflows.py +38 -17
  35. gobby/config/app.py +5 -0
  36. gobby/config/skills.py +23 -2
  37. gobby/hooks/broadcaster.py +9 -0
  38. gobby/hooks/event_handlers/_base.py +6 -1
  39. gobby/hooks/event_handlers/_session.py +44 -130
  40. gobby/hooks/events.py +48 -0
  41. gobby/hooks/hook_manager.py +25 -3
  42. gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
  43. gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
  44. gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
  45. gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
  46. gobby/llm/__init__.py +14 -1
  47. gobby/llm/claude.py +217 -1
  48. gobby/llm/service.py +149 -0
  49. gobby/mcp_proxy/instructions.py +9 -27
  50. gobby/mcp_proxy/models.py +1 -0
  51. gobby/mcp_proxy/registries.py +56 -9
  52. gobby/mcp_proxy/server.py +6 -2
  53. gobby/mcp_proxy/services/tool_filter.py +7 -0
  54. gobby/mcp_proxy/services/tool_proxy.py +19 -1
  55. gobby/mcp_proxy/stdio.py +37 -21
  56. gobby/mcp_proxy/tools/agents.py +7 -0
  57. gobby/mcp_proxy/tools/hub.py +30 -1
  58. gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
  59. gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
  60. gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
  61. gobby/mcp_proxy/tools/orchestration/review.py +17 -4
  62. gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
  63. gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
  64. gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
  65. gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
  66. gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
  67. gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
  68. gobby/mcp_proxy/tools/skills/__init__.py +184 -30
  69. gobby/mcp_proxy/tools/spawn_agent.py +229 -14
  70. gobby/mcp_proxy/tools/tasks/_context.py +8 -0
  71. gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
  72. gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
  73. gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
  74. gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
  75. gobby/mcp_proxy/tools/tasks/_search.py +1 -1
  76. gobby/mcp_proxy/tools/workflows/__init__.py +9 -2
  77. gobby/mcp_proxy/tools/workflows/_lifecycle.py +12 -1
  78. gobby/mcp_proxy/tools/workflows/_query.py +45 -26
  79. gobby/mcp_proxy/tools/workflows/_terminal.py +39 -3
  80. gobby/mcp_proxy/tools/worktrees.py +54 -15
  81. gobby/memory/context.py +5 -5
  82. gobby/runner.py +108 -6
  83. gobby/servers/http.py +7 -1
  84. gobby/servers/routes/__init__.py +2 -0
  85. gobby/servers/routes/admin.py +44 -0
  86. gobby/servers/routes/mcp/endpoints/execution.py +18 -25
  87. gobby/servers/routes/mcp/hooks.py +10 -1
  88. gobby/servers/routes/pipelines.py +227 -0
  89. gobby/servers/websocket.py +314 -1
  90. gobby/sessions/analyzer.py +87 -1
  91. gobby/sessions/manager.py +5 -5
  92. gobby/sessions/transcripts/__init__.py +3 -0
  93. gobby/sessions/transcripts/claude.py +5 -0
  94. gobby/sessions/transcripts/codex.py +5 -0
  95. gobby/sessions/transcripts/gemini.py +5 -0
  96. gobby/skills/hubs/__init__.py +25 -0
  97. gobby/skills/hubs/base.py +234 -0
  98. gobby/skills/hubs/claude_plugins.py +328 -0
  99. gobby/skills/hubs/clawdhub.py +289 -0
  100. gobby/skills/hubs/github_collection.py +465 -0
  101. gobby/skills/hubs/manager.py +263 -0
  102. gobby/skills/hubs/skillhub.py +342 -0
  103. gobby/storage/memories.py +4 -4
  104. gobby/storage/migrations.py +95 -3
  105. gobby/storage/pipelines.py +367 -0
  106. gobby/storage/sessions.py +23 -4
  107. gobby/storage/skills.py +1 -1
  108. gobby/storage/tasks/_aggregates.py +2 -2
  109. gobby/storage/tasks/_lifecycle.py +4 -4
  110. gobby/storage/tasks/_models.py +7 -1
  111. gobby/storage/tasks/_queries.py +3 -3
  112. gobby/sync/memories.py +4 -3
  113. gobby/tasks/commits.py +48 -17
  114. gobby/workflows/actions.py +75 -0
  115. gobby/workflows/context_actions.py +246 -5
  116. gobby/workflows/definitions.py +119 -1
  117. gobby/workflows/detection_helpers.py +23 -11
  118. gobby/workflows/enforcement/task_policy.py +18 -0
  119. gobby/workflows/engine.py +20 -1
  120. gobby/workflows/evaluator.py +8 -5
  121. gobby/workflows/lifecycle_evaluator.py +57 -26
  122. gobby/workflows/loader.py +567 -30
  123. gobby/workflows/lobster_compat.py +147 -0
  124. gobby/workflows/pipeline_executor.py +801 -0
  125. gobby/workflows/pipeline_state.py +172 -0
  126. gobby/workflows/pipeline_webhooks.py +206 -0
  127. gobby/workflows/premature_stop.py +5 -0
  128. gobby/worktrees/git.py +135 -20
  129. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
  130. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/RECORD +134 -106
  131. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
  132. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
  133. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
  134. {gobby-0.2.9.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
gobby/llm/service.py CHANGED
@@ -6,7 +6,9 @@ Gemini, LiteLLM) based on the multi-provider config structure with feature-speci
 provider routing.
 """
 
+import asyncio
 import logging
+from collections.abc import AsyncIterator
 from typing import TYPE_CHECKING, Any
 
 if TYPE_CHECKING:
@@ -14,6 +16,7 @@ if TYPE_CHECKING:
         DaemonConfig,
     )
     from gobby.llm.base import LLMProvider
+    from gobby.llm.claude import ChatEvent
 
 logger = logging.getLogger(__name__)
 
@@ -234,3 +237,149 @@ class LLMService:
         enabled = self.enabled_providers
         initialized = self.initialized_providers
         return f"LLMService(enabled={enabled}, initialized={initialized})"
+
+    async def stream_chat(
+        self,
+        messages: list[dict[str, str]],
+        provider_name: str | None = None,
+        model: str | None = None,
+    ) -> AsyncIterator[str]:
+        """
+        Stream a chat response from the LLM.
+
+        Takes messages in OpenAI-style format and yields response chunks.
+        Currently simulates streaming by chunking the full response.
+        Real streaming support can be added per-provider later.
+
+        Args:
+            messages: List of message dicts with 'role' and 'content' keys
+            provider_name: Optional provider to use (defaults to default provider)
+            model: Optional model override
+
+        Yields:
+            String chunks of the response
+
+        Example:
+            messages = [
+                {"role": "system", "content": "You are helpful."},
+                {"role": "user", "content": "Hello!"}
+            ]
+            async for chunk in service.stream_chat(messages):
+                print(chunk, end="", flush=True)
+        """
+        # Get provider
+        if provider_name:
+            provider = self.get_provider(provider_name)
+        else:
+            provider = self.get_default_provider()
+
+        # Build prompt from messages
+        system_prompt = None
+        user_messages = []
+
+        for msg in messages:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+
+            if role == "system":
+                system_prompt = content
+            else:
+                prefix = "User: " if role == "user" else "Assistant: "
+                user_messages.append(f"{prefix}{content}")
+
+        prompt = "\n\n".join(user_messages)
+        if user_messages:
+            prompt += "\n\nAssistant:"
+
+        # Generate full response
+        response = await provider.generate_text(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            model=model,
+        )
+
+        # Simulate streaming by yielding words with small delays
+        # This provides a better UX while we add real streaming later
+        words = response.split(" ")
+        for i, word in enumerate(words):
+            if i > 0:
+                yield " "
+            yield word
+            # Small delay to simulate streaming (5-15ms per word)
+            await asyncio.sleep(0.008)
+
+    async def stream_chat_with_tools(
+        self,
+        messages: list[dict[str, str]],
+        allowed_tools: list[str],
+        model: str | None = None,
+        max_turns: int = 10,
+    ) -> AsyncIterator["ChatEvent"]:
+        """
+        Stream a chat response with MCP tool support.
+
+        Takes messages in OpenAI-style format and streams response events
+        including text chunks and tool call/result events.
+
+        This method uses the Claude provider's stream_with_mcp_tools(),
+        which requires subscription mode (Claude Agent SDK).
+
+        Args:
+            messages: List of message dicts with 'role' and 'content' keys
+            allowed_tools: List of allowed MCP tool patterns.
+                Tools should be in format "mcp__{server}__{tool}" or patterns
+                like "mcp__gobby-tasks__*" for all tools from a server.
+            model: Optional model override
+            max_turns: Maximum number of agentic turns (default: 10)
+
+        Yields:
+            ChatEvent: One of TextChunk, ToolCallEvent, ToolResultEvent, or DoneEvent.
+
+        Example:
+            >>> allowed_tools = ["mcp__gobby-tasks__*", "mcp__gobby-memory__*"]
+            >>> async for event in service.stream_chat_with_tools(messages, allowed_tools):
+            ...     if isinstance(event, TextChunk):
+            ...         print(event.content, end="")
+        """
+        from gobby.llm.claude import ClaudeLLMProvider, DoneEvent, TextChunk
+
+        # Get Claude provider (required for MCP tools)
+        try:
+            provider = self.get_provider("claude")
+        except ValueError:
+            yield TextChunk(content="Claude provider not configured. MCP tools require Claude.")
+            yield DoneEvent(tool_calls_count=0)
+            return
+
+        if not isinstance(provider, ClaudeLLMProvider):
+            yield TextChunk(content="MCP tools require Claude provider.")
+            yield DoneEvent(tool_calls_count=0)
+            return
+
+        # Build system prompt and user prompt from messages
+        system_prompt = None
+        user_messages = []
+
+        for msg in messages:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+
+            if role == "system":
+                system_prompt = content
+            else:
+                prefix = "User: " if role == "user" else "Assistant: "
+                user_messages.append(f"{prefix}{content}")
+
+        prompt = "\n\n".join(user_messages)
+        if user_messages:
+            prompt += "\n\nAssistant:"
+
+        # Stream with MCP tools
+        async for event in provider.stream_with_mcp_tools(
+            prompt=prompt,
+            allowed_tools=allowed_tools,
+            system_prompt=system_prompt,
+            model=model,
+            max_turns=max_turns,
+        ):
+            yield event
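A minimal consumer sketch for the two new streaming methods, based only on the signatures and event names visible in the hunk above; the `service` argument, the import location of ToolCallEvent, and any fields on it are assumptions rather than part of the diff.

from gobby.llm.claude import DoneEvent, TextChunk, ToolCallEvent  # assumed location for ToolCallEvent


async def demo(service) -> None:
    messages = [
        {"role": "system", "content": "You are helpful."},
        {"role": "user", "content": "Summarize my open tasks."},
    ]

    # Plain streaming: chunks are strings.
    async for chunk in service.stream_chat(messages):
        print(chunk, end="", flush=True)

    # Tool-enabled streaming: chunks are ChatEvent objects.
    async for event in service.stream_chat_with_tools(messages, ["mcp__gobby-tasks__*"]):
        if isinstance(event, TextChunk):
            print(event.content, end="", flush=True)
        elif isinstance(event, ToolCallEvent):
            print("\n[tool call]")  # ToolCallEvent payload fields are not shown in this diff
        elif isinstance(event, DoneEvent):
            print(f"\n[done after {event.tool_calls_count} tool calls]")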
gobby/mcp_proxy/instructions.py CHANGED
@@ -6,33 +6,16 @@ These instructions are injected into the MCP server via FastMCP's `instructions`
 
 
 def build_gobby_instructions() -> str:
-    """Build XML-structured instructions for Gobby MCP server.
+    """Build compact instructions for Gobby MCP server.
 
-    These instructions teach agents how to use Gobby correctly.
-    Every agent connecting to Gobby receives these automatically.
-
-    The instructions cover:
-    - Session startup sequence
-    - Progressive tool disclosure pattern
-    - Progressive skill disclosure pattern
-    - Critical rules for task management
+    Provides minimal guidance for progressive tool disclosure, caching, and task rules.
+    Startup sequence and skill discovery are now handled via workflow injection.
 
     Returns:
-        XML-structured instructions string
+        XML-structured instructions string (~120 tokens)
    """
     return """<gobby_system>
 
-<startup>
-At the start of EVERY session:
-1. `list_mcp_servers()` — Discover available servers
-2. `list_skills()` — Discover available skills
-3. Session ID: Look for `Gobby Session Ref:` or `Gobby Session ID:` in your context.
-   If missing, call:
-   `call_tool("gobby-sessions", "get_current_session", {"external_id": "<your-session-id>", "source": "<cli-name>"})`
-
-Session and task references use `#N` format (e.g., `#1`, `#42`) which is project-scoped.
-</startup>
-
 <tool_discovery>
 NEVER assume tool schemas. Use progressive disclosure:
 1. `list_tools(server="...")` — Lightweight metadata (~100 tokens/tool)
@@ -40,12 +23,11 @@ NEVER assume tool schemas. Use progressive disclosure:
 3. `call_tool(server, tool, args)` — Execute
 </tool_discovery>
 
-<skill_discovery>
-Skills provide detailed guidance. Use progressive disclosure:
-1. `list_skills()` — Already done at startup
-2. `get_skill(name="...")` — Full content when needed
-3. `search_skills(query="...")` — Find by task description
-</skill_discovery>
+<caching>
+Schema fetches are cached per session. Once you call `get_tool_schema(server, tool)`,
+you can `call_tool` that same server:tool repeatedly WITHOUT re-fetching the schema.
+Do NOT call list_tools or get_tool_schema before every call_tool — only on first use.
+</caching>
 
 <rules>
 - Create/claim a task before using Edit, Write, or NotebookEdit tools
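The new caching rule reduces to a simple call pattern. A hedged sketch, assuming a generic client object whose methods mirror the tool names in the instructions; the `create_task` tool and its `title` argument are illustrative only.

def create_tasks(gobby, titles: list[str]) -> None:
    """Progressive disclosure: list once, fetch the schema once, then call repeatedly."""
    gobby.list_tools(server="gobby-tasks")               # lightweight metadata, ~100 tokens/tool
    gobby.get_tool_schema("gobby-tasks", "create_task")  # full schema, fetched only on first use
    for title in titles:
        # The schema is cached for the session, so there is no re-fetch before each call.
        gobby.call_tool("gobby-tasks", "create_task", {"title": title})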
gobby/mcp_proxy/models.py CHANGED
@@ -41,6 +41,7 @@ class ToolProxyErrorCode(str, Enum):
     SERVER_NOT_FOUND = "SERVER_NOT_FOUND"
     SERVER_NOT_CONFIGURED = "SERVER_NOT_CONFIGURED"
     TOOL_NOT_FOUND = "TOOL_NOT_FOUND"
+    TOOL_BLOCKED = "TOOL_BLOCKED"
     INVALID_ARGUMENTS = "INVALID_ARGUMENTS"
     EXECUTION_ERROR = "EXECUTION_ERROR"
     CONNECTION_ERROR = "CONNECTION_ERROR"
gobby/mcp_proxy/registries.py CHANGED
@@ -17,14 +17,18 @@ if TYPE_CHECKING:
     from gobby.memory.manager import MemoryManager
     from gobby.sessions.manager import SessionManager
     from gobby.storage.clones import LocalCloneManager
+    from gobby.storage.database import DatabaseProtocol
     from gobby.storage.inter_session_messages import InterSessionMessageManager
     from gobby.storage.merge_resolutions import MergeResolutionManager
+    from gobby.storage.pipelines import LocalPipelineExecutionManager
     from gobby.storage.session_messages import LocalSessionMessageManager
     from gobby.storage.sessions import LocalSessionManager
     from gobby.storage.tasks import LocalTaskManager
     from gobby.storage.worktrees import LocalWorktreeManager
     from gobby.sync.tasks import TaskSyncManager
     from gobby.tasks.validation import TaskValidator
+    from gobby.workflows.loader import WorkflowLoader
+    from gobby.workflows.pipeline_executor import PipelineExecutor
     from gobby.worktrees.git import WorktreeGitManager
     from gobby.worktrees.merge import MergeResolver
 
@@ -36,6 +40,7 @@ def setup_internal_registries(
     _session_manager: SessionManager | None = None,
     memory_manager: MemoryManager | None = None,
     task_manager: LocalTaskManager | None = None,
+    db: DatabaseProtocol | None = None,
     sync_manager: TaskSyncManager | None = None,
     task_validator: TaskValidator | None = None,
     message_manager: LocalSessionMessageManager | None = None,
@@ -51,6 +56,9 @@ def setup_internal_registries(
     project_id: str | None = None,
     tool_proxy_getter: Callable[[], ToolProxyService | None] | None = None,
     inter_session_message_manager: InterSessionMessageManager | None = None,
+    pipeline_executor: PipelineExecutor | None = None,
+    workflow_loader: WorkflowLoader | None = None,
+    pipeline_execution_manager: LocalPipelineExecutionManager | None = None,
 ) -> InternalRegistryManager:
     """
     Setup internal MCP registries (tasks, messages, memory, metrics, agents, worktrees).
@@ -60,6 +68,7 @@ def setup_internal_registries(
         _session_manager: Session manager (reserved for future use)
         memory_manager: Memory manager for memory operations
         task_manager: Task storage manager
+        db: Database connection for registries that only need storage (skills, artifacts)
         sync_manager: Task sync manager for git sync
         task_validator: Task validator for validation
         message_manager: Message storage manager
@@ -75,6 +84,9 @@ def setup_internal_registries(
         tool_proxy_getter: Callable that returns ToolProxyService for routing
            tool calls in in-process agents. Called lazily during agent execution.
         inter_session_message_manager: Inter-session message manager for agent messaging
+        pipeline_executor: Pipeline executor for running pipelines
+        workflow_loader: Workflow loader for loading pipeline definitions
+        pipeline_execution_manager: Pipeline execution manager for tracking executions
 
     Returns:
         InternalRegistryManager containing all registries
@@ -191,6 +203,9 @@ def setup_internal_registries(
         git_manager=git_manager,
         clone_storage=clone_storage,
         clone_manager=clone_git_manager,
+        # For mode=self (workflow activation on caller session)
+        workflow_loader=workflow_loader,
+        db=db,
     )
 
     # Add inter-agent messaging tools if message manager and session manager are available
@@ -268,32 +283,64 @@ def setup_internal_registries(
     manager.add_registry(hub_registry)
     logger.debug("Hub registry initialized")
 
-    # Initialize skills registry using the existing database from task_manager
-    # to avoid creating a duplicate connection that would leak
-    if task_manager is not None:
+    # Initialize skills registry if database is available
+    if db is not None:
+        from gobby.config.skills import SkillsConfig
         from gobby.mcp_proxy.tools.skills import create_skills_registry
+        from gobby.skills.hubs import (
+            ClaudePluginsProvider,
+            ClawdHubProvider,
+            GitHubCollectionProvider,
+            HubManager,
+            SkillHubProvider,
+        )
+
+        # Get skills config (or use defaults)
+        skills_config = _config.skills if _config and hasattr(_config, "skills") else SkillsConfig()
+
+        # Create hub manager with configured hubs
+        hub_manager = HubManager(configs=skills_config.hubs)
+
+        # Register provider factories
+        hub_manager.register_provider_factory("clawdhub", ClawdHubProvider)
+        hub_manager.register_provider_factory("skillhub", SkillHubProvider)
+        hub_manager.register_provider_factory("github-collection", GitHubCollectionProvider)
+        hub_manager.register_provider_factory("claude-plugins", ClaudePluginsProvider)
 
         skills_registry = create_skills_registry(
-            db=task_manager.db,
+            db=db,
             project_id=project_id,
+            hub_manager=hub_manager,
         )
         manager.add_registry(skills_registry)
         logger.debug("Skills registry initialized")
     else:
-        logger.debug("Skills registry not initialized: task_manager is None")
+        logger.debug("Skills registry not initialized: db is None")
 
-    # Initialize artifacts registry using the existing database from task_manager
-    if task_manager is not None:
+    # Initialize artifacts registry if database is available
+    if db is not None:
         from gobby.mcp_proxy.tools.artifacts import create_artifacts_registry
 
         artifacts_registry = create_artifacts_registry(
-            db=task_manager.db,
+            db=db,
            session_manager=local_session_manager,
         )
         manager.add_registry(artifacts_registry)
         logger.debug("Artifacts registry initialized")
     else:
-        logger.debug("Artifacts registry not initialized: task_manager is None")
+        logger.debug("Artifacts registry not initialized: db is None")
+
+    # Initialize pipelines registry if pipeline_executor is available
+    if pipeline_executor is not None:
+        from gobby.mcp_proxy.tools.pipelines import create_pipelines_registry
+
+        pipelines_registry = create_pipelines_registry(
+            loader=workflow_loader,
+            executor=pipeline_executor,
+            execution_manager=pipeline_execution_manager,
+        )
+        manager.add_registry(pipelines_registry)
+        logger.debug("Pipelines registry initialized")
 
     logger.info(f"Internal registries initialized: {len(manager)} registries")
     return manager
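A hedged wiring sketch for the new keyword arguments; only parameter names that appear in this diff are used, the surrounding objects are placeholders, and the many other arguments the daemon normally passes are left at their defaults.

from gobby.mcp_proxy.registries import setup_internal_registries


def build_registries(task_manager, db, workflow_loader, pipeline_executor, pipeline_execution_manager, project_id=None):
    # Only keyword names shown in the diff above; everything else keeps its default.
    return setup_internal_registries(
        task_manager=task_manager,
        project_id=project_id,
        db=db,                                   # when None, skills and artifacts registries are skipped
        workflow_loader=workflow_loader,
        pipeline_executor=pipeline_executor,     # when None, the pipelines registry is skipped
        pipeline_execution_manager=pipeline_execution_manager,
    )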
gobby/mcp_proxy/server.py CHANGED
@@ -98,6 +98,7 @@ class GobbyDaemonTools:
         server_name: str,
         tool_name: str,
         arguments: dict[str, Any] | None = None,
+        session_id: str | None = None,
     ) -> Any:
         """Call a tool.
 
@@ -105,8 +106,11 @@
         underlying service indicates an error. This ensures the MCP protocol
         properly signals errors to LLM clients instead of returning error dicts
         as successful responses.
+
+        When session_id is provided and a workflow is active, checks that the
+        tool is not blocked by the current workflow step's blocked_tools setting.
         """
-        result = await self.tool_proxy.call_tool(server_name, tool_name, arguments)
+        result = await self.tool_proxy.call_tool(server_name, tool_name, arguments, session_id)
 
         # Check if result indicates an error (ToolProxyService returns dict with success: False)
         if isinstance(result, dict) and result.get("success") is False:
@@ -382,7 +386,7 @@
 
         Args:
            event_type: Hook event type (e.g., "session_start", "before_tool")
-            source: Source CLI to simulate (claude, gemini, codex)
+            source: Source CLI to simulate (claude, gemini, codex, cursor, windsurf, copilot)
            data: Optional additional data for the event
 
         Returns:
gobby/mcp_proxy/services/tool_filter.py CHANGED
@@ -4,6 +4,8 @@ import logging
 from pathlib import Path
 from typing import TYPE_CHECKING, Any
 
+from gobby.workflows.definitions import WorkflowDefinition
+
 if TYPE_CHECKING:
     from gobby.storage.database import LocalDatabase
     from gobby.workflows.loader import WorkflowLoader
@@ -89,6 +91,11 @@ class ToolFilterService:
             logger.warning(f"Workflow '{state.workflow_name}' not found")
             return None
 
+        # Tool filtering only applies to step-based workflows
+        if not isinstance(definition, WorkflowDefinition):
+            logger.debug(f"Workflow '{state.workflow_name}' is not a step-based workflow")
+            return None
+
         step = definition.get_step(state.step)
         if not step:
             logger.warning(f"Step '{state.step}' not found in workflow '{state.workflow_name}'")
gobby/mcp_proxy/services/tool_proxy.py CHANGED
@@ -208,6 +208,7 @@ class ToolProxyService:
         server_name: str,
         tool_name: str,
         arguments: dict[str, Any] | None = None,
+        session_id: str | None = None,
     ) -> Any:
         """Execute a tool with optional pre-validation.
 
@@ -218,9 +219,24 @@
         On execution error, includes fallback_suggestions if a fallback resolver
         is configured.
 
+        When session_id is provided and a workflow is active, checks that the
+        tool is not blocked by the current workflow step's blocked_tools setting.
+
         """
         args = arguments or {}
 
+        # Check workflow tool restrictions if session_id provided
+        if session_id and self._tool_filter:
+            is_allowed, reason = self._tool_filter.is_tool_allowed(tool_name, session_id)
+            if not is_allowed:
+                return {
+                    "success": False,
+                    "error": reason,
+                    "error_code": ToolProxyErrorCode.TOOL_BLOCKED.value,
+                    "server_name": server_name,
+                    "tool_name": tool_name,
+                }
+
         # Pre-validate arguments if enabled
         if self._validate_arguments and args:
             schema_result = await self.get_tool_schema(server_name, tool_name)
@@ -361,6 +377,7 @@ class ToolProxyService:
         self,
         tool_name: str,
         arguments: dict[str, Any] | None = None,
+        session_id: str | None = None,
     ) -> Any:
         """
         Call a tool by name, automatically resolving the server.
@@ -371,6 +388,7 @@
         Args:
             tool_name: Name of the tool to call
             arguments: Tool arguments
+            session_id: Optional session ID for workflow tool restriction checks
 
         Returns:
             Tool execution result, or error dict if tool not found
@@ -386,4 +404,4 @@
             }
 
         logger.debug(f"Routing tool '{tool_name}' to server '{server_name}'")
-        return await self.call_tool(server_name, tool_name, arguments)
+        return await self.call_tool(server_name, tool_name, arguments, session_id)
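When a workflow blocks a tool, callers now receive a structured error dict rather than an exception. A hedged handling sketch: the dict keys and the TOOL_BLOCKED code come from this diff, while the tool name and arguments are illustrative.

async def call_with_block_handling(proxy, session_id: str):
    result = await proxy.call_tool("gobby-tasks", "create_task", {"title": "demo"}, session_id)
    if isinstance(result, dict) and result.get("success") is False:
        if result.get("error_code") == "TOOL_BLOCKED":
            # The active workflow step's blocked_tools setting rejected this call.
            print(f"Blocked by workflow: {result['error']}")
        else:
            print(f"Tool call failed: {result.get('error')}")
    return result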
gobby/mcp_proxy/stdio.py CHANGED
@@ -116,6 +116,16 @@ class DaemonProxy:
             "validate_task",
         ):
             timeout = 300.0
+        # Wait tools: use the requested timeout plus a buffer
+        elif tool_name in (
+            "wait_for_task",
+            "wait_for_any_task",
+            "wait_for_all_tasks",
+        ):
+            # Extract timeout from arguments, default to 300s if not specified
+            arg_timeout = float((arguments or {}).get("timeout", 300.0))
+            # Add 30s buffer for HTTP overhead
+            timeout = arg_timeout + 30.0
 
         return await self._request(
             "POST",
@@ -242,13 +252,15 @@
             },
         )
 
-    async def init_project(
-        self, name: str | None = None, github_url: str | None = None
-    ) -> dict[str, Any]:
-        """Initialize a project - use 'gobby init' CLI command instead."""
+    async def init_project(self, name: str, project_path: str | None = None) -> dict[str, Any]:
+        """Initialize a new Gobby project.
+
+        Note: Project initialization requires CLI access and cannot be done
+        via the MCP proxy. Use 'gobby init' command instead.
+        """
         return {
             "success": False,
-            "error": "init_project requires CLI access. Run 'gobby init' from your terminal.",
+            "error": "Project initialization requires CLI access. Use 'gobby init' command instead.",
         }
 
 
@@ -410,22 +422,6 @@ def register_proxy_tools(mcp: FastMCP, proxy: DaemonProxy) -> None:
             cwd=cwd,
         )
 
-    @mcp.tool()
-    async def init_project(
-        name: str | None = None, github_url: str | None = None
-    ) -> dict[str, Any]:
-        """
-        Initialize a new Gobby project in the current directory.
-
-        Args:
-            name: Optional project name (auto-detected from directory name if not provided)
-            github_url: Optional GitHub URL (auto-detected from git remote if not provided)
-
-        Returns:
-            Dict with success status and project details
-        """
-        return await proxy.init_project(name, github_url)
-
     @mcp.tool()
     async def add_mcp_server(
         name: str,
@@ -503,6 +499,26 @@
             query=query,
         )
 
+    @mcp.tool()
+    async def init_project(
+        name: str,
+        project_path: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Initialize a new Gobby project.
+
+        Note: Project initialization requires CLI access and cannot be done
+        via the MCP proxy. Use 'gobby init' command instead.
+
+        Args:
+            name: Project name
+            project_path: Path to project directory (optional)
+
+        Returns:
+            Result dict with error (CLI access required)
+        """
+        return await proxy.init_project(name, project_path)
+
 
 async def ensure_daemon_running() -> None:
     """Ensure the Gobby daemon is running and healthy."""
gobby/mcp_proxy/tools/agents.py CHANGED
@@ -40,6 +40,9 @@ def create_agents_registry(
     git_manager: Any | None = None,
     clone_storage: Any | None = None,
     clone_manager: Any | None = None,
+    # For mode=self (workflow activation on caller session)
+    workflow_loader: Any | None = None,
+    db: Any | None = None,
 ) -> InternalToolRegistry:
     """
     Create an agent tool registry with all agent-related tools.
@@ -430,6 +433,10 @@
         clone_storage=clone_storage,
         clone_manager=clone_manager,
         session_manager=session_manager,
+        workflow_loader=workflow_loader,
+        # For mode=self (workflow activation on caller session)
+        state_manager=workflow_state_manager,
+        db=db,
     )
 
     # Merge spawn_agent tools into agents registry
gobby/mcp_proxy/tools/hub.py CHANGED
@@ -45,7 +45,7 @@ def create_hub_registry(
     """
     registry = HubToolRegistry(
         name="gobby-hub",
-        description="Hub (cross-project) queries - list_all_projects, list_cross_project_tasks, list_cross_project_sessions, hub_stats",
+        description="Hub (cross-project) queries and system info - get_machine_id, list_all_projects, list_cross_project_tasks, list_cross_project_sessions, hub_stats",
     )
 
     def _get_hub_db() -> LocalDatabase | None:
@@ -54,6 +54,35 @@
             return None
         return LocalDatabase(hub_db_path)
 
+    @registry.tool(
+        name="get_machine_id",
+        description="Get the daemon's machine identifier. Use this from sandboxed agents that cannot read ~/.gobby/machine_id directly.",
+    )
+    async def get_machine_id() -> dict[str, Any]:
+        """
+        Get the machine identifier used by this Gobby daemon.
+
+        The machine_id is stored in ~/.gobby/machine_id and is generated
+        once on first daemon run. This tool provides read-only access to
+        the daemon's authoritative machine_id.
+
+        Returns:
+            Dict with machine_id or error if not found.
+        """
+        from gobby.utils.machine_id import get_machine_id as _get_machine_id
+
+        machine_id = _get_machine_id()
+        if machine_id:
+            return {
+                "success": True,
+                "machine_id": machine_id,
+            }
+
+        return {
+            "success": False,
+            "error": "machine_id not found - daemon may not have initialized properly",
+        }
+
     @registry.tool(
         name="list_all_projects",
         description="List all unique projects in the hub database.",
gobby/mcp_proxy/tools/orchestration/cleanup.py CHANGED
@@ -38,7 +38,7 @@ def register_cleanup(
         """
         Approve a reviewed task and clean up its worktree.
 
-        This tool transitions a task from "review" to "closed" status
+        This tool transitions a task from "needs_review" to "closed" status
         and optionally deletes the associated worktree.
 
         Args:
@@ -71,11 +71,11 @@
                 "error": f"Task not found: {task_id}",
             }
 
-        # Verify task is in review status
-        if task.status != "review":
+        # Verify task is in needs_review status
+        if task.status != "needs_review":
             return {
                 "success": False,
-                "error": f"Task must be in 'review' status to approve. Current status: {task.status}",
+                "error": f"Task must be in 'needs_review' status to approve. Current status: {task.status}",
             }
 
         # Get associated worktree (if any)
@@ -148,7 +148,7 @@ def register_cleanup(
         name="approve_and_cleanup",
         description=(
             "Approve a reviewed task and clean up its worktree. "
-            "Transitions task from 'review' to 'closed' status and deletes worktree."
+            "Transitions task from 'needs_review' to 'closed' status and deletes worktree."
         ),
        input_schema={
             "type": "object",
gobby/mcp_proxy/tools/orchestration/monitor.py CHANGED
@@ -101,7 +101,7 @@ def register_monitor(
                 closed_tasks.append(task_info)
             elif task.status == "in_progress":
                 in_progress_tasks.append(task_info)
-            elif task.status == "review":
+            elif task.status == "needs_review":
                 review_tasks.append(task_info)
             else:
                 open_tasks.append(task_info)