gobby 0.2.8__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +1 -1
- gobby/adapters/__init__.py +6 -0
- gobby/adapters/base.py +11 -2
- gobby/adapters/claude_code.py +5 -28
- gobby/adapters/codex_impl/adapter.py +38 -43
- gobby/adapters/copilot.py +324 -0
- gobby/adapters/cursor.py +373 -0
- gobby/adapters/gemini.py +2 -26
- gobby/adapters/windsurf.py +359 -0
- gobby/agents/definitions.py +162 -2
- gobby/agents/isolation.py +33 -1
- gobby/agents/pty_reader.py +192 -0
- gobby/agents/registry.py +10 -1
- gobby/agents/runner.py +24 -8
- gobby/agents/sandbox.py +8 -3
- gobby/agents/session.py +4 -0
- gobby/agents/spawn.py +9 -2
- gobby/agents/spawn_executor.py +49 -61
- gobby/agents/spawners/command_builder.py +4 -4
- gobby/app_context.py +64 -0
- gobby/cli/__init__.py +4 -0
- gobby/cli/install.py +259 -4
- gobby/cli/installers/__init__.py +12 -0
- gobby/cli/installers/copilot.py +242 -0
- gobby/cli/installers/cursor.py +244 -0
- gobby/cli/installers/shared.py +3 -0
- gobby/cli/installers/windsurf.py +242 -0
- gobby/cli/pipelines.py +639 -0
- gobby/cli/sessions.py +3 -1
- gobby/cli/skills.py +209 -0
- gobby/cli/tasks/crud.py +6 -5
- gobby/cli/tasks/search.py +1 -1
- gobby/cli/ui.py +116 -0
- gobby/cli/utils.py +5 -17
- gobby/cli/workflows.py +38 -17
- gobby/config/app.py +5 -0
- gobby/config/features.py +0 -20
- gobby/config/skills.py +23 -2
- gobby/config/tasks.py +4 -0
- gobby/hooks/broadcaster.py +9 -0
- gobby/hooks/event_handlers/__init__.py +155 -0
- gobby/hooks/event_handlers/_agent.py +175 -0
- gobby/hooks/event_handlers/_base.py +92 -0
- gobby/hooks/event_handlers/_misc.py +66 -0
- gobby/hooks/event_handlers/_session.py +487 -0
- gobby/hooks/event_handlers/_tool.py +196 -0
- gobby/hooks/events.py +48 -0
- gobby/hooks/hook_manager.py +27 -3
- gobby/install/copilot/hooks/hook_dispatcher.py +203 -0
- gobby/install/cursor/hooks/hook_dispatcher.py +203 -0
- gobby/install/gemini/hooks/hook_dispatcher.py +8 -0
- gobby/install/windsurf/hooks/hook_dispatcher.py +205 -0
- gobby/llm/__init__.py +14 -1
- gobby/llm/claude.py +594 -43
- gobby/llm/service.py +149 -0
- gobby/mcp_proxy/importer.py +4 -41
- gobby/mcp_proxy/instructions.py +9 -27
- gobby/mcp_proxy/manager.py +13 -3
- gobby/mcp_proxy/models.py +1 -0
- gobby/mcp_proxy/registries.py +66 -5
- gobby/mcp_proxy/server.py +6 -2
- gobby/mcp_proxy/services/recommendation.py +2 -28
- gobby/mcp_proxy/services/tool_filter.py +7 -0
- gobby/mcp_proxy/services/tool_proxy.py +19 -1
- gobby/mcp_proxy/stdio.py +37 -21
- gobby/mcp_proxy/tools/agents.py +7 -0
- gobby/mcp_proxy/tools/artifacts.py +3 -3
- gobby/mcp_proxy/tools/hub.py +30 -1
- gobby/mcp_proxy/tools/orchestration/cleanup.py +5 -5
- gobby/mcp_proxy/tools/orchestration/monitor.py +1 -1
- gobby/mcp_proxy/tools/orchestration/orchestrate.py +8 -3
- gobby/mcp_proxy/tools/orchestration/review.py +17 -4
- gobby/mcp_proxy/tools/orchestration/wait.py +7 -7
- gobby/mcp_proxy/tools/pipelines/__init__.py +254 -0
- gobby/mcp_proxy/tools/pipelines/_discovery.py +67 -0
- gobby/mcp_proxy/tools/pipelines/_execution.py +281 -0
- gobby/mcp_proxy/tools/sessions/_crud.py +4 -4
- gobby/mcp_proxy/tools/sessions/_handoff.py +1 -1
- gobby/mcp_proxy/tools/skills/__init__.py +184 -30
- gobby/mcp_proxy/tools/spawn_agent.py +229 -14
- gobby/mcp_proxy/tools/task_readiness.py +27 -4
- gobby/mcp_proxy/tools/tasks/_context.py +8 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +27 -1
- gobby/mcp_proxy/tools/tasks/_helpers.py +1 -1
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +125 -8
- gobby/mcp_proxy/tools/tasks/_lifecycle_validation.py +2 -1
- gobby/mcp_proxy/tools/tasks/_search.py +1 -1
- gobby/mcp_proxy/tools/workflows/__init__.py +273 -0
- gobby/mcp_proxy/tools/workflows/_artifacts.py +225 -0
- gobby/mcp_proxy/tools/workflows/_import.py +112 -0
- gobby/mcp_proxy/tools/workflows/_lifecycle.py +332 -0
- gobby/mcp_proxy/tools/workflows/_query.py +226 -0
- gobby/mcp_proxy/tools/workflows/_resolution.py +78 -0
- gobby/mcp_proxy/tools/workflows/_terminal.py +175 -0
- gobby/mcp_proxy/tools/worktrees.py +54 -15
- gobby/memory/components/__init__.py +0 -0
- gobby/memory/components/ingestion.py +98 -0
- gobby/memory/components/search.py +108 -0
- gobby/memory/context.py +5 -5
- gobby/memory/manager.py +16 -25
- gobby/paths.py +51 -0
- gobby/prompts/loader.py +1 -35
- gobby/runner.py +131 -16
- gobby/servers/http.py +193 -150
- gobby/servers/routes/__init__.py +2 -0
- gobby/servers/routes/admin.py +56 -0
- gobby/servers/routes/mcp/endpoints/execution.py +33 -32
- gobby/servers/routes/mcp/endpoints/registry.py +8 -8
- gobby/servers/routes/mcp/hooks.py +10 -1
- gobby/servers/routes/pipelines.py +227 -0
- gobby/servers/websocket.py +314 -1
- gobby/sessions/analyzer.py +89 -3
- gobby/sessions/manager.py +5 -5
- gobby/sessions/transcripts/__init__.py +3 -0
- gobby/sessions/transcripts/claude.py +5 -0
- gobby/sessions/transcripts/codex.py +5 -0
- gobby/sessions/transcripts/gemini.py +5 -0
- gobby/skills/hubs/__init__.py +25 -0
- gobby/skills/hubs/base.py +234 -0
- gobby/skills/hubs/claude_plugins.py +328 -0
- gobby/skills/hubs/clawdhub.py +289 -0
- gobby/skills/hubs/github_collection.py +465 -0
- gobby/skills/hubs/manager.py +263 -0
- gobby/skills/hubs/skillhub.py +342 -0
- gobby/skills/parser.py +23 -0
- gobby/skills/sync.py +5 -4
- gobby/storage/artifacts.py +19 -0
- gobby/storage/memories.py +4 -4
- gobby/storage/migrations.py +118 -3
- gobby/storage/pipelines.py +367 -0
- gobby/storage/sessions.py +23 -4
- gobby/storage/skills.py +48 -8
- gobby/storage/tasks/_aggregates.py +2 -2
- gobby/storage/tasks/_lifecycle.py +4 -4
- gobby/storage/tasks/_models.py +7 -1
- gobby/storage/tasks/_queries.py +3 -3
- gobby/sync/memories.py +4 -3
- gobby/tasks/commits.py +48 -17
- gobby/tasks/external_validator.py +4 -17
- gobby/tasks/validation.py +13 -87
- gobby/tools/summarizer.py +18 -51
- gobby/utils/status.py +13 -0
- gobby/workflows/actions.py +80 -0
- gobby/workflows/context_actions.py +265 -27
- gobby/workflows/definitions.py +119 -1
- gobby/workflows/detection_helpers.py +23 -11
- gobby/workflows/enforcement/__init__.py +11 -1
- gobby/workflows/enforcement/blocking.py +96 -0
- gobby/workflows/enforcement/handlers.py +35 -1
- gobby/workflows/enforcement/task_policy.py +18 -0
- gobby/workflows/engine.py +26 -4
- gobby/workflows/evaluator.py +8 -5
- gobby/workflows/lifecycle_evaluator.py +59 -27
- gobby/workflows/loader.py +567 -30
- gobby/workflows/lobster_compat.py +147 -0
- gobby/workflows/pipeline_executor.py +801 -0
- gobby/workflows/pipeline_state.py +172 -0
- gobby/workflows/pipeline_webhooks.py +206 -0
- gobby/workflows/premature_stop.py +5 -0
- gobby/worktrees/git.py +135 -20
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/METADATA +56 -22
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/RECORD +166 -122
- gobby/hooks/event_handlers.py +0 -1008
- gobby/mcp_proxy/tools/workflows.py +0 -1023
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/WHEEL +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.8.dist-info → gobby-0.2.11.dist-info}/top_level.txt +0 -0
gobby/utils/status.py
CHANGED

@@ -84,6 +84,11 @@ def fetch_rich_status(http_port: int, timeout: float = 2.0) -> dict[str, Any]:
         if skills_data:
             status_kwargs["skills_total"] = skills_data.get("total", 0)

+        # Artifacts
+        artifacts_data = data.get("artifacts", {})
+        if artifacts_data and artifacts_data.get("count", 0) > 0:
+            status_kwargs["artifacts_count"] = artifacts_data.get("count", 0)
+
     except (httpx.ConnectError, httpx.TimeoutException):
         # Daemon not responding - return empty
         pass
@@ -124,6 +129,8 @@ def format_status_message(
     memories_avg_importance: float | None = None,
     # Skills
     skills_total: int | None = None,
+    # Artifacts
+    artifacts_count: int | None = None,
     **kwargs: Any,
 ) -> str:
     """
@@ -254,6 +261,12 @@ def format_status_message(
         lines.append(f"  {mem_str}")
         lines.append("")

+    # Artifacts section (only show if we have data)
+    if artifacts_count is not None:
+        lines.append("Artifacts:")
+        lines.append(f"  Captured: {artifacts_count}")
+        lines.append("")
+
     # Paths section (only when running)
     if running and (pid_file or log_files):
         lines.append("Paths:")
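
Net effect of the status.py changes: when the daemon reports any captured artifacts, the status message gains an "Artifacts" block. A minimal sketch of the new surface, not taken from the package itself (`running=True` is an assumption; the full `format_status_message` signature is not shown in this diff):

```python
# Sketch only: exercises the artifacts_count kwarg added above.
# `running` is assumed to be an existing parameter, based on the
# "Paths section (only when running)" branch in the diff.
from gobby.utils.status import format_status_message

msg = format_status_message(running=True, artifacts_count=3)
assert "Artifacts:" in msg
assert "Captured: 3" in msg
```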
gobby/workflows/actions.py
CHANGED

@@ -32,6 +32,7 @@ from gobby.workflows.enforcement import (
     handle_require_commit_before_stop,
     handle_require_task_complete,
     handle_require_task_review_or_close_before_stop,
+    handle_track_schema_lookup,
     handle_validate_session_task_scope,
 )
 from gobby.workflows.llm_actions import handle_call_llm
@@ -100,7 +101,10 @@ class ActionContext:
     memory_sync_manager: Any | None = None
     task_sync_manager: Any | None = None
     session_task_manager: Any | None = None
+    skill_manager: Any | None = None
     event_data: dict[str, Any] | None = None  # Hook event data (e.g., prompt_text)
+    pipeline_executor: Any | None = None  # PipelineExecutor
+    workflow_loader: Any | None = None  # WorkflowLoader


 class ActionHandler(Protocol):
@@ -130,6 +134,9 @@ class ActionExecutor:
         progress_tracker: Any | None = None,
         stuck_detector: Any | None = None,
         websocket_server: Any | None = None,
+        skill_manager: Any | None = None,
+        pipeline_executor: Any | None = None,
+        workflow_loader: Any | None = None,
     ):
         self.db = db
         self.session_manager = session_manager
@@ -147,6 +154,9 @@ class ActionExecutor:
         self.progress_tracker = progress_tracker
         self.stuck_detector = stuck_detector
         self.websocket_server = websocket_server
+        self.skill_manager = skill_manager
+        self.pipeline_executor = pipeline_executor
+        self.workflow_loader = workflow_loader
         self._handlers: dict[str, ActionHandler] = {}

         self._register_defaults()
@@ -253,6 +263,9 @@ class ActionExecutor:
         # --- Autonomous execution actions (closures for progress_tracker/stuck_detector) ---
         self._register_autonomous_actions()

+        # --- Pipeline actions (closures for pipeline_executor/workflow_loader) ---
+        self._register_pipeline_actions()
+
     def _register_task_enforcement_actions(self) -> None:
         """Register task enforcement actions with task_manager closure."""
         tm = self.task_manager
@@ -283,6 +296,9 @@ class ActionExecutor:
         async def capture_baseline(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
             return await handle_capture_baseline_dirty_files(context, task_manager=tm, **kw)

+        async def track_schema(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            return await handle_track_schema_lookup(context, task_manager=tm, **kw)
+
         self.register("block_tools", block_tools)
         self.register("require_active_task", require_active)
         self.register("require_task_complete", require_complete)
@@ -290,6 +306,7 @@ class ActionExecutor:
         self.register("require_task_review_or_close_before_stop", require_review)
         self.register("validate_session_task_scope", validate_scope)
         self.register("capture_baseline_dirty_files", capture_baseline)
+        self.register("track_schema_lookup", track_schema)

     def _register_webhook_action(self) -> None:
         """Register webhook action with config closure."""
@@ -380,6 +397,69 @@ class ActionExecutor:
         self.register("record_task_selection", record_sel)
         self.register("get_progress_summary", get_summary)

+    def _register_pipeline_actions(self) -> None:
+        """Register pipeline actions with pipeline_executor/workflow_loader closures."""
+        executor = self
+
+        async def run_pipeline(context: ActionContext, **kw: Any) -> dict[str, Any] | None:
+            from gobby.workflows.pipeline_state import ApprovalRequired
+
+            name = kw.get("name")
+            inputs = kw.get("inputs") or {}
+            await_completion = kw.get("await_completion", False)
+
+            if not name:
+                return {"error": "Pipeline name is required"}
+
+            if executor.workflow_loader is None:
+                return {"error": "Workflow loader not configured"}
+
+            if executor.pipeline_executor is None:
+                return {"error": "Pipeline executor not configured"}
+
+            # Load the pipeline
+            pipeline = executor.workflow_loader.load_pipeline(name)
+            if pipeline is None:
+                return {"error": f"Pipeline '{name}' not found"}
+
+            # Render template variables in inputs
+            rendered_inputs = {}
+            variables = context.state.variables if context.state else {}
+            for key, value in inputs.items():
+                if isinstance(value, str):
+                    rendered_inputs[key] = context.template_engine.render(value, variables)
+                else:
+                    rendered_inputs[key] = value
+
+            try:
+                # Execute the pipeline
+                execution = await executor.pipeline_executor.execute(
+                    pipeline=pipeline,
+                    inputs=rendered_inputs,
+                    project_id=variables.get("project_id", ""),
+                )
+
+                return {
+                    "status": execution.status.value,
+                    "execution_id": execution.id,
+                    "pipeline_name": execution.pipeline_name,
+                }
+
+            except ApprovalRequired as e:
+                # Store pending pipeline in state if await_completion is True
+                if await_completion:
+                    context.state.variables["pending_pipeline"] = e.execution_id
+
+                return {
+                    "status": "waiting_approval",
+                    "execution_id": e.execution_id,
+                    "step_id": e.step_id,
+                    "token": e.token,
+                    "message": e.message,
+                }
+
+        self.register("run_pipeline", run_pipeline)
+
     async def execute(
         self, action_type: str, context: ActionContext, **kwargs: Any
     ) -> dict[str, Any] | None:
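
The new `run_pipeline` action is dispatched by name through `ActionExecutor.execute`. A hypothetical call site (the executor/context wiring is elided and the pipeline name and inputs are invented; only the `name`/`inputs`/`await_completion` kwargs and the return shapes come from the diff):

```python
# Hypothetical dispatch of the new action via the executor shown above.
result = await executor.execute(
    "run_pipeline",
    context,
    name="nightly-report",                   # invented pipeline name
    inputs={"project": "{{ project_id }}"},  # string inputs are template-rendered
    await_completion=True,                   # stash execution_id if approval is required
)
# Success: {"status": ..., "execution_id": ..., "pipeline_name": ...}
# Approval gate hit: {"status": "waiting_approval", "execution_id": ..., "token": ...}
```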
gobby/workflows/context_actions.py
CHANGED

@@ -25,20 +25,35 @@ def inject_context(
     session_id: str,
     state: Any,
     template_engine: Any,
-    source: str | None = None,
+    source: str | list[str] | None = None,
     template: str | None = None,
     require: bool = False,
+    skill_manager: Any | None = None,
+    filter: str | None = None,
+    session_task_manager: Any | None = None,
+    memory_manager: Any | None = None,
+    prompt_text: str | None = None,
+    limit: int = 5,
+    min_importance: float = 0.3,
 ) -> dict[str, Any] | None:
-    """Inject context from a source.
+    """Inject context from a source or multiple sources.

     Args:
         session_manager: The session manager instance
         session_id: Current session ID
         state: WorkflowState instance
         template_engine: Template engine for rendering
-        source: Source type
+        source: Source type(s). Can be a string or list of strings.
+            Supported: previous_session_summary, handoff, artifacts, skills, task_context, memories, etc.
         template: Optional template for rendering
         require: If True, block session when no content found (default: False)
+        skill_manager: HookSkillManager instance (required for source='skills')
+        filter: Optional filter for skills source ('always_apply' to only include always-apply skills)
+        session_task_manager: SessionTaskManager instance (required for source='task_context')
+        memory_manager: MemoryManager instance (required for source='memories')
+        prompt_text: User prompt text for memory recall (required for source='memories')
+        limit: Max memories to retrieve (default: 5, used with source='memories')
+        min_importance: Minimum importance threshold (default: 0.3, used with source='memories')

     Returns:
         Dict with inject_context key, blocking decision, or None
@@ -60,6 +75,57 @@ def inject_context(
         logger.warning("inject_context: session_id is empty or None")
         return None

+    # Handle list of sources - recursively call for each source and combine
+    if isinstance(source, list):
+        combined_content: list[str] = []
+        for single_source in source:
+            result = inject_context(
+                session_manager=session_manager,
+                session_id=session_id,
+                state=state,
+                template_engine=template_engine,
+                source=single_source,
+                template=None,  # Don't render template for individual sources
+                require=False,  # Don't block for individual sources
+                skill_manager=skill_manager,
+                filter=filter,
+                session_task_manager=session_task_manager,
+                memory_manager=memory_manager,
+                prompt_text=prompt_text,
+                limit=limit,
+                min_importance=min_importance,
+            )
+            if result and result.get("inject_context"):
+                combined_content.append(result["inject_context"])
+
+        if combined_content:
+            content = "\n\n".join(combined_content)
+            if template:
+                # Build source_contents mapping for individual source access
+                source_contents: dict[str, str] = {}
+                for i, single_source in enumerate(source):
+                    if i < len(combined_content):
+                        source_contents[single_source] = combined_content[i]
+                render_context: dict[str, Any] = {
+                    "session": session_manager.get(session_id),
+                    "state": state,
+                    "artifacts": state.artifacts if state else {},
+                    "observations": state.observations if state else {},
+                    "combined_content": content,
+                    "source_contents": source_contents,
+                }
+                content = template_engine.render(template, render_context)
+            state.context_injected = True
+            return {"inject_context": content}
+
+        # No content from any source - block if required
+        if require:
+            reason = f"Required handoff context not found (sources={source})"
+            logger.warning(f"inject_context: {reason}")
+            return {"decision": "block", "reason": reason}
+
+        return None
+
     # Debug logging for troubleshooting
     logger.debug(
         f"inject_context called: source={source!r}, "
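
With `source` now accepting a list, one action can merge several injections: each source is resolved recursively and the results are joined with blank lines, per the list-handling block above. A hedged sketch of a direct call, assuming the manager objects are already in hand:

```python
# Sketch only: the new multi-source path. Manager instances are assumed to
# exist in the caller; the prompt text is invented.
result = inject_context(
    session_manager=session_manager,
    session_id=session_id,
    state=state,
    template_engine=template_engine,
    source=["skills", "memories"],      # resolved one by one, then combined
    skill_manager=skill_manager,        # required for source='skills'
    memory_manager=memory_manager,      # required for source='memories'
    prompt_text="refactor the loader",  # drives memory recall
)
if result and "inject_context" in result:
    print(result["inject_context"])
```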
@@ -77,7 +143,7 @@ def inject_context(
     if not source and template:
         # Render static template directly
         logger.debug("inject_context: entering template-only path")
-        render_context
+        render_context = {
             "session": session_manager.get(session_id),
             "state": state,
             "artifacts": state.artifacts if state else {},
@@ -152,6 +218,69 @@ def inject_context(
             f"Loaded compact_markdown ({len(content)} chars) from current session {session_id}"
         )

+    elif source == "skills":
+        # Inject skill context from skill_manager
+        if skill_manager is None:
+            logger.debug("inject_context: skills source requires skill_manager")
+            return None
+
+        skills = skill_manager.discover_core_skills()
+
+        # Apply filter if specified
+        if filter == "always_apply":
+            skills = [s for s in skills if s.is_always_apply()]
+
+        if skills:
+            content = _format_skills(skills)
+            logger.debug(f"Formatted {len(skills)} skills for injection")
+
+    elif source == "task_context":
+        # Inject current task context from session_task_manager
+        if session_task_manager is None:
+            logger.debug("inject_context: task_context source requires session_task_manager")
+            return None
+
+        session_tasks = session_task_manager.get_session_tasks(session_id)
+
+        # Filter for "worked_on" tasks (the active task)
+        worked_on_tasks = [t for t in session_tasks if t.get("action") == "worked_on"]
+
+        if worked_on_tasks:
+            content = _format_task_context(worked_on_tasks)
+            logger.debug(f"Formatted {len(worked_on_tasks)} active tasks for injection")
+
+    elif source == "memories":
+        # Inject relevant memories from memory_manager
+        if memory_manager is None:
+            logger.debug("inject_context: memories source requires memory_manager")
+            return None
+
+        if not memory_manager.config.enabled:
+            logger.debug("inject_context: memory manager is disabled")
+            return None
+
+        # Get project_id from session
+        project_id = None
+        session = session_manager.get(session_id)
+        if session:
+            project_id = getattr(session, "project_id", None)
+
+        try:
+            memories = memory_manager.recall(
+                query=prompt_text or "",
+                project_id=project_id,
+                limit=limit,
+                min_importance=min_importance,
+                use_semantic=True,
+            )
+
+            if memories:
+                content = _format_memories(memories)
+                logger.debug(f"Formatted {len(memories)} memories for injection")
+        except Exception as e:
+            logger.error(f"inject_context: memory recall failed: {e}")
+            return None
+
     if content:
         if template:
             render_context = {
@@ -173,6 +302,12 @@ def inject_context(
             elif source == "compact_handoff":
                 # Pass content to template (like /clear does with summary)
                 render_context["handoff"] = content
+            elif source == "skills":
+                render_context["skills_list"] = content
+            elif source == "task_context":
+                render_context["task_context"] = content
+            elif source == "memories":
+                render_context["memories_list"] = content

             content = template_engine.render(template, render_context)

@@ -308,16 +443,8 @@ def extract_handoff_context(
     except Exception as wt_err:
         logger.debug(f"Failed to get worktree context: {wt_err}")

-    #
-
-        from gobby.hooks.skill_manager import HookSkillManager
-
-        skill_manager = HookSkillManager()
-        core_skills = skill_manager.discover_core_skills()
-        always_apply_skills = [s.name for s in core_skills if s.is_always_apply()]
-        handoff_ctx.active_skills = always_apply_skills
-    except Exception as skill_err:
-        logger.debug(f"Failed to get active skills: {skill_err}")
+    # Note: active_skills population removed - redundant with _build_skill_injection_context()
+    # which already handles skill restoration on session start

     # Format as markdown (like /clear stores formatted summary)
     markdown = format_handoff_as_markdown(handoff_ctx)
@@ -335,6 +462,87 @@ def extract_handoff_context(
         return {"error": str(e)}


+def _format_memories(memories: list[Any]) -> str:
+    """Format memory objects as markdown for injection.
+
+    Args:
+        memories: List of Memory objects
+
+    Returns:
+        Formatted markdown string with memory content
+    """
+    lines = ["## Relevant Memories"]
+    for memory in memories:
+        content = getattr(memory, "content", str(memory))
+        memory_type = getattr(memory, "memory_type", None)
+        importance = getattr(memory, "importance", None)
+
+        if memory_type:
+            lines.append(f"- [{memory_type}] {content}")
+        else:
+            lines.append(f"- {content}")
+
+        if importance and importance >= 0.8:
+            lines[-1] += " *(high importance)*"
+
+    return "\n".join(lines)
+
+
+def _format_task_context(task_entries: list[dict[str, Any]]) -> str:
+    """Format task entries as markdown for injection.
+
+    Args:
+        task_entries: List of dicts with 'task' key containing Task objects
+
+    Returns:
+        Formatted markdown string with task info
+    """
+    lines = ["## Active Task"]
+    for entry in task_entries:
+        task = entry.get("task")
+        if task is None:
+            continue
+
+        seq_num = getattr(task, "seq_num", None)
+        title = getattr(task, "title", "Untitled")
+        status = getattr(task, "status", "unknown")
+        description = getattr(task, "description", "")
+        validation = getattr(task, "validation_criteria", "")
+
+        # Format task reference
+        ref = f"#{seq_num}" if seq_num else task.id[:8] if hasattr(task, "id") else "unknown"
+        lines.append(f"**{ref}**: {title}")
+        lines.append(f"Status: {status}")
+
+        if description:
+            lines.append(f"\n{description}")
+
+        if validation:
+            lines.append(f"\n**Validation Criteria**: {validation}")
+
+    return "\n".join(lines)
+
+
+def _format_skills(skills: list[Any]) -> str:
+    """Format a list of ParsedSkill objects as markdown for injection.
+
+    Args:
+        skills: List of ParsedSkill objects
+
+    Returns:
+        Formatted markdown string with skill names and descriptions
+    """
+    lines = ["## Available Skills"]
+    for skill in skills:
+        name = getattr(skill, "name", "unknown")
+        description = getattr(skill, "description", "")
+        if description:
+            lines.append(f"- **{name}**: {description}")
+        else:
+            lines.append(f"- **{name}**")
+    return "\n".join(lines)
+
+
 def recommend_skills_for_task(task: dict[str, Any] | None) -> list[str]:
     """Recommend relevant skills based on task category.

@@ -414,16 +622,37 @@ def format_handoff_as_markdown(ctx: Any, prompt_template: str | None = None) ->
     if ctx.git_status:
         sections.append(f"### Uncommitted Changes\n```\n{ctx.git_status}\n```")

-    # Files modified section
-    if ctx.files_modified:
-
+    # Files modified section - only show files still dirty (not yet committed)
+    if ctx.files_modified and ctx.git_status:
+        # Filter to files that appear in git status (still uncommitted)
+        # Normalize paths: files_modified may have absolute paths, git_status has relative
+        cwd = Path.cwd()
+        dirty_files = []
         for f in ctx.files_modified:
-
-
-
-
+            # Try to make path relative to cwd for comparison
+            try:
+                rel_path = Path(f).relative_to(cwd)
+                rel_str = str(rel_path)
+            except ValueError:
+                # Path not relative to cwd, use as-is
+                rel_str = f
+            # Check if relative path appears in git status
+            if rel_str in ctx.git_status:
+                dirty_files.append(rel_str)
+        if dirty_files:
+            lines = ["### Files Being Modified"]
+            for f in dirty_files:
+                lines.append(f"- {f}")
+            sections.append("\n".join(lines))
+
+    # Initial goal section - only if task is still active (not closed/completed)
     if ctx.initial_goal:
-
+        task_status = None
+        if ctx.active_gobby_task:
+            task_status = ctx.active_gobby_task.get("status")
+        # Only include if no task or task is still open/in_progress
+        if task_status in (None, "open", "in_progress"):
+            sections.append(f"### Original Goal\n{ctx.initial_goal}")

     # Recent activity section
     if ctx.recent_activity:
@@ -432,11 +661,8 @@ def format_handoff_as_markdown(ctx: Any, prompt_template: str | None = None) ->
         lines.append(f"- {activity}")
         sections.append("\n".join(lines))

-    # Active
-
-        lines = ["### Active Skills"]
-        lines.append(f"Skills available: {', '.join(ctx.active_skills)}")
-        sections.append("\n".join(lines))
+    # Note: Active Skills section removed - redundant with _build_skill_injection_context()
+    # which already handles skill restoration on session start


     return "\n\n".join(sections)
@@ -447,6 +673,11 @@ format_handoff_as_markdown(ctx: Any, prompt_template: str | None = None) ->

 async def handle_inject_context(context: ActionContext, **kwargs: Any) -> dict[str, Any] | None:
     """ActionHandler wrapper for inject_context."""
+    # Get prompt_text from event_data if not explicitly passed
+    prompt_text = kwargs.get("prompt_text")
+    if prompt_text is None and context.event_data:
+        prompt_text = context.event_data.get("prompt_text")
+
     return await asyncio.to_thread(
         inject_context,
         session_manager=context.session_manager,
@@ -456,6 +687,13 @@ async def handle_inject_context(context: ActionContext, **kwargs: Any) -> dict[s
         source=kwargs.get("source"),
         template=kwargs.get("template"),
         require=kwargs.get("require", False),
+        skill_manager=context.skill_manager,
+        filter=kwargs.get("filter"),
+        session_task_manager=context.session_task_manager,
+        memory_manager=context.memory_manager,
+        prompt_text=prompt_text,
+        limit=kwargs.get("limit", 5),
+        min_importance=kwargs.get("min_importance", 0.3),
     )

gobby/workflows/definitions.py
CHANGED

@@ -30,7 +30,11 @@ class PrematureStopHandler(BaseModel):
     """Handler for when an agent attempts to stop before task completion."""

     action: Literal["guide_continuation", "block", "warn"] = "guide_continuation"
-    message: str =
+    message: str = (
+        "Task has incomplete subtasks. Options: "
+        "1) Continue: use suggest_next_task() to find the next task. "
+        "2) Stop anyway: use `/g workflows deactivate` to end the workflow first."
+    )
     condition: str | None = None  # Optional condition to check (e.g., task_tree_complete)


@@ -88,6 +92,120 @@ class WorkflowDefinition(BaseModel):
         return None


+# --- Pipeline Definition Models (YAML) ---
+
+
+class WebhookEndpoint(BaseModel):
+    """Configuration for a webhook endpoint."""
+
+    url: str
+    method: str = "POST"
+    headers: dict[str, str] = Field(default_factory=dict)
+
+
+class WebhookConfig(BaseModel):
+    """Webhook configuration for pipeline events."""
+
+    on_approval_pending: WebhookEndpoint | None = None
+    on_complete: WebhookEndpoint | None = None
+    on_failure: WebhookEndpoint | None = None
+
+
+class PipelineApproval(BaseModel):
+    """Approval gate configuration for a pipeline step."""
+
+    required: bool = False
+    message: str | None = None
+    timeout_seconds: int | None = None
+
+
+class PipelineStep(BaseModel):
+    """A single step in a pipeline workflow.
+
+    Steps must have exactly one execution type: exec, prompt, or invoke_pipeline.
+    """
+
+    id: str
+
+    # Execution types (mutually exclusive - exactly one required)
+    exec: str | None = None  # Shell command to run
+    prompt: str | None = None  # LLM prompt template
+    invoke_pipeline: str | None = None  # Name of pipeline to invoke
+
+    # Optional fields
+    condition: str | None = None  # Condition for step execution
+    approval: PipelineApproval | None = None  # Approval gate
+    tools: list[str] = Field(default_factory=list)  # Tool restrictions for prompt steps
+    input: str | None = None  # Explicit input reference (e.g., $prev_step.output)
+
+    def model_post_init(self, __context: Any) -> None:
+        """Validate that exactly one execution type is specified."""
+        exec_types = [self.exec, self.prompt, self.invoke_pipeline]
+        specified = [t for t in exec_types if t is not None]
+
+        if len(specified) == 0:
+            raise ValueError(
+                "PipelineStep requires at least one execution type: exec, prompt, or invoke_pipeline"
+            )
+        if len(specified) > 1:
+            raise ValueError(
+                "PipelineStep exec, prompt, and invoke_pipeline are mutually exclusive - only one allowed"
+            )
+
+
+class PipelineDefinition(BaseModel):
+    """Definition for a pipeline workflow with typed data flow between steps.
+
+    Pipelines execute steps sequentially with explicit data flow via $step.output references.
+    """
+
+    name: str
+    description: str | None = None
+    version: str = "1.0"
+    type: Literal["pipeline"] = "pipeline"
+
+    @field_validator("version", mode="before")
+    @classmethod
+    def coerce_version_to_string(cls, v: Any) -> str:
+        """Accept numeric versions (1.0, 2) and coerce to string."""
+        return str(v) if v is not None else "1.0"
+
+    # Input/output schema
+    inputs: dict[str, Any] = Field(default_factory=dict)
+    outputs: dict[str, Any] = Field(default_factory=dict)
+
+    # Pipeline steps
+    steps: list[PipelineStep] = Field(default_factory=list)
+
+    # Webhook notifications
+    webhooks: WebhookConfig | None = None
+
+    # Expose as MCP tool
+    expose_as_tool: bool = False
+
+    @field_validator("steps", mode="after")
+    @classmethod
+    def validate_steps(cls, v: list[PipelineStep]) -> list[PipelineStep]:
+        """Validate pipeline steps."""
+        if len(v) == 0:
+            raise ValueError("Pipeline requires at least one step")
+
+        # Check for duplicate step IDs
+        ids = [step.id for step in v]
+        if len(ids) != len(set(ids)):
+            duplicates = [id for id in ids if ids.count(id) > 1]
+            raise ValueError(f"Pipeline step IDs must be unique. Duplicates: {set(duplicates)}")
+
+        return v
+
+    def get_step(self, step_id: str) -> PipelineStep | None:
+        """Get a step by its ID."""
+        for step in self.steps:
+            if step.id == step_id:
+                return step
+        return None
+
+
 # --- Workflow State Models (Runtime) ---

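
These models back the YAML pipeline files that `WorkflowLoader.load_pipeline` returns. A sketch constructing them directly to show the validation rules added here (step IDs and commands are invented; the `$lint.output` reference style follows the `PipelineDefinition` docstring):

```python
# Illustrative only - demonstrates the validators defined in this diff.
from gobby.workflows.definitions import PipelineDefinition, PipelineStep

pipeline = PipelineDefinition(
    name="lint-and-summarize",
    steps=[
        PipelineStep(id="lint", exec="ruff check ."),
        PipelineStep(id="summary", prompt="Summarize: $lint.output", input="$lint.output"),
    ],
)
assert pipeline.get_step("lint") is not None

# Each of these raises ValueError under the new validation:
# PipelineStep(id="bad", exec="ls", prompt="also a prompt")  # mutually exclusive types
# PipelineDefinition(name="empty", steps=[])                 # at least one step required
```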