gobby 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gobby/__init__.py +1 -1
- gobby/adapters/claude_code.py +99 -61
- gobby/adapters/gemini.py +140 -38
- gobby/agents/isolation.py +130 -0
- gobby/agents/registry.py +11 -0
- gobby/agents/session.py +1 -0
- gobby/agents/spawn_executor.py +43 -13
- gobby/agents/spawners/macos.py +26 -1
- gobby/app_context.py +59 -0
- gobby/cli/__init__.py +0 -2
- gobby/cli/memory.py +185 -0
- gobby/cli/utils.py +5 -17
- gobby/clones/git.py +177 -0
- gobby/config/features.py +0 -20
- gobby/config/skills.py +31 -0
- gobby/config/tasks.py +4 -0
- gobby/hooks/event_handlers/__init__.py +155 -0
- gobby/hooks/event_handlers/_agent.py +175 -0
- gobby/hooks/event_handlers/_base.py +87 -0
- gobby/hooks/event_handlers/_misc.py +66 -0
- gobby/hooks/event_handlers/_session.py +573 -0
- gobby/hooks/event_handlers/_tool.py +196 -0
- gobby/hooks/hook_manager.py +21 -1
- gobby/install/gemini/hooks/hook_dispatcher.py +74 -15
- gobby/llm/claude.py +377 -42
- gobby/mcp_proxy/importer.py +4 -41
- gobby/mcp_proxy/instructions.py +2 -2
- gobby/mcp_proxy/manager.py +13 -3
- gobby/mcp_proxy/registries.py +35 -4
- gobby/mcp_proxy/services/recommendation.py +2 -28
- gobby/mcp_proxy/tools/agent_messaging.py +93 -44
- gobby/mcp_proxy/tools/agents.py +45 -9
- gobby/mcp_proxy/tools/artifacts.py +46 -12
- gobby/mcp_proxy/tools/sessions/_commits.py +31 -24
- gobby/mcp_proxy/tools/sessions/_crud.py +5 -5
- gobby/mcp_proxy/tools/sessions/_handoff.py +45 -41
- gobby/mcp_proxy/tools/sessions/_messages.py +35 -7
- gobby/mcp_proxy/tools/spawn_agent.py +44 -6
- gobby/mcp_proxy/tools/task_readiness.py +27 -4
- gobby/mcp_proxy/tools/tasks/_context.py +18 -0
- gobby/mcp_proxy/tools/tasks/_crud.py +13 -6
- gobby/mcp_proxy/tools/tasks/_lifecycle.py +29 -14
- gobby/mcp_proxy/tools/tasks/_session.py +22 -7
- gobby/mcp_proxy/tools/workflows/__init__.py +266 -0
- gobby/mcp_proxy/tools/workflows/_artifacts.py +225 -0
- gobby/mcp_proxy/tools/workflows/_import.py +112 -0
- gobby/mcp_proxy/tools/workflows/_lifecycle.py +321 -0
- gobby/mcp_proxy/tools/workflows/_query.py +207 -0
- gobby/mcp_proxy/tools/workflows/_resolution.py +78 -0
- gobby/mcp_proxy/tools/workflows/_terminal.py +139 -0
- gobby/mcp_proxy/tools/worktrees.py +32 -7
- gobby/memory/components/__init__.py +0 -0
- gobby/memory/components/ingestion.py +98 -0
- gobby/memory/components/search.py +108 -0
- gobby/memory/extractor.py +15 -1
- gobby/memory/manager.py +16 -25
- gobby/paths.py +51 -0
- gobby/prompts/loader.py +1 -35
- gobby/runner.py +36 -10
- gobby/servers/http.py +186 -149
- gobby/servers/routes/admin.py +12 -0
- gobby/servers/routes/mcp/endpoints/execution.py +15 -7
- gobby/servers/routes/mcp/endpoints/registry.py +8 -8
- gobby/servers/routes/mcp/hooks.py +50 -3
- gobby/servers/websocket.py +57 -1
- gobby/sessions/analyzer.py +4 -4
- gobby/sessions/manager.py +9 -0
- gobby/sessions/transcripts/gemini.py +100 -34
- gobby/skills/parser.py +23 -0
- gobby/skills/sync.py +5 -4
- gobby/storage/artifacts.py +19 -0
- gobby/storage/database.py +9 -2
- gobby/storage/memories.py +32 -21
- gobby/storage/migrations.py +46 -4
- gobby/storage/sessions.py +4 -2
- gobby/storage/skills.py +87 -7
- gobby/tasks/external_validator.py +4 -17
- gobby/tasks/validation.py +13 -87
- gobby/tools/summarizer.py +18 -51
- gobby/utils/status.py +13 -0
- gobby/workflows/actions.py +5 -0
- gobby/workflows/context_actions.py +21 -24
- gobby/workflows/detection_helpers.py +38 -24
- gobby/workflows/enforcement/__init__.py +11 -1
- gobby/workflows/enforcement/blocking.py +109 -1
- gobby/workflows/enforcement/handlers.py +35 -1
- gobby/workflows/engine.py +96 -0
- gobby/workflows/evaluator.py +110 -0
- gobby/workflows/hooks.py +41 -0
- gobby/workflows/lifecycle_evaluator.py +2 -1
- gobby/workflows/memory_actions.py +11 -0
- gobby/workflows/safe_evaluator.py +8 -0
- gobby/workflows/summary_actions.py +123 -50
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/METADATA +1 -1
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/RECORD +99 -107
- gobby/cli/tui.py +0 -34
- gobby/hooks/event_handlers.py +0 -909
- gobby/mcp_proxy/tools/workflows.py +0 -973
- gobby/tui/__init__.py +0 -5
- gobby/tui/api_client.py +0 -278
- gobby/tui/app.py +0 -329
- gobby/tui/screens/__init__.py +0 -25
- gobby/tui/screens/agents.py +0 -333
- gobby/tui/screens/chat.py +0 -450
- gobby/tui/screens/dashboard.py +0 -377
- gobby/tui/screens/memory.py +0 -305
- gobby/tui/screens/metrics.py +0 -231
- gobby/tui/screens/orchestrator.py +0 -903
- gobby/tui/screens/sessions.py +0 -412
- gobby/tui/screens/tasks.py +0 -440
- gobby/tui/screens/workflows.py +0 -289
- gobby/tui/screens/worktrees.py +0 -174
- gobby/tui/widgets/__init__.py +0 -21
- gobby/tui/widgets/chat.py +0 -210
- gobby/tui/widgets/conductor.py +0 -104
- gobby/tui/widgets/menu.py +0 -132
- gobby/tui/widgets/message_panel.py +0 -160
- gobby/tui/widgets/review_gate.py +0 -224
- gobby/tui/widgets/task_tree.py +0 -99
- gobby/tui/widgets/token_budget.py +0 -166
- gobby/tui/ws_client.py +0 -258
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/WHEEL +0 -0
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/entry_points.txt +0 -0
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {gobby-0.2.7.dist-info → gobby-0.2.9.dist-info}/top_level.txt +0 -0
gobby/storage/database.py
CHANGED
@@ -11,7 +11,7 @@ import threading
 import weakref
 from collections.abc import Iterator
 from contextlib import AbstractContextManager, contextmanager
-from datetime import date, datetime
+from datetime import UTC, date, datetime
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Protocol, cast, runtime_checkable

@@ -21,6 +21,9 @@ from typing import TYPE_CHECKING, Any, Protocol, cast, runtime_checkable

 def _adapt_datetime(val: datetime) -> str:
     """Adapt datetime to ISO format string for SQLite storage."""
+    # If naive datetime, assume UTC and add timezone info for RFC3339 compliance
+    if val.tzinfo is None:
+        val = val.replace(tzinfo=UTC)
     return val.isoformat()


@@ -31,7 +34,11 @@ def _adapt_date(val: date) -> str:

 def _convert_datetime(val: bytes) -> datetime:
     """Convert SQLite datetime string back to datetime object."""
-    return datetime.fromisoformat(val.decode())
+    dt = datetime.fromisoformat(val.decode())
+    # Ensure timezone-aware (treat naive as UTC) for consistency
+    if dt.tzinfo is None:
+        dt = dt.replace(tzinfo=UTC)
+    return dt


 def _convert_date(val: bytes) -> date:
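The change makes naive datetimes round-trip through SQLite as UTC-aware values. A minimal sketch of that round trip, assuming the two adapters are registered with sqlite3 elsewhere in the module (registration is not shown in this diff):

from datetime import UTC, datetime

from gobby.storage.database import _adapt_datetime, _convert_datetime

naive = datetime(2024, 1, 1, 12, 0)
stored = _adapt_datetime(naive)                # '2024-01-01T12:00:00+00:00'
restored = _convert_datetime(stored.encode())  # timezone-aware on the way back
assert restored == naive.replace(tzinfo=UTC)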
gobby/storage/memories.py
CHANGED
@@ -197,40 +197,51 @@ class LocalMemoryManager:
         return row is not None

     def content_exists(self, content: str, project_id: str | None = None) -> bool:
-        """Check if a memory with identical content already exists.
-        # Normalize content same way as ID generation in create_memory
-        normalized_content = content.strip()
-        project_str = project_id if project_id else ""
-        # Use delimiter to match create_memory ID generation
-        memory_id = generate_prefixed_id("mm", f"{normalized_content}||{project_str}")
+        """Check if a memory with identical content already exists.

-
-
+        Uses global deduplication - checks if any memory has the same content,
+        regardless of project_id. This prevents duplicates when the same content
+        is stored with different or NULL project_ids.
+
+        Args:
+            content: The content to check for
+            project_id: Ignored (kept for backward compatibility)
+
+        Returns:
+            True if a memory with identical content exists
+        """
+        # Global deduplication: check by content directly, ignoring project_id
+        # This fixes the duplicate issue where same content + different project_id
+        # would create different memory IDs
+        normalized_content = content.strip()
+        row = self.db.fetchone(
+            "SELECT 1 FROM memories WHERE content = ? LIMIT 1",
+            (normalized_content,),
+        )
         return row is not None

     def get_memory_by_content(self, content: str, project_id: str | None = None) -> Memory | None:
-        """Get a memory by its exact content
+        """Get a memory by its exact content.

-
-
+        Uses global lookup - finds any memory with matching content regardless
+        of project_id. This matches the behavior of content_exists().

         Args:
             content: The exact content to look up (will be normalized)
-            project_id:
+            project_id: Ignored (kept for backward compatibility)

         Returns:
             The Memory object if found, None otherwise
         """
-        #
+        # Global lookup: find by content directly, ignoring project_id
         normalized_content = content.strip()
-
-
-
-
-
-        return
-
-        return None
+        row = self.db.fetchone(
+            "SELECT * FROM memories WHERE content = ? LIMIT 1",
+            (normalized_content,),
+        )
+        if row:
+            return Memory.from_row(row)
+        return None

     def update_memory(
         self,
CHANGED
|
@@ -43,11 +43,11 @@ class MigrationUnsupportedError(Exception):
|
|
|
43
43
|
# Migration can be SQL string or a callable that takes LocalDatabase
|
|
44
44
|
MigrationAction = str | Callable[[LocalDatabase], None]
|
|
45
45
|
|
|
46
|
-
# Baseline version - the schema state
|
|
47
|
-
#
|
|
48
|
-
BASELINE_VERSION =
|
|
46
|
+
# Baseline version - the schema state at v79 (flattened)
|
|
47
|
+
# This is applied for new databases directly
|
|
48
|
+
BASELINE_VERSION = 79
|
|
49
49
|
|
|
50
|
-
# Baseline schema - flattened from
|
|
50
|
+
# Baseline schema - flattened from v78 production state, includes hub tracking fields
|
|
51
51
|
# This is applied for new databases directly
|
|
52
52
|
# Generated by: sqlite3 ~/.gobby/gobby-hub.db .schema
|
|
53
53
|
BASELINE_SCHEMA = """
|
|
@@ -583,7 +583,12 @@ CREATE TABLE skills (
|
|
|
583
583
|
source_path TEXT,
|
|
584
584
|
source_type TEXT,
|
|
585
585
|
source_ref TEXT,
|
|
586
|
+
hub_name TEXT,
|
|
587
|
+
hub_slug TEXT,
|
|
588
|
+
hub_version TEXT,
|
|
586
589
|
enabled INTEGER DEFAULT 1,
|
|
590
|
+
always_apply INTEGER DEFAULT 0,
|
|
591
|
+
injection_format TEXT DEFAULT 'summary',
|
|
587
592
|
project_id TEXT REFERENCES projects(id) ON DELETE CASCADE,
|
|
588
593
|
created_at TEXT NOT NULL,
|
|
589
594
|
updated_at TEXT NOT NULL
|
|
@@ -591,6 +596,7 @@ CREATE TABLE skills (
|
|
|
591
596
|
CREATE INDEX idx_skills_name ON skills(name);
|
|
592
597
|
CREATE INDEX idx_skills_project_id ON skills(project_id);
|
|
593
598
|
CREATE INDEX idx_skills_enabled ON skills(enabled);
|
|
599
|
+
CREATE INDEX idx_skills_always_apply ON skills(always_apply);
|
|
594
600
|
CREATE UNIQUE INDEX idx_skills_name_project ON skills(name, project_id);
|
|
595
601
|
CREATE UNIQUE INDEX idx_skills_name_global ON skills(name) WHERE project_id IS NULL;
|
|
596
602
|
|
|
@@ -692,11 +698,47 @@ def _migrate_backfill_session_seq_num_per_project(db: LocalDatabase) -> None:
|
|
|
692
698
|
logger.info(f"Re-numbered {updated} sessions with per-project seq_num")
|
|
693
699
|
|
|
694
700
|
|
|
701
|
+
def _migrate_add_hub_tracking_to_skills(db: LocalDatabase) -> None:
|
|
702
|
+
"""Add hub tracking fields to skills table.
|
|
703
|
+
|
|
704
|
+
Adds hub_name, hub_slug, and hub_version columns to track which hub
|
|
705
|
+
a skill was installed from.
|
|
706
|
+
"""
|
|
707
|
+
with db.transaction() as conn:
|
|
708
|
+
conn.execute("ALTER TABLE skills ADD COLUMN hub_name TEXT")
|
|
709
|
+
conn.execute("ALTER TABLE skills ADD COLUMN hub_slug TEXT")
|
|
710
|
+
conn.execute("ALTER TABLE skills ADD COLUMN hub_version TEXT")
|
|
711
|
+
|
|
712
|
+
logger.info("Added hub tracking fields to skills table")
|
|
713
|
+
|
|
714
|
+
|
|
715
|
+
def _migrate_add_skill_injection_columns(db: LocalDatabase) -> None:
|
|
716
|
+
"""Add always_apply and injection_format columns to skills table.
|
|
717
|
+
|
|
718
|
+
These columns enable per-skill control over:
|
|
719
|
+
- always_apply: Whether skill should always be injected at session start
|
|
720
|
+
- injection_format: How to inject the skill (summary, full, content)
|
|
721
|
+
|
|
722
|
+
The values are extracted from SKILL.md frontmatter during sync and stored
|
|
723
|
+
as columns for efficient querying.
|
|
724
|
+
"""
|
|
725
|
+
with db.transaction() as conn:
|
|
726
|
+
conn.execute("ALTER TABLE skills ADD COLUMN always_apply INTEGER DEFAULT 0")
|
|
727
|
+
conn.execute("ALTER TABLE skills ADD COLUMN injection_format TEXT DEFAULT 'summary'")
|
|
728
|
+
conn.execute("CREATE INDEX idx_skills_always_apply ON skills(always_apply)")
|
|
729
|
+
|
|
730
|
+
logger.info("Added always_apply and injection_format columns to skills table")
|
|
731
|
+
|
|
732
|
+
|
|
695
733
|
MIGRATIONS: list[tuple[int, str, MigrationAction]] = [
|
|
696
734
|
# Project-scoped session refs: Change seq_num index from global to project-scoped
|
|
697
735
|
(76, "Make sessions.seq_num project-scoped", _migrate_session_seq_num_project_scoped),
|
|
698
736
|
# Project-scoped session refs: Re-backfill seq_num per project
|
|
699
737
|
(77, "Backfill sessions.seq_num per project", _migrate_backfill_session_seq_num_per_project),
|
|
738
|
+
# Hub tracking: Add hub_name, hub_slug, hub_version to skills table
|
|
739
|
+
(78, "Add hub tracking fields to skills", _migrate_add_hub_tracking_to_skills),
|
|
740
|
+
# Skill injection: Add always_apply and injection_format columns
|
|
741
|
+
(79, "Add skill injection columns", _migrate_add_skill_injection_columns),
|
|
700
742
|
]
|
|
701
743
|
|
|
702
744
|
|
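Existing databases gain the new columns through migrations 78 and 79, while fresh databases get them directly from the flattened baseline schema. A sketch of how a runner might apply the callable entries; the real runner and the LocalDatabase method used to execute raw SQL are not shown in this diff, so both are assumptions:

def apply_pending(db: LocalDatabase, current_version: int) -> int:
    # Illustrative only: apply every migration newer than current_version, in order.
    for version, description, action in MIGRATIONS:
        if version <= current_version:
            continue
        if callable(action):
            action(db)  # e.g. _migrate_add_hub_tracking_to_skills, _migrate_add_skill_injection_columns
        else:
            db.executescript(action)  # plain-SQL migration; method name assumed
        current_version = version
    return current_version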
gobby/storage/sessions.py
CHANGED
@@ -166,6 +166,7 @@ class LocalSessionManager:
         agent_depth: int = 0,
         spawned_by_agent_id: str | None = None,
         terminal_context: dict[str, Any] | None = None,
+        workflow_name: str | None = None,
     ) -> Session:
         """
         Register a new session or return existing one.

@@ -241,9 +242,9 @@
                     id, external_id, machine_id, source, project_id, title,
                     jsonl_path, git_branch, parent_session_id,
                     agent_depth, spawned_by_agent_id, terminal_context,
-                    status, created_at, updated_at, seq_num, had_edits
+                    workflow_name, status, created_at, updated_at, seq_num, had_edits
                 )
-                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'active', ?, ?, ?, 0)
+                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 'active', ?, ?, ?, 0)
                 """,
                 (
                     session_id,

@@ -258,6 +259,7 @@
                     agent_depth,
                     spawned_by_agent_id,
                     json.dumps(terminal_context) if terminal_context else None,
+                    workflow_name,
                     now,
                     now,
                     next_seq_num,
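Callers can now record which workflow a session was started under; the value is written to the new workflow_name column. A hedged sketch of the call; the registration method's name and its other arguments are assumed, only the new parameter comes from this diff:

session = session_manager.register_session(  # method name assumed
    external_id="abc123",
    machine_id="local",
    source="claude_code",
    project_id="proj-a",
    workflow_name="code-review",  # newly persisted on the sessions row
)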
gobby/storage/skills.py
CHANGED
@@ -52,6 +52,11 @@ class Skill:
     - source_type: 'local', 'github', 'url', 'zip', 'filesystem'
     - source_ref: Git ref for updates (branch/tag/commit)

+    Hub Tracking:
+    - hub_name: Name of the hub the skill originated from
+    - hub_slug: Slug of the hub the skill originated from
+    - hub_version: Version of the skill as reported by the hub
+
     Gobby-specific:
     - enabled: Toggle skill on/off without removing
     - project_id: NULL for global, else project-scoped

@@ -79,8 +84,15 @@ class Skill:
     source_type: SkillSourceType | None = None
     source_ref: str | None = None

+    # Hub Tracking
+    hub_name: str | None = None
+    hub_slug: str | None = None
+    hub_version: str | None = None
+
     # Gobby-specific
     enabled: bool = True
+    always_apply: bool = False
+    injection_format: str = "summary"  # "summary", "full", "content"
     project_id: str | None = None

     # Timestamps

@@ -117,7 +129,14 @@ class Skill:
             source_path=row["source_path"],
             source_type=row["source_type"],
             source_ref=row["source_ref"],
+            hub_name=row["hub_name"] if "hub_name" in row.keys() else None,
+            hub_slug=row["hub_slug"] if "hub_slug" in row.keys() else None,
+            hub_version=row["hub_version"] if "hub_version" in row.keys() else None,
             enabled=bool(row["enabled"]),
+            always_apply=bool(row["always_apply"]) if "always_apply" in row.keys() else False,
+            injection_format=row["injection_format"]
+            if "injection_format" in row.keys()
+            else "summary",
             project_id=row["project_id"],
             created_at=row["created_at"],
             updated_at=row["updated_at"],

@@ -142,7 +161,12 @@ class Skill:
             "source_path": self.source_path,
             "source_type": self.source_type,
             "source_ref": self.source_ref,
+            "hub_name": self.hub_name,
+            "hub_slug": self.hub_slug,
+            "hub_version": self.hub_version,
             "enabled": self.enabled,
+            "always_apply": self.always_apply,
+            "injection_format": self.injection_format,
             "project_id": self.project_id,
             "created_at": self.created_at,
             "updated_at": self.updated_at,

@@ -176,9 +200,13 @@ class Skill:
     def is_always_apply(self) -> bool:
         """Check if this is a core skill that should always be applied.

-
-
+        Reads from the always_apply column first (set during sync from frontmatter).
+        Falls back to metadata for backwards compatibility with older records.
         """
+        # Primary: read from column (set during sync)
+        if self.always_apply:
+            return True
+        # Fallback: check metadata for backwards compatibility
         if not self.metadata:
             return False
         # Check top-level first

@@ -387,7 +415,12 @@ class LocalSkillManager:
         source_path: str | None = None,
         source_type: SkillSourceType | None = None,
         source_ref: str | None = None,
+        hub_name: str | None = None,
+        hub_slug: str | None = None,
+        hub_version: str | None = None,
         enabled: bool = True,
+        always_apply: bool = False,
+        injection_format: str = "summary",
         project_id: str | None = None,
     ) -> Skill:
         """Create a new skill.

@@ -404,7 +437,12 @@
             source_path: Original file path or URL
             source_type: Source type ('local', 'github', 'url', 'zip', 'filesystem')
             source_ref: Git ref for updates
+            hub_name: Optional hub name
+            hub_slug: Optional hub slug
+            hub_version: Optional hub version
             enabled: Whether skill is active
+            always_apply: Whether skill should always be injected at session start
+            injection_format: How to inject skill (summary, full, content)
             project_id: Project scope (None for global)

         Returns:

@@ -434,9 +472,10 @@
                 INSERT INTO skills (
                     id, name, description, content, version, license,
                     compatibility, allowed_tools, metadata, source_path,
-                    source_type, source_ref,
+                    source_type, source_ref, hub_name, hub_slug, hub_version,
+                    enabled, always_apply, injection_format, project_id,
                     created_at, updated_at
-                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                 """,
                 (
                     skill_id,

@@ -451,7 +490,12 @@
                     source_path,
                     source_type,
                     source_ref,
+                    hub_name,
+                    hub_slug,
+                    hub_version,
                     enabled,
+                    always_apply,
+                    injection_format,
                     project_id,
                     now,
                     now,

@@ -530,7 +574,12 @@
         source_path: str | None = _UNSET,
         source_type: SkillSourceType | None = _UNSET,
         source_ref: str | None = _UNSET,
+        hub_name: str | None = _UNSET,
+        hub_slug: str | None = _UNSET,
+        hub_version: str | None = _UNSET,
         enabled: bool | None = None,
+        always_apply: bool | None = None,
+        injection_format: str | None = None,
     ) -> Skill:
         """Update an existing skill.

@@ -547,7 +596,12 @@
             source_path: New source path (use _UNSET to leave unchanged, None to clear)
             source_type: New source type (use _UNSET to leave unchanged, None to clear)
             source_ref: New source ref (use _UNSET to leave unchanged, None to clear)
+            hub_name: New hub name (use _UNSET to leave unchanged, None to clear)
+            hub_slug: New hub slug (use _UNSET to leave unchanged, None to clear)
+            hub_version: New hub version (use _UNSET to leave unchanged, None to clear)
             enabled: New enabled state (optional)
+            always_apply: New always_apply state (optional)
+            injection_format: New injection format (optional)

         Returns:
             The updated Skill

@@ -591,9 +645,24 @@
         if source_ref is not _UNSET:
             updates.append("source_ref = ?")
             params.append(source_ref)
+        if hub_name is not _UNSET:
+            updates.append("hub_name = ?")
+            params.append(hub_name)
+        if hub_slug is not _UNSET:
+            updates.append("hub_slug = ?")
+            params.append(hub_slug)
+        if hub_version is not _UNSET:
+            updates.append("hub_version = ?")
+            params.append(hub_version)
         if enabled is not None:
             updates.append("enabled = ?")
             params.append(enabled)
+        if always_apply is not None:
+            updates.append("always_apply = ?")
+            params.append(always_apply)
+        if injection_format is not None:
+            updates.append("injection_format = ?")
+            params.append(injection_format)

         if not updates:
             return self.get_skill(skill_id)

@@ -730,7 +799,7 @@
         return [Skill.from_row(row) for row in rows]

     def list_core_skills(self, project_id: str | None = None) -> list[Skill]:
-        """List skills with
+        """List skills with always_apply=true (efficiently via column query).

         Args:
             project_id: Optional project scope

@@ -738,8 +807,19 @@
         Returns:
             List of core skills (always-apply skills)
         """
-
-
+        query = "SELECT * FROM skills WHERE always_apply = 1 AND enabled = 1"
+        params: list[Any] = []
+
+        if project_id:
+            query += " AND (project_id = ? OR project_id IS NULL)"
+            params.append(project_id)
+        else:
+            query += " AND project_id IS NULL"
+
+        query += " ORDER BY name ASC"
+
+        rows = self.db.fetchall(query, tuple(params))
+        return [Skill.from_row(row) for row in rows]

     def skill_exists(self, skill_id: str) -> bool:
         """Check if a skill with the given ID exists.
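Taken together, a skill now carries its hub provenance (hub_name, hub_slug, hub_version) and its injection behaviour (always_apply, injection_format) as real columns. A usage sketch; the create method's name and required arguments are assumed, while the field names, list_core_skills, and is_always_apply come from the diff:

skill = skill_manager.create_skill(  # method name assumed
    name="commit-style",
    description="House rules for commit messages",
    content="# SKILL.md body ...",
    hub_name="Acme Hub",
    hub_slug="acme",
    hub_version="1.2.0",
    always_apply=True,
    injection_format="full",
)

# Always-apply skills are now found via an indexed column query rather than a metadata scan:
core = skill_manager.list_core_skills(project_id=None)
assert any(s.name == "commit-style" for s in core)
assert skill.is_always_apply()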
gobby/tasks/external_validator.py
CHANGED
@@ -46,12 +46,7 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)

 # Default system prompt for external validators
-DEFAULT_EXTERNAL_SYSTEM_PROMPT = (
-    "You are an objective QA validator reviewing code changes. "
-    "You have no prior context about this task - evaluate purely based on "
-    "the acceptance criteria and the changes provided. "
-    "Be thorough but fair in your assessment."
-)
+

 # Module-level loader (initialized lazily)
 _loader: PromptLoader | None = None

@@ -62,10 +57,7 @@ def _get_loader(project_dir: Path | None = None) -> PromptLoader:
     global _loader
     if _loader is None:
         _loader = PromptLoader(project_dir=project_dir)
-
-        _loader.register_fallback(
-            "external_validation/system", lambda: DEFAULT_EXTERNAL_SYSTEM_PROMPT
-        )
+
     return _loader


@@ -218,13 +210,8 @@ async def _run_llm_validation(
     # Build the validation prompt
     prompt = _build_external_validation_prompt(task, changes_context)

-    #
-    system_prompt = (
-        "You are an objective QA validator reviewing code changes. "
-        "You have no prior context about this task - evaluate purely based on "
-        "the acceptance criteria and the changes provided. "
-        "Be thorough but fair in your assessment."
-    )
+    # Render system prompt
+    system_prompt = _get_loader().render("external_validation/system", {})

     try:
         provider = llm_service.get_provider(config.provider)
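With the hard-coded constant removed, the external validator's system prompt resolves through the prompt loader under the "external_validation/system" key. A sketch of the lookup; where PromptLoader finds its templates is its own convention and is not shown here:

from pathlib import Path

loader = _get_loader(project_dir=Path("/path/to/project"))
system_prompt = loader.render("external_validation/system", {})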
gobby/tasks/validation.py
CHANGED
@@ -27,51 +27,6 @@ from gobby.utils.json_helpers import extract_json_object

 logger = logging.getLogger(__name__)

-# Default prompts (fallbacks for strangler fig pattern)
-DEFAULT_VALIDATE_PROMPT = """Validate if the following changes satisfy the requirements.
-
-Task: {title}
-{category_section}{criteria_text}
-
-{changes_section}
-IMPORTANT: Return ONLY a JSON object, nothing else. No explanation, no preamble.
-Format: {{"status": "valid", "feedback": "..."}} or {{"status": "invalid", "feedback": "..."}}
-"""
-
-DEFAULT_CRITERIA_PROMPT = """Generate validation criteria for this task.
-
-Task: {title}
-Description: {description}
-
-CRITICAL RULES - You MUST follow these:
-1. **Only stated requirements** - Include ONLY requirements explicitly written in the title or description
-2. **No invented values** - Do NOT invent specific numbers, timeouts, thresholds, or limits unless they appear in the task
-3. **No invented edge cases** - Do NOT add edge cases, error scenarios, or boundary conditions beyond what's described
-4. **Proportional detail** - Vague tasks get vague criteria; detailed tasks get detailed criteria
-5. **When in doubt, leave it out** - If something isn't mentioned, don't include it
-
-For vague requirements like "fix X" or "add Y", use criteria like:
-- "X no longer produces the reported error/warning"
-- "Y functionality works as expected"
-- "Existing tests continue to pass"
-- "No regressions introduced"
-
-DO NOT generate criteria like:
-- "timeout defaults to 30 seconds" (unless 30 seconds is in the task description)
-- "handles edge case Z" (unless Z is mentioned in the task)
-- "logs with format X" (unless that format is specified)
-
-Format as markdown checkboxes:
-## Deliverable
-- [ ] What the task explicitly asks for
-
-## Functional Requirements
-- [ ] Only requirements stated in the description
-
-## Verification
-- [ ] Tests pass (if applicable)
-- [ ] No regressions
-"""

 # Default number of commits to look back when gathering context
 DEFAULT_COMMIT_WINDOW = 10

@@ -490,10 +445,6 @@ class TaskValidator:
         self.llm_service = llm_service
         self._loader = PromptLoader(project_dir=project_dir)

-        # Register fallbacks for strangler fig pattern
-        self._loader.register_fallback("validation/validate", lambda: DEFAULT_VALIDATE_PROMPT)
-        self._loader.register_fallback("validation/criteria", lambda: DEFAULT_CRITERIA_PROMPT)
-
     async def gather_validation_context(self, file_paths: list[str]) -> str:
         """
         Gather context for validation from files.

@@ -588,35 +539,16 @@
         else:
             category_section += "\n"

-        # Build prompt using PromptLoader
-
-
-
-
-
-
-
-
-
-            try:
-                prompt = self._loader.render(prompt_path, template_context)
-            except FileNotFoundError:
-                logger.debug(f"Prompt template '{prompt_path}' not found, using fallback")
-                prompt = DEFAULT_VALIDATE_PROMPT.format(**template_context)
-                if file_context:
-                    prompt += f"\nFile Context:\n{file_context[:50000]}\n"
-        else:
-            # Default behavior
-            template_context = {
-                "title": title,
-                "category_section": category_section,
-                "criteria_text": criteria_text,
-                "changes_section": changes_section,
-                "file_context": file_context[:50000] if file_context else "",
-            }
-            prompt = DEFAULT_VALIDATE_PROMPT.format(**template_context)
-            if file_context:
-                prompt += f"\nFile Context:\n{file_context[:50000]}\n"
+        # Build prompt using PromptLoader
+        prompt_path = self.config.prompt_path or "validation/validate"
+        template_context = {
+            "title": title,
+            "category_section": category_section,
+            "criteria_text": criteria_text,
+            "changes_section": changes_section,
+            "file_context": file_context[:50000] if file_context else "",
+        }
+        prompt = self._loader.render(prompt_path, template_context)

         try:
             provider = self.llm_service.get_provider(self.config.provider)

@@ -670,19 +602,13 @@ class TaskValidator:
         if not self.config.enabled:
             return None

-        #
+        # Use PromptLoader
+        prompt_path = self.config.criteria_prompt_path or "validation/criteria"
         template_context = {
             "title": title,
             "description": description or "(no description)",
         }
-
-        # Use PromptLoader
-        prompt_path = self.config.criteria_prompt_path or "validation/criteria"
-        try:
-            prompt = self._loader.render(prompt_path, template_context)
-        except FileNotFoundError:
-            logger.debug(f"Prompt template '{prompt_path}' not found, using fallback")
-            prompt = DEFAULT_CRITERIA_PROMPT.format(**template_context)
+        prompt = self._loader.render(prompt_path, template_context)

         try:
             provider = self.llm_service.get_provider(self.config.provider)
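validation.py follows the same pattern: with the inline DEFAULT_* fallbacks deleted, the "validation/validate" and "validation/criteria" templates must resolve through PromptLoader. A sketch of rendering the validate prompt with the context keys built above; the example values and loader wiring are illustrative only:

from pathlib import Path

loader = PromptLoader(project_dir=Path("/path/to/project"))
prompt = loader.render(
    "validation/validate",
    {
        "title": "Add retry to HTTP client",
        "category_section": "",
        "criteria_text": "- [ ] Requests are retried on connection errors",
        "changes_section": "Changed files:\n- gobby/servers/http.py",
        "file_context": "",
    },
)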