wogiflow 2.26.2 → 2.29.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/wogi-bug.md +30 -0
- package/.claude/commands/wogi-debug-hypothesis.md +33 -0
- package/.claude/commands/wogi-morning.md +1 -2
- package/.claude/commands/wogi-review.md +31 -2
- package/.claude/commands/wogi-start.md +32 -0
- package/.claude/commands/wogi-statusline-setup.md +12 -0
- package/.claude/commands/wogi-story.md +3 -2
- package/.claude/docs/claude-code-compatibility.md +40 -0
- package/.claude/docs/phases/01-explore.md +2 -1
- package/.claude/docs/phases/03-implement.md +4 -0
- package/.claude/docs/phases/04-verify.md +45 -0
- package/.claude/rules/README.md +36 -0
- package/.claude/rules/_internal/worker-tool-first-turn.md +82 -0
- package/.claude/rules/alternative-execpolicy-toml-command-policy.md +11 -0
- package/.claude/rules/alternative-hand-edit-ready-json-to-register-orpha.md +11 -0
- package/.claude/rules/alternative-permission-ruleset-per-phase.md +11 -0
- package/.claude/rules/alternative-short-name.md +12 -0
- package/.claude/rules/alternative-wogi-flow-as-mcp-client-oauth-manager.md +11 -0
- package/.claude/rules/architecture/hook-three-layer.md +68 -0
- package/.claude/rules/dual-repo-architecture-2026-02-28.md +18 -0
- package/.claude/rules/github-release-workflow-2026-01-30.md +16 -0
- package/.claude/settings.json +1 -1
- package/.workflow/agents/logic-adversary.md +2 -1
- package/.workflow/agents/personas/README.md +48 -0
- package/.workflow/agents/personas/platform-rigor.md +38 -0
- package/.workflow/agents/personas/scale-skeptic.md +28 -0
- package/.workflow/agents/personas/security-hawk.md +34 -0
- package/.workflow/agents/personas/simplicity-champion.md +37 -0
- package/.workflow/agents/personas/user-advocate.md +36 -0
- package/.workflow/bridges/base-bridge.js +46 -23
- package/.workflow/templates/claude-md.hbs +44 -122
- package/.workflow/templates/partials/feature-dossiers.hbs +33 -0
- package/.workflow/templates/partials/intent-grounded-reasoning.hbs +2 -12
- package/.workflow/templates/partials/methodology-rules.hbs +85 -79
- package/.workflow/templates/tier3-dom-field-inventory.md +102 -0
- package/lib/fuzzy-patch.js +251 -0
- package/lib/installer.js +8 -0
- package/lib/memory-proposal-store.js +458 -0
- package/lib/mode-schema.js +255 -0
- package/lib/skill-proposal-store.js +432 -0
- package/lib/skill-registry.js +1 -1
- package/lib/wogi-claude +84 -9
- package/lib/wogi-claude-expect.exp +113 -76
- package/lib/workspace-channel-server.js +19 -0
- package/lib/workspace-contracts.js +1 -1
- package/lib/workspace-dispatch-tracking.js +144 -0
- package/lib/workspace-gates.js +1 -1
- package/lib/workspace-ipc-sqlite.js +550 -0
- package/lib/workspace-messages.js +92 -0
- package/lib/workspace-routing.js +1 -1
- package/lib/workspace-task-injector.js +223 -0
- package/lib/workspace.js +23 -0
- package/lib/worktree-review.js +315 -0
- package/package.json +2 -2
- package/scripts/base-workflow-step.js +1 -1
- package/scripts/flow +28 -4
- package/scripts/flow-ac-scope-preservation.js +238 -0
- package/scripts/flow-auto-review-worker.js +75 -0
- package/scripts/flow-auto-review.js +102 -0
- package/scripts/flow-autonomous-detector.js +118 -0
- package/scripts/flow-autonomous-mode.js +153 -0
- package/scripts/flow-best-of-n.js +1 -1
- package/scripts/flow-bulk-loop.js +1 -1
- package/scripts/flow-checkpoint.js +2 -6
- package/scripts/flow-community-sync.js +1 -1
- package/scripts/flow-completion-summary.js +176 -0
- package/scripts/flow-completion-truth-gate.js +343 -4
- package/scripts/flow-config-defaults.js +52 -5
- package/scripts/flow-context-compact/expander.js +1 -1
- package/scripts/flow-context-compact/section-extractor.js +2 -2
- package/scripts/flow-context-gatherer.js +1 -1
- package/scripts/flow-context-generator.js +1 -1
- package/scripts/flow-context-scoring.js +1 -1
- package/scripts/flow-correct.js +1 -1
- package/scripts/flow-decision-authority.js +66 -15
- package/scripts/flow-done.js +33 -1
- package/scripts/flow-epic-cascade.js +171 -0
- package/scripts/flow-epics.js +2 -7
- package/scripts/flow-eval-judge.js +1 -1
- package/scripts/flow-eval.js +1 -1
- package/scripts/flow-export-scanner.js +2 -6
- package/scripts/flow-failure-learning.js +1 -1
- package/scripts/flow-feature-dossier.js +787 -0
- package/scripts/flow-figma-extract.js +2 -2
- package/scripts/flow-figma-generate.js +1 -1
- package/scripts/flow-gate-confidence.js +1 -1
- package/scripts/flow-health.js +52 -1
- package/scripts/flow-hooks.js +1 -1
- package/scripts/flow-id.js +19 -3
- package/scripts/flow-instruction-richness.js +1 -1
- package/scripts/flow-knowledge-router.js +1 -1
- package/scripts/flow-knowledge-sync.js +1 -1
- package/scripts/flow-logic-adversary.js +76 -1
- package/scripts/flow-logic-rules.js +380 -0
- package/scripts/flow-long-input.js +5 -5
- package/scripts/flow-memory-sync.js +1 -1
- package/scripts/flow-memory.js +78 -7
- package/scripts/flow-migrate.js +1 -1
- package/scripts/flow-model-caller.js +1 -1
- package/scripts/flow-models.js +2 -2
- package/scripts/flow-morning.js +0 -17
- package/scripts/flow-multi-approach.js +1 -1
- package/scripts/flow-orchestrate-context.js +4 -4
- package/scripts/flow-orchestrate-templates.js +1 -1
- package/scripts/flow-orchestrate.js +8 -8
- package/scripts/flow-peer-review.js +1 -1
- package/scripts/flow-phase.js +9 -0
- package/scripts/flow-proactive-compact.js +1 -1
- package/scripts/flow-providers.js +1 -1
- package/scripts/flow-question-queue.js +255 -0
- package/scripts/flow-repo-map.js +312 -0
- package/scripts/flow-review-passes/index.js +1 -1
- package/scripts/flow-review-passes/integration.js +1 -1
- package/scripts/flow-review-passes/structure.js +1 -1
- package/scripts/flow-revision-tracker.js +1 -1
- package/scripts/flow-section-resolver.js +1 -1
- package/scripts/flow-session-end.js +74 -5
- package/scripts/flow-session-state.js +103 -1
- package/scripts/flow-setup-hooks.js +1 -1
- package/scripts/flow-skeptical-evaluator.js +274 -0
- package/scripts/flow-skill-generator.js +3 -3
- package/scripts/flow-skill-learn.js +3 -6
- package/scripts/flow-skill-manage.js +248 -0
- package/scripts/flow-spec-verifier.js +1 -1
- package/scripts/flow-standards-checker.js +75 -0
- package/scripts/flow-standards-gate.js +1 -1
- package/scripts/flow-statusline-setup.js +8 -2
- package/scripts/flow-step-changelog.js +2 -2
- package/scripts/flow-step-coverage.js +1 -1
- package/scripts/flow-step-knowledge.js +1 -1
- package/scripts/flow-step-regression.js +1 -1
- package/scripts/flow-step-simplifier.js +1 -1
- package/scripts/flow-task-analyzer.js +1 -1
- package/scripts/flow-task-classifier.js +1 -1
- package/scripts/flow-task-enforcer.js +1 -1
- package/scripts/flow-template-extractor.js +1 -1
- package/scripts/flow-trap-zone.js +1 -1
- package/scripts/flow-utils.js +4 -0
- package/scripts/flow-worker-question-classifier.js +51 -5
- package/scripts/flow-workspace-migrate-ipc.js +216 -0
- package/scripts/flow-workspace-summary.js +256 -0
- package/scripts/hooks/adapters/base-adapter.js +2 -2
- package/scripts/hooks/core/feature-dossier-gate.js +194 -0
- package/scripts/hooks/core/observation-capture.js +24 -0
- package/scripts/hooks/core/overdue-dispatches.js +20 -1
- package/scripts/hooks/core/phase-gate.js +15 -1
- package/scripts/hooks/core/phase-transition-auto-review.js +61 -0
- package/scripts/hooks/core/post-compact.js +5 -2
- package/scripts/hooks/core/pre-tool-orchestrator.js +21 -0
- package/scripts/hooks/core/routing-gate.js +58 -0
- package/scripts/hooks/core/session-context.js +108 -0
- package/scripts/hooks/core/session-end-memory-proposals.js +65 -0
- package/scripts/hooks/core/session-end-skill-proposals.js +58 -0
- package/scripts/hooks/core/session-end.js +25 -0
- package/scripts/hooks/core/setup-handler.js +1 -1
- package/scripts/hooks/core/task-boundary-reset.js +110 -4
- package/scripts/hooks/core/worker-boundary-gate.js +71 -0
- package/scripts/hooks/core/worker-tool-first-gate.js +275 -0
- package/scripts/hooks/entry/claude-code/post-tool-use.js +2 -2
- package/scripts/hooks/entry/claude-code/pre-tool-use.js +7 -2
- package/scripts/hooks/entry/claude-code/session-start.js +74 -30
- package/scripts/hooks/entry/claude-code/stop.js +47 -1
- package/scripts/hooks/entry/claude-code/user-prompt-submit.js +17 -0
- package/.workflow/templates/partials/user-commands.hbs +0 -20
|
@@ -0,0 +1,550 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Wogi Workspace — SQLite IPC Index (wf-3635574e / G3)
|
|
5
|
+
*
|
|
6
|
+
* Per-worker SQLite-backed atomicity index over the JSON message bus.
|
|
7
|
+
*
|
|
8
|
+
* Layout (per AC2):
|
|
9
|
+
* .workspace/state/ipc/<repoName>/inbound.db — manager is sole writer
|
|
10
|
+
* .workspace/state/ipc/<repoName>/outbound.db — worker is sole writer
|
|
11
|
+
*
|
|
12
|
+
* Schema (per AC1):
|
|
13
|
+
* messages(id TEXT PK, kind TEXT, payload TEXT, created_at TEXT, consumed_at TEXT)
|
|
14
|
+
*
|
|
15
|
+
* Role (Path B — index on top of JSON):
|
|
16
|
+
* JSON files in `.workspace/messages/` remain the authoritative store.
|
|
17
|
+
* SQLite indexes (id, status, direction) and provides atomic read-and-mark
|
|
18
|
+
* for consumer hot paths. On SQLite unavailability, callers transparently
|
|
19
|
+
* use JSON (per AC5).
|
|
20
|
+
*
|
|
21
|
+
* Design notes:
|
|
22
|
+
* - sql.js (pure-WASM) is used — already a required dep.
|
|
23
|
+
* - Init is async; all public APIs are async.
|
|
24
|
+
* - Single-writer contract is enforced by directory layout + caller convention.
|
|
25
|
+
* - Persistence: db.export() + atomic temp-file-rename (same pattern as
|
|
26
|
+
* scripts/flow-memory-db.js).
|
|
27
|
+
*/
|
|
28
|
+
|
|
29
|
+
'use strict';
|
|
30
|
+
|
|
31
|
+
const fs = require('node:fs');
|
|
32
|
+
const path = require('node:path');
|
|
33
|
+
const { safeJsonParse } = require('../scripts/flow-io');
|
|
34
|
+
|
|
35
|
+
// ============================================================
// Module-level state
// ============================================================

let SQL = null;                  // sql.js namespace once initSqlJs() has resolved
let sqlInitPromise = null;       // in-flight init promise shared by concurrent callers
let sqlUnavailableReason = null; // sticky load-failure message; non-null disables the SQLite path (AC5)
const openDbs = new Map();       // dbPath -> { db, dirty } — in-process cache of open sql.js handles
|
|
43
|
+
|
|
44
|
+
// ============================================================
// Constants
// ============================================================

// Allowed values for the `direction` argument (see dbPath()).
const VALID_DIRECTIONS = new Set(['inbound', 'outbound']);
// Repo names become directory names on disk — restrict to a safe slug
// (also prevents path traversal via a crafted repoName).
const VALID_NAME = /^[a-zA-Z0-9_-]{1,64}$/;

// Schema per AC1. consumed_at IS NULL means "not yet consumed";
// idx_unconsumed serves the hot "oldest unconsumed first" query.
const SCHEMA_SQL = `
CREATE TABLE IF NOT EXISTS messages (
  id TEXT PRIMARY KEY,
  kind TEXT NOT NULL,
  payload TEXT NOT NULL,
  created_at TEXT NOT NULL,
  consumed_at TEXT
);
CREATE INDEX IF NOT EXISTS idx_unconsumed
  ON messages(consumed_at, created_at);
CREATE INDEX IF NOT EXISTS idx_kind
  ON messages(kind);
`;
|
|
64
|
+
|
|
65
|
+
// ============================================================
|
|
66
|
+
// sql.js lifecycle
|
|
67
|
+
// ============================================================
|
|
68
|
+
|
|
69
|
+
/**
 * Lazily initialise the sql.js WASM runtime, exactly once per process.
 * Concurrent callers share a single in-flight promise; a load failure is
 * remembered in `sqlUnavailableReason` so we never retry in-process.
 * @returns {Promise<object|null>} the SQL namespace, or null if sql.js cannot load.
 */
async function ensureSqlJs() {
  if (SQL) return SQL;
  if (sqlUnavailableReason) return null;

  if (!sqlInitPromise) {
    sqlInitPromise = (async () => {
      try {
        const initSqlJs = require('sql.js');
        SQL = await initSqlJs();
        return SQL;
      } catch (err) {
        // Cache why loading failed — unavailableReason() surfaces this for diagnostics.
        sqlUnavailableReason = (err && err.message) || String(err);
        return null;
      }
    })();
  }

  return sqlInitPromise;
}
|
|
91
|
+
|
|
92
|
+
/**
 * Check if SQLite IPC is available in this process.
 * Side-effect-free after the first call (result is cached by ensureSqlJs).
 * @returns {Promise<boolean>}
 */
async function isAvailable() {
  return Boolean(await ensureSqlJs());
}
|
|
100
|
+
|
|
101
|
+
/**
 * Why SQLite is unavailable (if it is). For diagnostics + AC5 deprecation warning.
 * @returns {string|null} the captured sql.js load-error message, or null when
 *   sql.js loaded successfully (or loading has not been attempted yet).
 */
function unavailableReason() {
  return sqlUnavailableReason;
}
|
|
108
|
+
|
|
109
|
+
// ============================================================
|
|
110
|
+
// Path helpers
|
|
111
|
+
// ============================================================
|
|
112
|
+
|
|
113
|
+
/**
 * Reject repo names that are unsafe to use as directory names.
 * @param {string} repoName
 * @throws {Error} when the name does not match VALID_NAME.
 */
function validateRepoName(repoName) {
  const candidate = repoName || '';
  if (VALID_NAME.test(candidate)) return;
  throw new Error(`Invalid repoName: ${JSON.stringify(repoName)} (must match ${VALID_NAME})`);
}
|
|
118
|
+
|
|
119
|
+
/**
 * Reject direction values other than 'inbound' / 'outbound'.
 * @param {string} direction
 * @throws {Error} on any other value.
 */
function validateDirection(direction) {
  if (VALID_DIRECTIONS.has(direction)) return;
  throw new Error(`Invalid direction: ${direction} (must be inbound|outbound)`);
}
|
|
124
|
+
|
|
125
|
+
/**
 * Root directory that holds all per-repo IPC databases.
 * @param {string} workspaceRoot
 * @returns {string} `<workspaceRoot>/.workspace/state/ipc`
 */
function ipcRoot(workspaceRoot) {
  const segments = ['.workspace', 'state', 'ipc'];
  return path.join(workspaceRoot, ...segments);
}
|
|
128
|
+
|
|
129
|
+
/**
 * Full path of one per-worker DB file, validating both name components.
 * @param {string} workspaceRoot
 * @param {string} repoName — must match VALID_NAME
 * @param {string} direction — 'inbound' or 'outbound'
 * @returns {string}
 * @throws {Error} on invalid repoName or direction.
 */
function dbPath(workspaceRoot, repoName, direction) {
  validateRepoName(repoName);
  validateDirection(direction);
  const fileName = `${direction}.db`;
  return path.join(ipcRoot(workspaceRoot), repoName, fileName);
}
|
|
134
|
+
|
|
135
|
+
// ============================================================
|
|
136
|
+
// DB open / close / persist
|
|
137
|
+
// ============================================================
|
|
138
|
+
|
|
139
|
+
/**
 * Open (or return a cached handle to) the DB at `dbFilePath`, creating the
 * directory, file, and schema on first use.
 * @param {string} dbFilePath
 * @returns {Promise<object|null>} sql.js Database, or null when sql.js is unavailable.
 */
async function openDb(dbFilePath) {
  const sql = await ensureSqlJs();
  if (!sql) return null;

  const cached = openDbs.get(dbFilePath);
  if (cached) return cached.db;

  fs.mkdirSync(path.dirname(dbFilePath), { recursive: true });

  // Load an existing file image if present; otherwise start an empty DB.
  const existing = fs.existsSync(dbFilePath) ? fs.readFileSync(dbFilePath) : null;
  const db = existing ? new sql.Database(existing) : new sql.Database();

  // Idempotent: schema uses IF NOT EXISTS throughout.
  db.run(SCHEMA_SQL);
  openDbs.set(dbFilePath, { db, dirty: false });
  return db;
}
|
|
161
|
+
|
|
162
|
+
/**
 * Flag an open DB as having unpersisted changes (no-op if not open).
 * @param {string} dbFilePath
 */
function markDirty(dbFilePath) {
  const entry = openDbs.get(dbFilePath);
  if (!entry) return;
  entry.dirty = true;
}
|
|
166
|
+
|
|
167
|
+
/**
 * Persist the DB to disk atomically (temp file + rename).
 * Safe against mid-write crash: readers see either the old or the new file,
 * never a torn one. No-op when the DB is not open or not dirty.
 * @param {string} dbFilePath
 * @throws {Error} re-throws fs failures after removing the temp file.
 */
async function persistDb(dbFilePath) {
  const entry = openDbs.get(dbFilePath);
  if (!entry || !entry.dirty) return;

  const snapshot = Buffer.from(entry.db.export());
  // PID + timestamp keeps concurrent writers from colliding on the temp name.
  const tempPath = `${dbFilePath}.tmp.${process.pid}.${Date.now()}`;

  try {
    fs.writeFileSync(tempPath, snapshot);
    fs.renameSync(tempPath, dbFilePath);
    entry.dirty = false;
  } catch (err) {
    try {
      fs.unlinkSync(tempPath);
    } catch (_err) {
      /* best effort */
    }
    throw err;
  }
}
|
|
188
|
+
|
|
189
|
+
/**
 * Close a specific DB, persisting first when it has unflushed changes.
 * No-op when the DB is not open.
 * @param {string} dbFilePath
 */
async function closeDb(dbFilePath) {
  const entry = openDbs.get(dbFilePath);
  if (!entry) return;
  if (entry.dirty) {
    await persistDb(dbFilePath);
  }
  entry.db.close();
  openDbs.delete(dbFilePath);
}
|
|
199
|
+
|
|
200
|
+
/**
 * Close all open DBs. Useful for tests and process shutdown.
 * Failures on one DB do not stop the others from closing.
 */
async function closeAll() {
  for (const dbFile of [...openDbs.keys()]) {
    try {
      await closeDb(dbFile);
    } catch (_err) {
      // best effort: keep closing the remaining databases
    }
  }
}
|
|
209
|
+
|
|
210
|
+
// ============================================================
|
|
211
|
+
// Core operations
|
|
212
|
+
// ============================================================
|
|
213
|
+
|
|
214
|
+
/**
 * Index a message. Idempotent by id (UPSERT); a re-index never clears an
 * existing consumed_at (COALESCE keeps the first non-null value).
 *
 * @param {string} workspaceRoot
 * @param {string} repoName — the per-worker DB the message belongs to
 * @param {string} direction — 'inbound' (manager→worker) or 'outbound' (worker→manager)
 * @param {Object} msg
 * @param {string} msg.id
 * @param {string} msg.kind — e.g. 'task-dispatch', 'task-complete', 'question'
 * @param {Object} msg.payload — arbitrary JSON-serializable
 * @param {string} [msg.createdAt] — ISO; defaults to now
 * @param {string|null} [msg.consumedAt] — ISO or null; defaults to null
 * @returns {Promise<boolean>} true if indexed, false if SQLite unavailable
 * @throws {Error} when msg.id or msg.kind is missing/non-string.
 */
async function indexMessage(workspaceRoot, repoName, direction, msg) {
  const id = msg && msg.id;
  if (typeof id !== 'string' || !id) {
    throw new Error('indexMessage: msg.id required');
  }
  const kind = msg.kind;
  if (typeof kind !== 'string' || !kind) {
    throw new Error('indexMessage: msg.kind required');
  }

  const dbFilePath = dbPath(workspaceRoot, repoName, direction);
  const db = await openDb(dbFilePath);
  if (!db) return false;

  db.run(
    `INSERT INTO messages (id, kind, payload, created_at, consumed_at)
     VALUES ($id, $kind, $payload, $created_at, $consumed_at)
     ON CONFLICT(id) DO UPDATE SET
       kind = excluded.kind,
       payload = excluded.payload,
       created_at = excluded.created_at,
       consumed_at = COALESCE(messages.consumed_at, excluded.consumed_at)`,
    {
      $id: id,
      $kind: kind,
      $payload: JSON.stringify(msg.payload ?? {}),
      $created_at: msg.createdAt || new Date().toISOString(),
      $consumed_at: msg.consumedAt || null
    }
  );

  // Persist eagerly: sql.js is in-memory, so a crash before export() would
  // otherwise lose the index entry.
  markDirty(dbFilePath);
  await persistDb(dbFilePath);
  return true;
}
|
|
265
|
+
|
|
266
|
+
/**
 * List unconsumed messages (read-only — does NOT mark consumed).
 *
 * @param {string} workspaceRoot
 * @param {string} repoName
 * @param {string} direction
 * @param {Object} [opts]
 * @param {string} [opts.kind] — filter by kind
 * @param {number} [opts.limit] — must be a positive integer to take effect
 * @returns {Promise<Array<{id, kind, payload, createdAt}>>} empty array if unavailable
 */
async function listUnconsumed(workspaceRoot, repoName, direction, opts = {}) {
  const dbFilePath = dbPath(workspaceRoot, repoName, direction);
  if (!fs.existsSync(dbFilePath)) return [];
  const db = await openDb(dbFilePath);
  if (!db) return [];

  let sql = `SELECT id, kind, payload, created_at
             FROM messages
             WHERE consumed_at IS NULL`;
  const params = {};
  if (opts.kind) {
    sql += ` AND kind = $kind`;
    params.$kind = opts.kind;
  }
  sql += ` ORDER BY created_at ASC`;
  // Integer-checked before interpolation, so this cannot inject SQL.
  if (Number.isInteger(opts.limit) && opts.limit > 0) {
    sql += ` LIMIT ${opts.limit}`;
  }

  const stmt = db.prepare(sql);
  // Fix: free the statement in `finally` — previously a throw from bind/step/
  // getAsObject leaked the prepared statement (WASM memory in sql.js).
  try {
    stmt.bind(params);
    const rows = [];
    while (stmt.step()) {
      const r = stmt.getAsObject();
      rows.push({
        id: r.id,
        kind: r.kind,
        payload: parsePayload(r.payload),
        createdAt: r.created_at
      });
    }
    return rows;
  } finally {
    stmt.free();
  }
}
|
|
311
|
+
|
|
312
|
+
/**
 * Atomically read unconsumed messages and mark every examined row consumed.
 *
 * AC3. Within one process this is atomic (single-threaded JS + SQLite txn).
 * Cross-process atomicity relies on single-writer contract (AC2) — only one
 * process writes any given DB file.
 *
 * Optional verifier: receives each candidate row; return `true` to include
 * the row in the return value, `false` to exclude. ALL examined rows (up
 * to `limit`) get `consumed_at` set regardless — this prevents leaked
 * index entries when JSON authority says a row is already resolved.
 *
 * @param {string} workspaceRoot
 * @param {string} repoName
 * @param {string} direction
 * @param {Object} [opts]
 * @param {string} [opts.kind]
 * @param {number} [opts.limit]
 * @param {(row) => boolean} [opts.verifier]
 * @returns {Promise<Array<{id, kind, payload, createdAt, consumedAt}>>} returned rows
 */
async function readAndMarkConsumed(workspaceRoot, repoName, direction, opts = {}) {
  const dbFilePath = dbPath(workspaceRoot, repoName, direction);
  if (!fs.existsSync(dbFilePath)) return [];
  const db = await openDb(dbFilePath);
  if (!db) return [];

  // SAVEPOINT (not BEGIN) so this stays valid even if a caller ever wraps
  // the connection in an outer transaction.
  db.run('SAVEPOINT read_and_mark');
  try {
    // listUnconsumed re-opens the same cached handle, so it reads inside
    // this savepoint. The await is safe: nothing else runs on this handle
    // between read and update in single-threaded JS (see AC3 note above).
    const candidates = await listUnconsumed(workspaceRoot, repoName, direction, opts);
    if (candidates.length === 0) {
      db.run('RELEASE read_and_mark');
      return [];
    }

    const verifier = typeof opts.verifier === 'function' ? opts.verifier : null;
    const returned = verifier ? candidates.filter(verifier) : candidates;

    // Mark ALL examined rows consumed — prevents index-leak when verifier skips.
    const consumedAt = new Date().toISOString();
    const allIds = candidates.map(r => r.id);
    // Positional placeholders; ids come from the DB itself, one '?' per id.
    const placeholders = allIds.map(() => '?').join(',');
    db.run(
      `UPDATE messages SET consumed_at = ? WHERE id IN (${placeholders}) AND consumed_at IS NULL`,
      [consumedAt, ...allIds]
    );
    db.run('RELEASE read_and_mark');

    // Flush to disk only after the savepoint is released successfully.
    markDirty(dbFilePath);
    await persistDb(dbFilePath);

    return returned.map(r => ({ ...r, consumedAt }));
  } catch (err) {
    // Undo any partial update, then release the savepoint name; both are
    // best-effort because the connection may already be unusable.
    try { db.run('ROLLBACK TO read_and_mark'); db.run('RELEASE read_and_mark'); } catch (_err) { /* best effort */ }
    throw err;
  }
}
|
|
369
|
+
|
|
370
|
+
/**
 * Sync the index from the authoritative JSON messages directory.
 *
 * Path B pattern: JSON files remain authoritative; SQLite is a derived index.
 * Scans every `*.json` under `.workspace/messages/` and indexes any routable
 * message. Idempotent — safe to call before each atomic-consume: existing
 * rows keep their consumed_at via the COALESCE in indexMessage's UPSERT.
 *
 * @param {string} workspaceRoot
 * @returns {Promise<{scanned, indexed, skipped}>}
 */
async function syncFromJsonDir(workspaceRoot) {
  const messagesDir = path.join(workspaceRoot, '.workspace', 'messages');
  if (!fs.existsSync(messagesDir)) return { scanned: 0, indexed: 0, skipped: 0 };
  if (!(await isAvailable())) return { scanned: 0, indexed: 0, skipped: 0 };

  const files = fs.readdirSync(messagesDir).filter(name => name.endsWith('.json'));
  const tally = { scanned: files.length, indexed: 0, skipped: 0 };

  for (const file of files) {
    // Unparseable / id-less files and unroutable messages count as skipped.
    const msg = safeJsonParse(path.join(messagesDir, file), null);
    if (!msg || !msg.id) {
      tally.skipped++;
      continue;
    }

    const route = routeMessageForIndex(msg);
    if (!route) {
      tally.skipped++;
      continue;
    }

    const ok = await indexMessage(workspaceRoot, route.repoName, route.direction, {
      id: msg.id,
      kind: typeof msg.type === 'string' ? msg.type : 'unknown',
      payload: msg,
      createdAt: msg.timestamp || new Date().toISOString(),
      consumedAt: inferConsumedFromJson(msg)
    });
    if (ok) {
      tally.indexed++;
    } else {
      tally.skipped++;
    }
  }

  return tally;
}
|
|
413
|
+
|
|
414
|
+
/**
 * Decide which per-worker DB a bus message should be indexed in.
 *
 * Routing rules (single-writer layout, AC2):
 *   manager → named worker : that worker's inbound DB
 *   worker  → manager      : that worker's outbound DB
 *   worker  → all          : that worker's outbound DB (broadcast authored by worker)
 *   worker  → worker       : recipient's inbound DB
 *
 * @param {Object} msg — expects string `from` / `to` fields
 * @returns {{repoName: string, direction: string}|null} null when the message
 *   has no single per-worker home (e.g. manager broadcasts, self-addressed).
 */
function routeMessageForIndex(msg) {
  const from = typeof msg.from === 'string' ? msg.from : '';
  const to = typeof msg.to === 'string' ? msg.to : '';

  if (from === 'manager' && to && to !== 'all' && to !== 'manager') {
    return { repoName: to, direction: 'inbound' };
  }
  if (to === 'manager' && from) {
    return { repoName: from, direction: 'outbound' };
  }
  if (to === 'all' && from && from !== 'manager') {
    return { repoName: from, direction: 'outbound' };
  }
  // Peer-to-peer between two named workers: index in the recipient's inbound DB.
  // Fix: exclude to === 'all' here — previously a manager broadcast
  // (from:'manager', to:'all') fell through and created a bogus
  // ipc/all/inbound.db for a pseudo-repo named "all" that no worker reads.
  if (from && to && from !== to && to !== 'all') {
    return { repoName: to, direction: 'inbound' };
  }
  return null;
}
|
|
432
|
+
|
|
433
|
+
/**
 * Derive a consumed_at timestamp from an authoritative JSON message.
 * Explicit consumed_at/consumedAt fields win; otherwise a non-pending
 * status implies consumption at the update/resolution time (if known).
 * @param {Object} msg
 * @returns {string|null} ISO timestamp or null when still unconsumed.
 */
function inferConsumedFromJson(msg) {
  for (const key of ['consumed_at', 'consumedAt']) {
    if (typeof msg[key] === 'string') return msg[key];
  }
  const resolved = Boolean(msg.status) && msg.status !== 'pending';
  if (!resolved) return null;
  return msg.updatedAt || msg.resolvedAt || null;
}
|
|
441
|
+
|
|
442
|
+
/**
 * Mark a specific set of message ids as consumed (no read).
 * Already-consumed rows are left untouched.
 *
 * @param {string} workspaceRoot
 * @param {string} repoName
 * @param {string} direction
 * @param {Array<string>} ids
 * @returns {Promise<number>} number of rows updated (0 when unavailable)
 */
async function markConsumed(workspaceRoot, repoName, direction, ids) {
  if (!Array.isArray(ids) || ids.length === 0) return 0;
  const dbFilePath = dbPath(workspaceRoot, repoName, direction);
  if (!fs.existsSync(dbFilePath)) return 0;
  const db = await openDb(dbFilePath);
  if (!db) return 0;

  // One positional '?' per id; ids are only ever bound, never interpolated.
  const placeholders = Array(ids.length).fill('?').join(',');
  db.run(
    `UPDATE messages SET consumed_at = ? WHERE id IN (${placeholders}) AND consumed_at IS NULL`,
    [new Date().toISOString(), ...ids]
  );
  // sql.js doesn't expose rows-affected directly; ask SQLite via changes().
  const result = db.exec('SELECT changes() AS n');
  const updated = (result[0] && result[0].values[0] && result[0].values[0][0]) || 0;
  markDirty(dbFilePath);
  await persistDb(dbFilePath);
  return updated;
}
|
|
467
|
+
|
|
468
|
+
/**
 * Count rows by consumption status. Diagnostic / metrics only.
 * @param {string} workspaceRoot
 * @param {string} repoName
 * @param {string} direction
 * @returns {Promise<{total, unconsumed, consumed}>} zeros if unavailable
 */
async function stats(workspaceRoot, repoName, direction) {
  const dbFilePath = dbPath(workspaceRoot, repoName, direction);
  if (!fs.existsSync(dbFilePath)) return { total: 0, unconsumed: 0, consumed: 0 };
  const db = await openDb(dbFilePath);
  if (!db) return { total: 0, unconsumed: 0, consumed: 0 };

  const res = db.exec(`
    SELECT
      COUNT(*) AS total,
      COUNT(*) FILTER (WHERE consumed_at IS NULL) AS unconsumed,
      COUNT(*) FILTER (WHERE consumed_at IS NOT NULL) AS consumed
    FROM messages
  `);
  const [total, unconsumed, consumed] = (res[0] && res[0].values[0]) || [0, 0, 0];
  return { total: total || 0, unconsumed: unconsumed || 0, consumed: consumed || 0 };
}
|
|
488
|
+
|
|
489
|
+
/**
 * List all repo names with IPC dirs under workspaceRoot.
 * Entries that are not directories, fail to stat, or have unsafe names
 * are silently excluded.
 * @param {string} workspaceRoot
 * @returns {Array<string>}
 */
function listIndexedRepos(workspaceRoot) {
  const root = ipcRoot(workspaceRoot);
  if (!fs.existsSync(root)) return [];

  const isRepoDir = (name) => {
    try {
      const st = fs.statSync(path.join(root, name));
      return st.isDirectory() && VALID_NAME.test(name);
    } catch (_err) {
      return false;
    }
  };

  return fs.readdirSync(root).filter(isRepoDir);
}
|
|
505
|
+
|
|
506
|
+
// ============================================================
|
|
507
|
+
// Helpers
|
|
508
|
+
// ============================================================
|
|
509
|
+
|
|
510
|
+
/**
 * Parse a stored payload JSON string defensively.
 * Strips top-level `__proto__` / `constructor` / `prototype` own-properties
 * as a prototype-pollution guard; unparseable input is wrapped rather
 * than thrown.
 * @param {string|null|undefined} raw
 * @returns {*} parsed value, {} for nullish input, or {_raw, _parseError} on bad JSON.
 */
function parsePayload(raw) {
  if (raw == null) return {};

  let parsed;
  try {
    parsed = JSON.parse(raw);
  } catch (_err) {
    return { _raw: raw, _parseError: true };
  }

  if (parsed && typeof parsed === 'object') {
    for (const key of ['__proto__', 'constructor', 'prototype']) {
      delete parsed[key];
    }
  }
  return parsed;
}
|
|
525
|
+
|
|
526
|
+
// ============================================================
|
|
527
|
+
// Exports
|
|
528
|
+
// ============================================================
|
|
529
|
+
|
|
530
|
+
// Public API. JSON files stay authoritative (Path B); everything exported
// here is an index/acceleration layer that degrades to a no-op when sql.js
// is unavailable (AC5).
module.exports = {
  // Lifecycle
  isAvailable,
  unavailableReason,
  closeDb,
  closeAll,
  // Paths
  ipcRoot,
  dbPath,
  listIndexedRepos,
  // Core ops
  indexMessage,
  listUnconsumed,
  readAndMarkConsumed,
  markConsumed,
  syncFromJsonDir,
  routeMessageForIndex,
  stats,
  // Constants (exported for tests / migrations)
  SCHEMA_SQL
};
|
|
@@ -613,6 +613,94 @@ function broadcastDecision(fromRepo, decisionTitle, decisionContent, targetRepos
|
|
|
613
613
|
// Exports
|
|
614
614
|
// ============================================================
|
|
615
615
|
|
|
616
|
+
// ============================================================
|
|
617
|
+
// SQLite Index (wf-3635574e / G3, Path B)
|
|
618
|
+
// ============================================================
|
|
619
|
+
// JSON files above remain authoritative. These async helpers expose the
|
|
620
|
+
// SQLite atomicity index for hot-path consumers that need atomic
|
|
621
|
+
// read-and-mark-consumed semantics (AC3). Sync APIs above are unchanged —
|
|
622
|
+
// callers that don't need SQLite speedup continue to work without it.
|
|
623
|
+
|
|
624
|
+
/**
 * Persist a message to JSON AND mirror it into the SQLite index (best effort).
 * Use from async contexts when the write should populate the atomicity index
 * immediately. If SQLite is unavailable or errors, the JSON write still
 * succeeds and we fall back silently (AC5).
 *
 * @param {string} workspaceRoot - workspace directory containing the IPC store
 * @param {Object} message - message object; `id`, `type`, `timestamp` are read
 * @returns {Promise<{jsonPath: string, indexed: boolean}>} path of the JSON
 *   file plus whether the SQLite index was updated
 */
async function saveMessageIndexed(workspaceRoot, message) {
  // JSON write first — it is authoritative and must never be skipped.
  const jsonPath = saveMessage(workspaceRoot, message);
  let indexed = false;
  try {
    const sqliteIpc = require('./workspace-ipc-sqlite');
    const available = await sqliteIpc.isAvailable();
    const route = available ? sqliteIpc.routeMessageForIndex(message) : null;
    if (route) {
      const row = {
        id: message.id,
        kind: message.type || 'unknown',
        payload: message,
        createdAt: message.timestamp || new Date().toISOString(),
        consumedAt: null
      };
      indexed = await sqliteIpc.indexMessage(
        workspaceRoot,
        route.repoName,
        route.direction,
        row
      );
    }
  } catch (_err) {
    // AC5: SQLite is an optional accelerator — swallow and stay JSON-only.
  }
  return { jsonPath, indexed };
}
|
|
653
|
+
|
|
654
|
+
/**
 * Atomically read unconsumed messages addressed to `repoName` and mark them
 * consumed. A verifier re-reads each row's JSON payload to confirm status is
 * still 'pending' — JSON remains authoritative for status. Only rows whose
 * JSON currently says status='pending' are returned; every examined row gets
 * its SQLite consumed_at set regardless (no index leak).
 *
 * @param {string} workspaceRoot - workspace directory containing the IPC store
 * @param {string} repoName - repo whose inbound messages are consumed
 * @param {Object} [opts]
 * @param {string} [opts.kind] - restrict to messages of this type
 * @param {number} [opts.limit] - cap on messages returned (integer)
 * @returns {Promise<Array<Object>>} the underlying JSON message objects
 */
async function atomicConsumeFor(workspaceRoot, repoName, opts = {}) {
  const sqliteIpc = require('./workspace-ipc-sqlite');
  const available = await sqliteIpc.isAvailable();

  if (!available) {
    // AC5 fallback: non-atomic consumer semantics via the sync JSON path.
    let candidates = getUnreadMessages(workspaceRoot, repoName);
    if (opts.kind) {
      candidates = candidates.filter((msg) => msg.type === opts.kind);
    }
    if (Number.isInteger(opts.limit)) {
      candidates = candidates.slice(0, opts.limit);
    }
    for (const msg of candidates) {
      try {
        updateMessageStatus(workspaceRoot, msg.id, 'acknowledged');
      } catch (_err) {
        // best effort — skip messages whose status update fails
      }
    }
    return candidates;
  }

  // Bring the index in line with the authoritative JSON state first.
  await sqliteIpc.syncFromJsonDir(workspaceRoot);

  // Accept only rows whose JSON payload still reports status 'pending'.
  const stillPending = (row) =>
    Boolean(row && row.payload && row.payload.status === 'pending');

  const rows = await sqliteIpc.readAndMarkConsumed(workspaceRoot, repoName, 'inbound', {
    kind: opts.kind,
    limit: opts.limit,
    verifier: stillPending
  });

  // Callers expect the underlying JSON payload objects, not index rows.
  const messages = rows.map((row) => row.payload);

  // Flip JSON status to 'acknowledged' so the authoritative state matches
  // the SQLite consumed_at we just set.
  for (const msg of messages) {
    try {
      updateMessageStatus(workspaceRoot, msg.id, 'acknowledged');
    } catch (_err) {
      // best effort — skip messages whose status update fails
    }
  }
  return messages;
}
|
|
703
|
+
|
|
616
704
|
module.exports = {
|
|
617
705
|
// Message creation
|
|
618
706
|
createMessage,
|
|
@@ -626,6 +714,10 @@ module.exports = {
|
|
|
626
714
|
updateMessageStatus,
|
|
627
715
|
getUnreadMessages,
|
|
628
716
|
|
|
717
|
+
// SQLite index (async, opt-in)
|
|
718
|
+
saveMessageIndexed,
|
|
719
|
+
atomicConsumeFor,
|
|
720
|
+
|
|
629
721
|
// Change notifications
|
|
630
722
|
generateChangeNotifications,
|
|
631
723
|
|
package/lib/workspace-routing.js
CHANGED
|
@@ -541,7 +541,7 @@ function updateCrossRepoBlocking(workspaceRoot, manifest) {
|
|
|
541
541
|
for (const depId of blockedBy) {
|
|
542
542
|
// Check if the blocking task is completed in any member
|
|
543
543
|
let depCompleted = false;
|
|
544
|
-
for (const [
|
|
544
|
+
for (const [_depName, depData] of Object.entries(memberTasks)) {
|
|
545
545
|
if (depData.completed.some(t => t.id === depId)) {
|
|
546
546
|
depCompleted = true;
|
|
547
547
|
break;
|