@rubytech/create-maxy 1.0.805 → 1.0.807
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/payload/platform/neo4j/migrations/004-project-admin-agent.ts +247 -0
- package/payload/platform/neo4j/migrations/004-prune-alien-accounts.ts +134 -0
- package/payload/platform/plugins/docs/references/cloudflare.md +1 -1
- package/payload/platform/plugins/docs/references/graph.md +42 -0
- package/payload/platform/plugins/docs/references/internals.md +11 -1
- package/payload/platform/plugins/docs/references/plugins-guide.md +1 -1
- package/payload/platform/plugins/whatsapp-import/PLUGIN.md +18 -5
- package/payload/platform/plugins/whatsapp-import/skills/whatsapp-import-enrich/SKILL.md +314 -0
- package/payload/platform/templates/agents/admin/IDENTITY.md +3 -1
- package/payload/platform/templates/specialists/agents/database-operator.md +5 -2
- package/payload/server/chunk-LSUMH6OF.js +9993 -0
- package/payload/server/chunk-LTIWPCUF.js +3477 -0
- package/payload/server/chunk-SC3ZSD7N.js +9993 -0
- package/payload/server/chunk-YULDSPAC.js +3484 -0
- package/payload/server/client-pool-CD7WHZIK.js +31 -0
- package/payload/server/client-pool-LXE7RIRT.js +31 -0
- package/payload/server/maxy-edge.js +2 -2
- package/payload/server/neo4j-migrations-HEECOAGK.js +128 -0
- package/payload/server/public/assets/admin-CTM9Vb-j.js +352 -0
- package/payload/server/public/assets/{graph-CBu0rtrP.js → graph-CDwy6Qw1.js} +1 -1
- package/payload/server/public/assets/page-DEyK-lSN.js +50 -0
- package/payload/server/public/graph.html +2 -2
- package/payload/server/public/index.html +2 -2
- package/payload/server/server.js +348 -202
- package/payload/server/public/assets/admin-BYsaXlDv.js +0 -352
- package/payload/server/public/assets/page-BNM63zsb.js +0 -50
package/package.json
CHANGED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Migration 004 — Project the admin agent into the graph and clean up
|
|
3
|
+
* Conversation channel data (Task 864). Numbered 004 because the 003
|
|
4
|
+
* slot is held by `003-person-name-eradicate.cypher` (boot-time apply).
|
|
5
|
+
*
|
|
6
|
+
* Three idempotent passes:
|
|
7
|
+
*
|
|
8
|
+
* 1. For every account directory under data/accounts/<accountId>/agents/admin/
|
|
9
|
+
* that carries a config.json, call projectAgent(accountId, accountDir,
|
|
10
|
+
* 'admin'). The projector is the same function migration 002 uses for
|
|
11
|
+
* public agents and is content-agnostic — it reads config.json plus any
|
|
12
|
+
* IDENTITY/SOUL/KNOWLEDGE/KNOWLEDGE-SUMMARY files present and MERGEs the
|
|
13
|
+
* :Agent node + four owned :KnowledgeDocument projections. Re-running
|
|
14
|
+
* this migration produces no duplicate nodes or edges.
|
|
15
|
+
*
|
|
16
|
+
* Migration 002 explicitly SKIPS admin (line 60: `if (entry.name ===
|
|
17
|
+
* "admin") continue`); doing the projection here keeps that skip valid
|
|
18
|
+
* and isolates admin-specific concerns to one file.
|
|
19
|
+
*
|
|
20
|
+
* 2. For every existing :AdminConversation that does NOT yet have a
|
|
21
|
+
* :HANDLED_BY edge, MATCH the freshly-projected admin :Agent and MERGE
|
|
22
|
+
* the edge. Guarded with `WHERE NOT EXISTS((c)-[:HANDLED_BY]->(:Agent))`
|
|
23
|
+
* so re-runs short-circuit per conversation.
|
|
24
|
+
*
|
|
25
|
+
* 3. Backfill `c.channel = 'webchat'` for every Conversation node where
|
|
26
|
+
* channel IS NULL. Pre-Task-863 conversations were written without the
|
|
27
|
+
* property; channel='webchat' is the correct default — only WhatsApp
|
|
28
|
+
* and Telegram sessions ever set non-webchat values, and those have
|
|
29
|
+
* always come through sessionKeys prefixed `whatsapp:` or `telegram:`
|
|
30
|
+
* (see neo4j-store.ts:171). Idempotent: subsequent runs no-op because
|
|
31
|
+
* the WHERE clause matches zero rows.
|
|
32
|
+
*
|
|
33
|
+
* Run via the platform/ui standalone runtime so it picks up the same
|
|
34
|
+
* NEO4J_URI / accounts-directory resolution as the server:
|
|
35
|
+
*
|
|
36
|
+
* cd platform/ui && \
|
|
37
|
+
* NEO4J_URI=bolt://… NEO4J_PASSWORD=… \
|
|
38
|
+
* npx tsx ../neo4j/migrations/004-project-admin-agent.ts
|
|
39
|
+
*
|
|
40
|
+
* Output: structured `[admin-agent-graph-backfill]` lines per account + a
|
|
41
|
+
* final totals line. Non-zero exit code on any per-account agent-projection
|
|
42
|
+
* failure surfaces to the operator; subsequent accounts are still attempted.
|
|
43
|
+
*/
|
|
44
|
+
|
|
45
|
+
import { existsSync, readdirSync } from "node:fs";
|
|
46
|
+
import { resolve } from "node:path";
|
|
47
|
+
import { projectAgent, getSession } from "../../ui/app/lib/neo4j-store";
|
|
48
|
+
import { ACCOUNTS_DIR } from "../../ui/app/lib/claude-agent/account";
|
|
49
|
+
|
|
50
|
+
interface PerAccountStats {
|
|
51
|
+
accountId: string;
|
|
52
|
+
projected: 0 | 1;
|
|
53
|
+
failed: 0 | 1;
|
|
54
|
+
handledByCandidates: number;
|
|
55
|
+
handledByEdges: number;
|
|
56
|
+
channelBackfilled: number;
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
async function projectAccountAdmin(
|
|
60
|
+
accountId: string,
|
|
61
|
+
accountDir: string,
|
|
62
|
+
): Promise<{ projected: 0 | 1; failed: 0 | 1 }> {
|
|
63
|
+
const adminDir = resolve(accountDir, "agents", "admin");
|
|
64
|
+
const configPath = resolve(adminDir, "config.json");
|
|
65
|
+
if (!existsSync(adminDir) || !existsSync(configPath)) {
|
|
66
|
+
return { projected: 0, failed: 0 };
|
|
67
|
+
}
|
|
68
|
+
try {
|
|
69
|
+
await projectAgent(accountId, accountDir, "admin");
|
|
70
|
+
return { projected: 1, failed: 0 };
|
|
71
|
+
} catch (err) {
|
|
72
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
73
|
+
console.error(
|
|
74
|
+
`[admin-agent-graph-backfill] account=${accountId.slice(0, 8)} project FAILED error="${msg}"`,
|
|
75
|
+
);
|
|
76
|
+
return { projected: 0, failed: 1 };
|
|
77
|
+
}
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
interface HandledByStats {
|
|
81
|
+
candidates: number;
|
|
82
|
+
edges: number;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Backfill HANDLED_BY edges from AdminConversation nodes to the admin Agent
|
|
87
|
+
* node. Guarded with NOT EXISTS so re-runs of this migration don't redo work
|
|
88
|
+
* — and so AdminConversations that already gained a HANDLED_BY edge through
|
|
89
|
+
* the forward path are skipped.
|
|
90
|
+
*
|
|
91
|
+
* `candidates` counts AdminConversations that LACK a HANDLED_BY edge; `edges`
|
|
92
|
+
* counts edges newly created. The admin :Agent must already exist (created
|
|
93
|
+
* by pass 1); if it doesn't, the OPTIONAL MATCH falls through and zero edges
|
|
94
|
+
* are written — surfaced through `candidates - edges`.
|
|
95
|
+
*/
|
|
96
|
+
async function backfillAdminHandledBy(
|
|
97
|
+
accountId: string,
|
|
98
|
+
): Promise<HandledByStats> {
|
|
99
|
+
const session = getSession();
|
|
100
|
+
try {
|
|
101
|
+
const result = await session.run(
|
|
102
|
+
`MATCH (c:AdminConversation {accountId: $accountId})
|
|
103
|
+
WHERE NOT EXISTS((c)-[:HANDLED_BY]->(:Agent))
|
|
104
|
+
OPTIONAL MATCH (a:Agent {accountId: $accountId, slug: 'admin'})
|
|
105
|
+
FOREACH (_ IN CASE WHEN a IS NULL THEN [] ELSE [1] END | MERGE (c)-[:HANDLED_BY]->(a))
|
|
106
|
+
RETURN
|
|
107
|
+
count(c) AS candidates,
|
|
108
|
+
sum(CASE WHEN a IS NULL THEN 0 ELSE 1 END) AS edges`,
|
|
109
|
+
{ accountId },
|
|
110
|
+
);
|
|
111
|
+
const toNum = (v: unknown): number => {
|
|
112
|
+
if (typeof v === "number") return v;
|
|
113
|
+
if (v && typeof (v as { toNumber: () => number }).toNumber === "function") {
|
|
114
|
+
return (v as { toNumber: () => number }).toNumber();
|
|
115
|
+
}
|
|
116
|
+
return 0;
|
|
117
|
+
};
|
|
118
|
+
return {
|
|
119
|
+
candidates: toNum(result.records[0]?.get("candidates")),
|
|
120
|
+
edges: toNum(result.records[0]?.get("edges")),
|
|
121
|
+
};
|
|
122
|
+
} finally {
|
|
123
|
+
await session.close();
|
|
124
|
+
}
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Backfill `c.channel = 'webchat'` on Conversation nodes that lack the
|
|
129
|
+
* property. Hits both AdminConversation and PublicConversation — the
|
|
130
|
+
* predicate is `c.channel IS NULL`, label-agnostic.
|
|
131
|
+
*
|
|
132
|
+
* Why default to 'webchat': new writes set channel from sessionKey prefix
|
|
133
|
+
* (neo4j-store.ts:171). Only `whatsapp:` and `telegram:` prefixes produce
|
|
134
|
+
* non-webchat values, and those prefixes have always existed. So a NULL
|
|
135
|
+
* channel can only mean "Conversation written before Task 863 added the
|
|
136
|
+
* SET clause" — which by definition was a webchat session.
|
|
137
|
+
*
|
|
138
|
+
* Idempotent: WHERE clause matches zero rows on re-run.
|
|
139
|
+
*/
|
|
140
|
+
async function backfillChannel(accountId: string): Promise<number> {
|
|
141
|
+
const session = getSession();
|
|
142
|
+
try {
|
|
143
|
+
const result = await session.run(
|
|
144
|
+
`MATCH (c:Conversation {accountId: $accountId})
|
|
145
|
+
WHERE c.channel IS NULL
|
|
146
|
+
SET c.channel = 'webchat'
|
|
147
|
+
RETURN count(c) AS backfilled`,
|
|
148
|
+
{ accountId },
|
|
149
|
+
);
|
|
150
|
+
const raw = result.records[0]?.get("backfilled");
|
|
151
|
+
if (typeof raw === "number") return raw;
|
|
152
|
+
if (raw && typeof (raw as { toNumber: () => number }).toNumber === "function") {
|
|
153
|
+
return (raw as { toNumber: () => number }).toNumber();
|
|
154
|
+
}
|
|
155
|
+
return 0;
|
|
156
|
+
} finally {
|
|
157
|
+
await session.close();
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
async function main(): Promise<void> {
|
|
162
|
+
const start = Date.now();
|
|
163
|
+
|
|
164
|
+
if (!existsSync(ACCOUNTS_DIR)) {
|
|
165
|
+
console.error(
|
|
166
|
+
`[admin-agent-graph-backfill] ACCOUNTS_DIR missing at ${ACCOUNTS_DIR} — nothing to do`,
|
|
167
|
+
);
|
|
168
|
+
process.exit(0);
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
const accountEntries = readdirSync(ACCOUNTS_DIR, { withFileTypes: true })
|
|
172
|
+
.filter((e) => e.isDirectory());
|
|
173
|
+
|
|
174
|
+
console.error(
|
|
175
|
+
`[admin-agent-graph-backfill] start accounts=${accountEntries.length}`,
|
|
176
|
+
);
|
|
177
|
+
|
|
178
|
+
let totalProjected = 0;
|
|
179
|
+
let totalFailed = 0;
|
|
180
|
+
let totalHandledByCandidates = 0;
|
|
181
|
+
let totalHandledByEdges = 0;
|
|
182
|
+
let totalChannelBackfilled = 0;
|
|
183
|
+
const perAccount: PerAccountStats[] = [];
|
|
184
|
+
|
|
185
|
+
for (const entry of accountEntries) {
|
|
186
|
+
const accountDir = resolve(ACCOUNTS_DIR, entry.name);
|
|
187
|
+
const accountId = entry.name;
|
|
188
|
+
const accountStart = Date.now();
|
|
189
|
+
|
|
190
|
+
const { projected, failed } = await projectAccountAdmin(
|
|
191
|
+
accountId,
|
|
192
|
+
accountDir,
|
|
193
|
+
);
|
|
194
|
+
totalProjected += projected;
|
|
195
|
+
totalFailed += failed;
|
|
196
|
+
|
|
197
|
+
let handledByStats: HandledByStats = { candidates: 0, edges: 0 };
|
|
198
|
+
let channelBackfilled = 0;
|
|
199
|
+
|
|
200
|
+
try {
|
|
201
|
+
handledByStats = await backfillAdminHandledBy(accountId);
|
|
202
|
+
totalHandledByCandidates += handledByStats.candidates;
|
|
203
|
+
totalHandledByEdges += handledByStats.edges;
|
|
204
|
+
} catch (err) {
|
|
205
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
206
|
+
console.error(
|
|
207
|
+
`[admin-agent-graph-backfill] account=${accountId.slice(0, 8)} handled-by-backfill FAILED error="${msg}"`,
|
|
208
|
+
);
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
try {
|
|
212
|
+
channelBackfilled = await backfillChannel(accountId);
|
|
213
|
+
totalChannelBackfilled += channelBackfilled;
|
|
214
|
+
} catch (err) {
|
|
215
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
216
|
+
console.error(
|
|
217
|
+
`[admin-agent-graph-backfill] account=${accountId.slice(0, 8)} channel-backfill FAILED error="${msg}"`,
|
|
218
|
+
);
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
perAccount.push({
|
|
222
|
+
accountId,
|
|
223
|
+
projected,
|
|
224
|
+
failed,
|
|
225
|
+
handledByCandidates: handledByStats.candidates,
|
|
226
|
+
handledByEdges: handledByStats.edges,
|
|
227
|
+
channelBackfilled,
|
|
228
|
+
});
|
|
229
|
+
const ms = Date.now() - accountStart;
|
|
230
|
+
console.error(
|
|
231
|
+
`[admin-agent-graph-backfill] account=${accountId.slice(0, 8)} projected=${projected} failed=${failed} handled-by-candidates=${handledByStats.candidates} handled-by-edges=${handledByStats.edges} channel-backfilled=${channelBackfilled} ms=${ms}`,
|
|
232
|
+
);
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
const ms = Date.now() - start;
|
|
236
|
+
console.error(
|
|
237
|
+
`[admin-agent-graph-backfill] done totals: projected=${totalProjected} failed=${totalFailed} handled-by-candidates=${totalHandledByCandidates} handled-by-edges=${totalHandledByEdges} channel-backfilled=${totalChannelBackfilled} ms=${ms}`,
|
|
238
|
+
);
|
|
239
|
+
|
|
240
|
+
process.exit(totalFailed > 0 ? 1 : 0);
|
|
241
|
+
}
|
|
242
|
+
|
|
243
|
+
main().catch((err) => {
|
|
244
|
+
const msg = err instanceof Error ? err.message : String(err);
|
|
245
|
+
console.error(`[admin-agent-graph-backfill] fatal error="${msg}"`);
|
|
246
|
+
process.exit(2);
|
|
247
|
+
});
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Migration 004 — Prune alien-account nodes (Task 847).
|
|
3
|
+
*
|
|
4
|
+
* Deletes every node whose `accountId` is not present on disk under
|
|
5
|
+
* `${DATA_ROOT}/accounts/<uuid>/account.json`. Idempotent — silent when
|
|
6
|
+
* the graph is already clean.
|
|
7
|
+
*
|
|
8
|
+
* Why a backstop, not a writer fix: the leaked nodes were written by the
|
|
9
|
+
* `review-digest-compose` writer, which has since been removed in favour
|
|
10
|
+
* of a coming gbrain rewrite. With no live writer to fix, this is the
|
|
11
|
+
* surface that catches future writer drift before the next gbrain ships.
|
|
12
|
+
*
|
|
13
|
+
* Hard guard: refuses to run when the on-disk account set is empty
|
|
14
|
+
* (corrupt-install scenario). Refusing to wipe the graph is louder than
|
|
15
|
+
* silently wiping it.
|
|
16
|
+
*
|
|
17
|
+
* Doctrine in `.docs/neo4j.md` "Account isolation invariant" requires
|
|
18
|
+
* any writer that stamps `n.accountId` to verify the value against
|
|
19
|
+
* `${DATA_ROOT}/accounts/<id>/account.json` before write. This migration
|
|
20
|
+
* is a backstop, not a license.
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
import { readFileSync, readdirSync } from "node:fs";
|
|
24
|
+
import { resolve } from "node:path";
|
|
25
|
+
|
|
26
|
+
const UUID_RE =
|
|
27
|
+
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Structural alias for the `Driver` instance the runner passes in. We type
|
|
31
|
+
* structurally rather than importing `Driver` from `neo4j-driver` because
|
|
32
|
+
* this file lives outside `platform/ui/` — depending on the workspace's
|
|
33
|
+
* dedupe state, `neo4j-driver` resolves to a different node_modules copy
|
|
34
|
+
* here than at the runner site, and TS treats two same-shape types from
|
|
35
|
+
* different paths as nominally distinct. Structural typing sidesteps that.
|
|
36
|
+
*/
|
|
37
|
+
type Neo4jDriverLike = {
|
|
38
|
+
session(): {
|
|
39
|
+
run(
|
|
40
|
+
cypher: string,
|
|
41
|
+
params?: Record<string, unknown>,
|
|
42
|
+
): Promise<{ records: Array<{ get(key: string): unknown }> }>;
|
|
43
|
+
close(): Promise<void>;
|
|
44
|
+
};
|
|
45
|
+
};
|
|
46
|
+
|
|
47
|
+
export async function pruneAlienAccounts(
|
|
48
|
+
driver: Neo4jDriverLike,
|
|
49
|
+
platformRoot: string,
|
|
50
|
+
): Promise<void> {
|
|
51
|
+
const accountsDir = resolve(platformRoot, "..", "data", "accounts");
|
|
52
|
+
const validIds = enumerateValidAccountIds(accountsDir);
|
|
53
|
+
|
|
54
|
+
if (validIds.size === 0) {
|
|
55
|
+
throw new Error(
|
|
56
|
+
`refusing to prune: no valid accounts found under ${accountsDir} — corrupt install? not deleting anything to avoid wiping the entire graph.`,
|
|
57
|
+
);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
const valid = Array.from(validIds);
|
|
61
|
+
const session = driver.session();
|
|
62
|
+
try {
|
|
63
|
+
// Two-step: first query collects the alien accountIds for the log
|
|
64
|
+
// line, second query deletes. Two cheap queries beat a single query
|
|
65
|
+
// that loses either the count or the id list under DELETE semantics.
|
|
66
|
+
const peek = await session.run(
|
|
67
|
+
`MATCH (n)
|
|
68
|
+
WHERE n.accountId IS NOT NULL AND NOT n.accountId IN $valid
|
|
69
|
+
RETURN DISTINCT n.accountId AS aid`,
|
|
70
|
+
{ valid },
|
|
71
|
+
);
|
|
72
|
+
const alienIds: string[] = [];
|
|
73
|
+
for (const record of peek.records) {
|
|
74
|
+
const aid: unknown = record.get("aid");
|
|
75
|
+
if (typeof aid === "string") alienIds.push(aid);
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
if (alienIds.length === 0) return;
|
|
79
|
+
|
|
80
|
+
const result = await session.run(
|
|
81
|
+
`MATCH (n)
|
|
82
|
+
WHERE n.accountId IS NOT NULL AND NOT n.accountId IN $valid
|
|
83
|
+
DETACH DELETE n
|
|
84
|
+
RETURN count(n) AS pruned`,
|
|
85
|
+
{ valid },
|
|
86
|
+
);
|
|
87
|
+
const prunedRaw = result.records[0]?.get("pruned");
|
|
88
|
+
const pruned =
|
|
89
|
+
typeof prunedRaw === "number"
|
|
90
|
+
? prunedRaw
|
|
91
|
+
: (prunedRaw as { toNumber?: () => number })?.toNumber?.() ?? 0;
|
|
92
|
+
console.error(
|
|
93
|
+
`[graph-invariant] alien-accounts pruned=${pruned} accountIds=${alienIds.join(",")}`,
|
|
94
|
+
);
|
|
95
|
+
} finally {
|
|
96
|
+
await session.close();
|
|
97
|
+
}
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
/**
|
|
101
|
+
* Enumerate accountIds with a parseable `account.json`. Directory name IS
|
|
102
|
+
* the canonical accountId (matches the UUID_RE.test(name) predicate at
|
|
103
|
+
* `platform/ui/server/routes/admin/files.ts`'s account-name resolver).
|
|
104
|
+
*
|
|
105
|
+
* Corruption discipline: a present-but-unparseable account.json is
|
|
106
|
+
* EXCLUDED from the valid set and emits a skip log line. Better to
|
|
107
|
+
* over-prune one suspect account than under-prune the leak it might
|
|
108
|
+
* be hiding.
|
|
109
|
+
*/
|
|
110
|
+
function enumerateValidAccountIds(accountsDir: string): Set<string> {
|
|
111
|
+
const valid = new Set<string>();
|
|
112
|
+
let names: string[];
|
|
113
|
+
try {
|
|
114
|
+
names = readdirSync(accountsDir);
|
|
115
|
+
} catch (err) {
|
|
116
|
+
if ((err as NodeJS.ErrnoException).code === "ENOENT") return valid;
|
|
117
|
+
throw err;
|
|
118
|
+
}
|
|
119
|
+
for (const name of names) {
|
|
120
|
+
if (!UUID_RE.test(name)) continue;
|
|
121
|
+
const configPath = resolve(accountsDir, name, "account.json");
|
|
122
|
+
try {
|
|
123
|
+
JSON.parse(readFileSync(configPath, "utf-8"));
|
|
124
|
+
valid.add(name);
|
|
125
|
+
} catch (err) {
|
|
126
|
+
const code = (err as NodeJS.ErrnoException).code ?? "parse-error";
|
|
127
|
+
if (code === "ENOENT") continue;
|
|
128
|
+
console.error(
|
|
129
|
+
`[graph-invariant] account-json-skip uuid=${name} reason=${code}`,
|
|
130
|
+
);
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
return valid;
|
|
134
|
+
}
|
|
@@ -29,7 +29,7 @@ When you submit, the `/api/admin/cloudflare/setup` endpoint runs — in strict o
|
|
|
29
29
|
- **Zone pre-flight** — for every non-apex hostname the script queries `1.1.1.1` for the registrable parent's NS records and refuses the whole run if they don't point at Cloudflare. Stream log: `step=zone-preflight result=ok|error zones_on_account=… missing_parent_for=…`. Catches "domain not on Cloudflare"; does not catch "domain on a different Cloudflare account than `cert.pem` is bound to" — that case surfaces later via `tunnel-status`.
|
|
30
30
|
- `cloudflared tunnel route dns` for each subdomain hostname. Apex hostnames cannot be routed this way — the script prints an **ACTION REQUIRED** block naming the exact dashboard record to add or edit. Stream log emits `step=route-dns hostname=… tunnel_id=…` before the call and `step=route-dns hostname=… result=ok|apex-skip|error` after; on error the bounded cloudflared stderr (≤400 chars) rides in the same phase line. **The script does not parse cloudflared's stdout** — exit code is the sole decision signal, so all three legitimate cloudflared output shapes (new record, overwrite, idempotent "already configured") are treated as success.
|
|
31
31
|
- `config.yml` and `tunnel.state` written under `${CFG_DIR}`.
|
|
32
|
-
- **Step-7 onboarding completion persisted** — the script writes `${ACCOUNT_DIR}/onboarding/step7-complete` (a JSON marker with the completion timestamp and tunnel ID) before arming the restart. Stream log: `step=onboarding-persist result=ok|error reason=<r>`. The marker is consumed by the next admin session's first state read and advances `OnboardingState.currentStep` to 7. Without this, the service restart below would SIGTERM the admin agent before it could persist step-7 completion, and the next session would re-ask the Cloudflare question you just finished.
|
|
32
|
+
- **Step-7 onboarding completion persisted** — the script writes `${ACCOUNT_DIR}/onboarding/step7-complete` (a JSON marker with the completion timestamp and tunnel ID) before arming the restart. Stream log: `step=onboarding-persist result=ok|error reason=<r>`. The marker is consumed by the next admin session's first state read and advances `OnboardingState.currentStep` to 7. Without this, the service restart below would SIGTERM the admin agent before it could persist step-7 completion, and the next session would re-ask the Cloudflare question you just finished. Both invocation surfaces (the form-driven action and the agent-via-Bash path) declare `ACCOUNT_DIR` explicitly because `systemd-run --user` does not inherit parent env — when ACCOUNT_DIR isn't reaching the script you'll see `result=skipped reason=no-account-dir` in the stream log instead of `result=ok`.
|
|
33
33
|
- `systemctl --user restart ${BRAND}.service` — restarts the platform service so the new tunnel spawns via the service's `ExecStartPre=resume-tunnel.sh`.
|
|
34
34
|
- Post-restart verification — `ps -ef | grep '[c]loudflared'` confirms the connector is alive, then `curl -I https://<hostname>` against each subdomain (up to 60 s per host) confirms a non-530 response.
|
|
35
35
|
|
|
@@ -70,6 +70,48 @@ border, with their zoom-tier labels intact. The `N msgs` count excludes
|
|
|
70
70
|
trashed Messages, so the detailed-tier label reflects only live turns in the
|
|
71
71
|
conversation.
|
|
72
72
|
|
|
73
|
+
## Filtering by channel and message kind
|
|
74
|
+
|
|
75
|
+
When you select **AdminConversation** or **PublicConversation** in the
|
|
76
|
+
filter popover, two extra rows appear underneath the chip list:
|
|
77
|
+
|
|
78
|
+
- **Channel** — Web / WhatsApp. Select one to scope the canvas to
|
|
79
|
+
conversations that came in over that channel only. Selecting both is
|
|
80
|
+
the same as selecting neither (all channels). After the migration that
|
|
81
|
+
ships with this release, every conversation carries an explicit
|
|
82
|
+
channel value — pre-existing conversations are backfilled to "Web"
|
|
83
|
+
because only the WhatsApp and Telegram intake paths ever set non-Web
|
|
84
|
+
values.
|
|
85
|
+
- **Message** — User / Assistant / WhatsApp. When you've also pivoted
|
|
86
|
+
into a conversation neighbourhood (or your search hits messages
|
|
87
|
+
directly), this row scopes the messages on canvas to the chosen kind.
|
|
88
|
+
WhatsApp messages persist with their own sublabel so you can isolate
|
|
89
|
+
the live-channel cohort from the agent-path cohort within the same
|
|
90
|
+
conversation.
|
|
91
|
+
|
|
92
|
+
These sub-facets compose with the chip selection. Searching with the
|
|
93
|
+
AdminConversation chip selected now also reaches the body text of every
|
|
94
|
+
admin message — typing a rare word like "ATM" returns every conversation
|
|
95
|
+
that mentions it, not just conversations with that word in the title.
|
|
96
|
+
|
|
97
|
+
## Sidebar conversations list
|
|
98
|
+
|
|
99
|
+
The Recents list above the chat sidebar carries a per-row marker:
|
|
100
|
+
WhatsApp conversations show a small WhatsApp glyph next to the
|
|
101
|
+
conversation name. The dropdown above the list filters Recents to a
|
|
102
|
+
specific channel — flipping it to **WhatsApp** hides web-chat
|
|
103
|
+
conversations and vice versa.
|
|
104
|
+
|
|
105
|
+
## Agents in the graph
|
|
106
|
+
|
|
107
|
+
Both admin and public agents appear as `:Agent` nodes in the graph. Open
|
|
108
|
+
the **Agents** entry from the sidebar to see them all. Each agent
|
|
109
|
+
carries a `:HANDLED_BY` edge from every conversation it has handled, so
|
|
110
|
+
you can pivot from an agent to the conversations it ran. The admin
|
|
111
|
+
agent's IDENTITY, SOUL, KNOWLEDGE, and KNOWLEDGE-SUMMARY documents
|
|
112
|
+
appear as :KnowledgeDocument nodes connected via `HAS_*` edges, the same
|
|
113
|
+
projection shape used for public agents.
|
|
114
|
+
|
|
73
115
|
## Agent-execution telemetry
|
|
74
116
|
|
|
75
117
|
`ToolCall`, `StepResult`, `WorkflowStep`, and `WorkflowRun` nodes are
|
|
@@ -140,6 +140,8 @@ WHERE node.accountId = $accountId
|
|
|
140
140
|
|
|
141
141
|
Multi-tenancy boundary. Every query is scoped to the requesting account. The `ACCOUNT_ID` environment variable is set at MCP server startup — it is not a tool parameter and cannot be overridden by the agent.
|
|
142
142
|
|
|
143
|
+
The read filter alone is not sufficient — it correctly *hides* alien-account nodes from every UI but does not prevent them existing. A writer that misresolves `accountId` (literal, undefined, or inferred-from-the-wrong-context) leaks nodes into the graph with no downstream symptom; the read filter then keeps them invisible indefinitely. The write-side doctrine is documented in `.docs/neo4j.md` "Account isolation invariant" — every writer that stamps `n.accountId` must verify the value against `${DATA_ROOT}/accounts/<id>/account.json` before write, and migration `004-prune-alien-accounts.ts` runs at every server boot as a backstop, deleting any node whose accountId is not on disk. Hard guard: refuses to prune when the on-disk account set is empty (corrupt-install scenario), surfaced as `[migration] failed prune-alien-accounts error="refusing to prune: …"`. Boot does not block on the failure.
|
|
144
|
+
|
|
143
145
|
---
|
|
144
146
|
|
|
145
147
|
## Query Classification
|
|
@@ -313,7 +315,7 @@ This tool is read-only and available to both public and admin agents.
|
|
|
313
315
|
|
|
314
316
|
### When conversations are created
|
|
315
317
|
|
|
316
|
-
`:Conversation` nodes on webchat (admin login, "New conversation" in the burger, a new public visitor) are created lazily. Opening the chat or logging in does not write anything to the graph — {{productName}} only records the conversation once the user sends a second message. This keeps `conversation-search` and the Conversations modal free of one-turn abandoned threads. WhatsApp and Telegram take the opposite posture:
|
|
318
|
+
`:Conversation` nodes on webchat (admin login, "New conversation" in the burger, a new public visitor) are created lazily. Opening the chat or logging in does not write anything to the graph — {{productName}} only records the conversation once the user sends a second message. This keeps `conversation-search` and the Conversations modal free of one-turn abandoned threads. WhatsApp and Telegram take the opposite posture: every inbound — DM or group, allowed or activation-off, agent-invoked or gated — MERGEs the `:Conversation` and writes a forensic `:Message:WhatsAppMessage` row before any access-control decision (Task 863). The graph is the durable record of every message the device received, not just the ones the agent replied to. See `.docs/web-chat.md` "Deferred conversation persistence (Task 650)" and `.docs/whatsapp.md` "Session continuity" for the full contract.
|
|
317
319
|
|
|
318
320
|
Each row in the Conversations modal exposes a `View logs` row-action that opens a popover with three links — **Stream**, **Errors**, **SSE** — each of which targets `/api/admin/logs?type={stream|error|sse}&conversationId={full-id}` in a new tab. The row's 8-char id chip is click-to-copy; hover reveals the full `conversationId` as a tooltip. See `.docs/web-chat.md` "In-chat retrieval" for the route contract and `console.debug` observability (Task 686).
|
|
319
321
|
|
|
@@ -464,3 +466,11 @@ grep '[persist] tool-call persisted' server.log | tail -10
|
|
|
464
466
|
```
|
|
465
467
|
|
|
466
468
|
Each log entry includes the tool name and a truncated conversation ID for correlation.
|
|
469
|
+
|
|
470
|
+
## Context compaction
|
|
471
|
+
|
|
472
|
+
When an admin turn crosses 75% of the model's context window, {{productName}} runs a silent compaction turn that asks the agent to call the `session-compact` MCP tool with a structured briefing (what you asked for, what was done, decisions made, work-in-progress, things you've shared about yourself). The briefing is written to Neo4j; the next admin turn injects it back into the system prompt, so continuity survives across the compaction boundary without re-sending the full transcript.
|
|
473
|
+
|
|
474
|
+
The compaction runs against a transient one-shot pool entry separate from the long-lived admin Query (Task 784). Operator-visible side effects:
|
|
475
|
+
- Compaction logs land in `claude-agent-compaction-stream-YYYY-MM-DD.log` alongside the main stream log. Look for `[compaction-start]`, `[compaction-summary-captured]`, `[compaction-failed]`, `[compaction-timeout]`, `[compaction-crashed]`, or `[compaction-spawn-error]` to triage. Subprocess stderr is captured inline as `[subproc-stderr] <line>` — there is no longer a separate `claude-agent-compaction-stderr-…log` file.
|
|
476
|
+
- The one-shot pool entry's lifecycle is greppable as `[client-cold-create] reason=compaction-one-shot …` paired with `[client-evict] reason=compaction-one-shot …`, distinguishable from the regular admin pool's lifecycle tags.
|
|
@@ -40,7 +40,7 @@ These are enabled during onboarding and can be added or removed at any time. Som
|
|
|
40
40
|
| `waitlist` | Waitlist lifecycle — extract sign-ups from conversations, review | — |
|
|
41
41
|
| `replicate` | Image generation — three models for photorealistic, design, and fast draft images | Content producer, Research assistant |
|
|
42
42
|
| `linkedin-import` | Import a LinkedIn Basic Data Export — Profile and Connections today, more CSVs as references land | Database operator |
|
|
43
|
-
| `whatsapp-import` | Import a WhatsApp `_chat.txt` export
|
|
43
|
+
| `whatsapp-import` | Import a WhatsApp `_chat.txt` export. Two-phase contract: **Phase 1 (load)** is a single Bash entry — `bash platform/plugins/whatsapp-import/bin/whatsapp-ingest.sh <archive> --owner-element-id <id> --scope <admin\|public>` runs parse → archive-write → Haiku insight in one process, landing `:Conversation:WhatsAppConversation` + `:Message:WhatsAppMessage` with NEXT chain, auto-created `:Person {participantStatus:'auto-created'}` participants, and `:Observation {observationStatus:'auto-extracted'}` rows for mentions/tasks/preferences/observed-relationships. **Phase 2 (enrich)** is operator-driven: ask "enrich the X chat" / "wire observations from yesterday's import" and the database-operator runs the `whatsapp-import-enrich` skill — walks the auto-created participants and auto-extracted observations row-by-row, writes operator-confirmed wiring (participant promotion/merge via `apoc.refactor.mergeNodes`, `:MENTIONS`/`:RELATED_TO` edges with evidence, `:Task` and `:Preference` nodes). Idempotent — re-running enrichment surfaces only items still in the auto-* states. Distinct from the live `whatsapp` plugin which is a Baileys QR-pairing channel. | Database operator |
|
|
44
44
|
|
|
45
45
|
### Claude Official (marketplace)
|
|
46
46
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
---
|
|
2
2
|
name: whatsapp-import
|
|
3
|
-
description: "Import a WhatsApp `_chat.txt` export (Conversation + Messages with chronological NEXT chain + analysis-derived insights) into the {{productName}} Neo4j graph. Skill-only plugin owned by the database-operator specialist. Opt-in per brand — not enabled by default. Distinct from the live `whatsapp` plugin (Baileys QR pairing + in-memory store)."
|
|
3
|
+
description: "Import a WhatsApp `_chat.txt` export (Conversation + Messages with chronological NEXT chain + analysis-derived insights) into the {{productName}} Neo4j graph as a two-phase contract: Phase 1 (`whatsapp-import` skill) lands raw shape via a deterministic Bash entry; Phase 2 (`whatsapp-import-enrich` skill) is operator-driven semantic resolution over the loaded conversation. Skill-only plugin owned by the database-operator specialist. Opt-in per brand — not enabled by default. Distinct from the live `whatsapp` plugin (Baileys QR pairing + in-memory store)."
|
|
4
4
|
tools: []
|
|
5
5
|
always: false
|
|
6
6
|
embed: false
|
|
@@ -12,9 +12,20 @@ metadata: {"platform":{"optional":true,"pluginKey":"whatsapp-import"}}
|
|
|
12
12
|
|
|
13
13
|
Ingests a WhatsApp "Export Chat" archive (the `_chat.txt` file plus media attachments) into the {{productName}} Neo4j graph. Skill-only plugin — no MCP server, no admin tools added. The skill runs under the `database-operator` specialist, which owns external-archive ingestion and ad-hoc graph operations.
|
|
14
14
|
|
|
15
|
+
## Two-phase contract (Task 855 + Task 859)
|
|
16
|
+
|
|
17
|
+
The plugin ships two skills that pair as a contract: deterministic load first, then operator-driven semantic enrichment. Splitting them removes the orchestration loop that a single bundled phase imposed (Task 804's 7h orchestration), and lets re-imports reuse the load phase without redoing the semantic walk.
|
|
18
|
+
|
|
19
|
+
| Phase | Skill | What it does | Trigger phrase |
|
|
20
|
+
|-------|-------|--------------|----------------|
|
|
21
|
+
| 1 — load | [`whatsapp-import`](skills/whatsapp-import/SKILL.md) | Parses `_chat.txt`, writes `:Conversation:WhatsAppConversation` + `:Message:WhatsAppMessage` with NEXT chain, auto-creates one `:Person {participantStatus:'auto-created'}` per distinct senderName, lands `:Observation {observationStatus:'auto-extracted'}` rows from the chunked Haiku insight pass. Single Bash entry; no MCP envelope between steps. | Operator drops a `_chat.txt` file or its containing export folder into chat. |
|
|
22
|
+
| 2 — enrich | [`whatsapp-import-enrich`](skills/whatsapp-import-enrich/SKILL.md) | Walks `participantStatus='auto-created'` and `observationStatus='auto-extracted'` rows scoped to a chosen conversation, surfaces evidence per row, writes operator-confirmed wiring: `apoc.refactor.mergeNodes` for participant promotion/merge, `:MENTIONS`/`:RELATED_TO` edges with `evidenceSnippet`, `:Task` via `task-create`, `:Preference` via `memory-write`. Idempotent — re-running surfaces only items still in `auto-created`/`auto-extracted`. | Operator asks to "enrich the X chat", "promote auto-created participants from Y", "wire observations from yesterday's import". |
|
|
23
|
+
|
|
24
|
+
Phase 2 refuses to run against a Conversation whose `c.lastImportedAt` is null. Phase 1 always precedes Phase 2.
|
|
25
|
+
|
|
15
26
|
## When this applies
|
|
16
27
|
|
|
17
|
-
The admin agent delegates to `database-operator` when the operator drops a `_chat.txt` (or its containing folder) into chat.
|
|
28
|
+
The admin agent delegates to `database-operator` when the operator drops a `_chat.txt` (or its containing folder) into chat (→ Phase 1) or requests enrichment of an already-loaded conversation (→ Phase 2). For Phase 1 the specialist runs the skill's archive-owner confirmation flow before any line is written, then invokes the deterministic Bash entry (`bin/whatsapp-ingest.sh`) once: parse, archive-write (via `memoryArchiveWrite` in-process), and Haiku insight all run in one Node process — no MCP envelope between steps (Task 855). For Phase 2 the specialist runs `whatsapp-import-enrich`'s bulk preview, asks the operator to confirm scope, then walks the rows with operator confirmation gates (Task 859).
|
|
18
29
|
|
|
19
30
|
## Accepted export shapes
|
|
20
31
|
|
|
@@ -28,6 +39,8 @@ WhatsApp's "Export Chat" emits `[DD/MM/YYYY, HH:MM:SS]` prefixes by default in m
|
|
|
28
39
|
|
|
29
40
|
## Relationship to other plugins
|
|
30
41
|
|
|
31
|
-
- **memory** —
|
|
32
|
-
- **
|
|
33
|
-
- **
|
|
42
|
+
- **memory** — Phase 1's underlying write surface, imported in-process by `bin/ingest.mjs` (`memoryArchiveWrite` for bulk Conversation+Messages; direct Cypher `:Observation` writes for the insight pass). Phase 2's enrich skill writes `:Preference` nodes via `mcp__memory__memory-write` and uses `mcp__memory__memory-search` for entity disambiguation. All writes carry `source='whatsapp'` + `createdByAgent='whatsapp-import'` (Phase 1) or `createdByAgent='whatsapp-import-enrich'` (Phase 2) provenance. The legacy `mcp__memory__whatsapp-export-parse` / `whatsapp-export-insight-write` MCP tools and the direct `memory-archive-write` MCP path with `archiveType=whatsapp-export` are blocked at the harness — the Bash entry is the only supported invocation surface for Phase 1 (Task 855).
|
|
43
|
+
- **tasks** — Phase 2's `:Task` writes go through `mcp__tasks__task-create` with `affects=$conversationElementId`. Database-operator's frontmatter `tools:` includes `mcp__tasks__task-create` for this path.
|
|
44
|
+
- **contacts** — Phase 2's mint-new-Person path (an auto-created participant the operator wants to land as a fresh contact) goes through `mcp__contacts__contact-create`.
|
|
45
|
+
- **database-operator specialist** — owns execution for both phases. See [admin/IDENTITY.md](../../../platform/templates/agents/admin/IDENTITY.md) delegation clause and [database-operator.md](../../../platform/templates/specialists/agents/database-operator.md) per-source archive list (which now names both phases under the WhatsApp entry).
|
|
46
|
+
- **linkedin-import** — sister plugin under the same pattern (LinkedIn Basic Data Export). LinkedIn ingestion is single-phase today (no enrich pass) because CSV rows already encode entity types deterministically — no auto-created participants, no auto-extracted observations to walk. Reading [linkedin-import/PLUGIN.md](../linkedin-import/PLUGIN.md) is the fastest way to understand the load-phase shape this plugin's Phase 1 follows.
|