@tekmidian/pai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/ARCHITECTURE.md +567 -0
  2. package/FEATURE.md +108 -0
  3. package/LICENSE +21 -0
  4. package/README.md +101 -0
  5. package/dist/auto-route-D7W6RE06.mjs +86 -0
  6. package/dist/auto-route-D7W6RE06.mjs.map +1 -0
  7. package/dist/cli/index.d.mts +1 -0
  8. package/dist/cli/index.mjs +5927 -0
  9. package/dist/cli/index.mjs.map +1 -0
  10. package/dist/config-DBh1bYM2.mjs +151 -0
  11. package/dist/config-DBh1bYM2.mjs.map +1 -0
  12. package/dist/daemon/index.d.mts +1 -0
  13. package/dist/daemon/index.mjs +56 -0
  14. package/dist/daemon/index.mjs.map +1 -0
  15. package/dist/daemon-mcp/index.d.mts +1 -0
  16. package/dist/daemon-mcp/index.mjs +185 -0
  17. package/dist/daemon-mcp/index.mjs.map +1 -0
  18. package/dist/daemon-v5O897D4.mjs +773 -0
  19. package/dist/daemon-v5O897D4.mjs.map +1 -0
  20. package/dist/db-4lSqLFb8.mjs +199 -0
  21. package/dist/db-4lSqLFb8.mjs.map +1 -0
  22. package/dist/db-BcDxXVBu.mjs +110 -0
  23. package/dist/db-BcDxXVBu.mjs.map +1 -0
  24. package/dist/detect-BHqYcjJ1.mjs +86 -0
  25. package/dist/detect-BHqYcjJ1.mjs.map +1 -0
  26. package/dist/detector-DKA83aTZ.mjs +74 -0
  27. package/dist/detector-DKA83aTZ.mjs.map +1 -0
  28. package/dist/embeddings-mfqv-jFu.mjs +91 -0
  29. package/dist/embeddings-mfqv-jFu.mjs.map +1 -0
  30. package/dist/factory-BDAiKtYR.mjs +42 -0
  31. package/dist/factory-BDAiKtYR.mjs.map +1 -0
  32. package/dist/index.d.mts +307 -0
  33. package/dist/index.d.mts.map +1 -0
  34. package/dist/index.mjs +11 -0
  35. package/dist/indexer-B20bPHL-.mjs +677 -0
  36. package/dist/indexer-B20bPHL-.mjs.map +1 -0
  37. package/dist/indexer-backend-BXaocO5r.mjs +360 -0
  38. package/dist/indexer-backend-BXaocO5r.mjs.map +1 -0
  39. package/dist/ipc-client-DPy7s3iu.mjs +156 -0
  40. package/dist/ipc-client-DPy7s3iu.mjs.map +1 -0
  41. package/dist/mcp/index.d.mts +1 -0
  42. package/dist/mcp/index.mjs +373 -0
  43. package/dist/mcp/index.mjs.map +1 -0
  44. package/dist/migrate-Bwj7qPaE.mjs +241 -0
  45. package/dist/migrate-Bwj7qPaE.mjs.map +1 -0
  46. package/dist/pai-marker-DX_mFLum.mjs +186 -0
  47. package/dist/pai-marker-DX_mFLum.mjs.map +1 -0
  48. package/dist/postgres-Ccvpc6fC.mjs +335 -0
  49. package/dist/postgres-Ccvpc6fC.mjs.map +1 -0
  50. package/dist/rolldown-runtime-95iHPtFO.mjs +18 -0
  51. package/dist/schemas-DjdwzIQ8.mjs +3405 -0
  52. package/dist/schemas-DjdwzIQ8.mjs.map +1 -0
  53. package/dist/search-PjftDxxs.mjs +282 -0
  54. package/dist/search-PjftDxxs.mjs.map +1 -0
  55. package/dist/sqlite-CHUrNtbI.mjs +90 -0
  56. package/dist/sqlite-CHUrNtbI.mjs.map +1 -0
  57. package/dist/tools-CLK4080-.mjs +805 -0
  58. package/dist/tools-CLK4080-.mjs.map +1 -0
  59. package/dist/utils-DEWdIFQ0.mjs +160 -0
  60. package/dist/utils-DEWdIFQ0.mjs.map +1 -0
  61. package/package.json +72 -0
  62. package/templates/README.md +181 -0
  63. package/templates/agent-prefs.example.md +362 -0
  64. package/templates/claude-md.template.md +733 -0
  65. package/templates/pai-project.template.md +13 -0
  66. package/templates/voices.example.json +251 -0
@@ -0,0 +1,773 @@
1
+ import { t as __exportAll } from "./rolldown-runtime-95iHPtFO.mjs";
2
+ import { n as openRegistry } from "./db-4lSqLFb8.mjs";
3
+ import { r as indexAll } from "./indexer-B20bPHL-.mjs";
4
+ import { t as configureEmbeddingModel } from "./embeddings-mfqv-jFu.mjs";
5
+ import { a as toolProjectHealth, c as toolProjectTodo, d as toolSessionRoute, i as toolProjectDetect, l as toolRegistrySearch, n as toolMemorySearch, o as toolProjectInfo, s as toolProjectList, t as toolMemoryGet, u as toolSessionList } from "./tools-CLK4080-.mjs";
6
+ import { n as CONFIG_FILE, s as DEFAULT_NOTIFICATION_CONFIG, t as CONFIG_DIR } from "./config-DBh1bYM2.mjs";
7
+ import { t as createStorageBackend } from "./factory-BDAiKtYR.mjs";
8
+ import { t as detectTopicShift } from "./detector-DKA83aTZ.mjs";
9
+ import { existsSync, mkdirSync, readFileSync, unlinkSync, writeFileSync } from "node:fs";
10
+ import { setPriority } from "node:os";
11
+ import { randomUUID } from "node:crypto";
12
+ import { connect, createServer } from "node:net";
13
+ import { spawn } from "node:child_process";
14
+
15
+ //#region src/notifications/config.ts
16
+ /**
17
+ * config.ts — Notification config persistence helpers
18
+ *
19
+ * Reads and writes the `notifications` section of ~/.config/pai/config.json.
20
+ * Deep-merges with defaults so partial configs work fine.
21
+ *
22
+ * This module is intentionally separate from the daemon's config loader
23
+ * so it can be used standalone (e.g. from CLI commands).
24
+ */
25
/**
 * Recursively merge `source` into `target`, returning a new object.
 * - null/undefined source values are skipped (they never clobber target).
 * - Plain-object values are merged recursively; arrays and scalars replace.
 * - Skips "__proto__"/"constructor"/"prototype" keys: this function merges
 *   JSON parsed from a config file on disk, so unguarded key copying is a
 *   prototype-pollution hazard.
 */
function deepMerge(target, source) {
	const result = { ...target };
	for (const key of Object.keys(source)) {
		// Never merge prototype-polluting keys from parsed JSON.
		if (key === "__proto__" || key === "constructor" || key === "prototype") continue;
		const srcVal = source[key];
		if (srcVal === void 0 || srcVal === null) continue;
		const tgtVal = target[key];
		if (typeof srcVal === "object" && !Array.isArray(srcVal) && typeof tgtVal === "object" && tgtVal !== null && !Array.isArray(tgtVal)) result[key] = deepMerge(tgtVal, srcVal);
		else result[key] = srcVal;
	}
	return result;
}
36
/**
 * Load the notification config from the PAI config file.
 * Returns defaults merged with any stored values; falls back to a shallow
 * copy of the defaults when the file is missing, unreadable, or malformed.
 */
function loadNotificationConfig() {
	const defaults = () => ({ ...DEFAULT_NOTIFICATION_CONFIG });
	if (!existsSync(CONFIG_FILE)) return defaults();
	let parsed;
	try {
		const raw = readFileSync(CONFIG_FILE, "utf-8");
		parsed = JSON.parse(raw);
	} catch {
		// Unreadable or invalid JSON: behave as if no config were stored.
		return defaults();
	}
	const stored = parsed["notifications"];
	if (!stored || typeof stored !== "object") return defaults();
	return deepMerge(DEFAULT_NOTIFICATION_CONFIG, stored);
}
58
/**
 * Persist the notification config by merging it into the existing
 * ~/.config/pai/config.json. Creates the directory/file if absent and
 * preserves every other top-level key already in the file.
 */
function saveNotificationConfig(config) {
	if (!existsSync(CONFIG_DIR)) mkdirSync(CONFIG_DIR, { recursive: true });
	let full = {};
	if (existsSync(CONFIG_FILE)) {
		try {
			full = JSON.parse(readFileSync(CONFIG_FILE, "utf-8"));
		} catch {
			// Corrupt config file: rebuild from scratch rather than crash.
		}
	}
	full["notifications"] = config;
	writeFileSync(CONFIG_FILE, JSON.stringify(full, null, 2) + "\n", "utf-8");
}
71
/**
 * Apply a partial update to the current notification config, persist it,
 * and return the new merged config.
 */
function patchNotificationConfig(patch) {
	const next = loadNotificationConfig();
	const { mode, channels, routing } = patch;
	if (mode !== void 0) next.mode = mode;
	if (channels) next.channels = deepMerge(next.channels, channels);
	if (routing) next.routing = deepMerge(next.routing, routing);
	saveNotificationConfig(next);
	return next;
}
83
+
84
+ //#endregion
85
+ //#region src/notifications/providers/ntfy.ts
86
/**
 * Notification provider for ntfy push servers.
 * POSTs the plain-text message to the configured topic URL. Resolves true
 * only when the HTTP request succeeds; disabled/unconfigured channels and
 * network failures resolve false.
 */
var NtfyProvider = class {
	channelId = "ntfy";
	async send(payload, config) {
		const { enabled, url, priority } = config.channels.ntfy;
		if (!enabled || !url) return false;
		try {
			const headers = { "Content-Type": "text/plain; charset=utf-8" };
			if (payload.title) headers["Title"] = payload.title;
			// "default" is ntfy's implicit priority; only send an explicit override.
			if (priority && priority !== "default") headers["Priority"] = priority;
			const response = await fetch(url, {
				method: "POST",
				headers,
				body: payload.message
			});
			return response.ok;
		} catch {
			return false;
		}
	}
};
105
+
106
+ //#endregion
107
+ //#region src/notifications/providers/whatsapp.ts
108
+ /**
109
+ * whatsapp.ts — WhatsApp notification provider (via Whazaa MCP)
110
+ *
111
+ * Sends notifications via the Whazaa Unix Domain Socket IPC protocol.
112
+ * Falls back gracefully if Whazaa is not running.
113
+ *
114
+ * Whazaa IPC socket: /tmp/whazaa.sock (standard Whazaa path)
115
+ *
116
+ * We use the same connect-per-call pattern as PaiClient to avoid
117
+ * requiring any persistent connection state.
118
+ */
119
const WHAZAA_SOCKET = "/tmp/whazaa.sock";
const WHAZAA_TIMEOUT_MS = 1e4;
/**
 * Send a single JSON-RPC call over the Whazaa Unix socket.
 * Resolves true when the first response line carries no `error` field;
 * resolves false on connect failure, timeout, malformed reply, or early
 * end-of-stream. Never rejects — callers treat failure as "not delivered".
 */
function callWhazaa(method, params) {
	return new Promise((resolve) => {
		let settled = false;
		let received = "";
		let timer = null;
		// Single exit path: stop the timer, tear down the socket, resolve once.
		const finish = (ok) => {
			if (settled) return;
			settled = true;
			if (timer !== null) {
				clearTimeout(timer);
				timer = null;
			}
			try {
				socket?.destroy();
			} catch {
				// Socket already gone; nothing to clean up.
			}
			resolve(ok);
		};
		const socket = connect(WHAZAA_SOCKET, () => {
			const request = {
				jsonrpc: "2.0",
				id: randomUUID(),
				method,
				params
			};
			socket.write(JSON.stringify(request) + "\n");
		});
		socket.on("data", (chunk) => {
			received += chunk.toString();
			const nl = received.indexOf("\n");
			// Wait until a full NDJSON line has arrived.
			if (nl === -1) return;
			try {
				const reply = JSON.parse(received.slice(0, nl));
				finish(!reply.error);
			} catch {
				finish(false);
			}
		});
		socket.on("error", () => finish(false));
		socket.on("end", () => finish(false));
		timer = setTimeout(() => finish(false), WHAZAA_TIMEOUT_MS);
	});
}
166
/**
 * WhatsApp notification provider (via the Whazaa MCP daemon).
 *
 * Delivers the message over the Whazaa Unix-socket IPC. Resolves false if
 * the channel is disabled or Whazaa is unreachable. When the global mode is
 * "voice", a TTS voice name is attached so Whazaa sends a voice note.
 */
var WhatsAppProvider = class {
	channelId = "whatsapp";
	async send(payload, config) {
		const cfg = config.channels.whatsapp;
		if (!cfg.enabled) return false;
		const params = { message: payload.message };
		if (cfg.recipient) params.recipient = cfg.recipient;
		// Fixed: the original gated this on `isVoiceMode && mode === "voice"`,
		// where isVoiceMode = (mode === "voice" || channels.voice.enabled) —
		// logically equivalent to just `mode === "voice"`, so the
		// channels.voice.enabled read was dead code (and could throw on a
		// config without a voice section).
		if (config.mode === "voice") params.voice = config.channels.voice?.voiceName ?? "bm_george";
		return callWhazaa("whatsapp_send", params);
	}
};
178
+
179
+ //#endregion
180
+ //#region src/notifications/providers/macos.ts
181
/**
 * macOS notification provider.
 *
 * Uses `osascript -e 'display notification ...'` to show a system
 * notification. Non-blocking: spawns a detached process and resolves true
 * shortly after spawning, or false if the channel is disabled or the spawn
 * errors.
 */
var MacOsProvider = class {
	channelId = "macos";
	async send(payload, config) {
		if (!config.channels.macos.enabled) return false;
		try {
			// AppleScript string literals are double-quoted, so backslashes and
			// double quotes must be escaped. (The original escaped single quotes,
			// which AppleScript does not treat specially inside "..." — a `"` in
			// the message or title would have broken, or injected into, the script.)
			const escapeAppleScript = (s) => s.replace(/\\/g, "\\\\").replace(/"/g, "\\\"");
			const safeTitle = escapeAppleScript(payload.title ?? "PAI");
			const script = `display notification "${escapeAppleScript(payload.message)}" with title "${safeTitle}"`;
			return new Promise((resolve) => {
				const child = spawn("osascript", ["-e", script], {
					detached: true,
					stdio: "ignore"
				});
				child.unref();
				child.on("error", () => resolve(false));
				// osascript has no useful fast exit signal to wait on; assume
				// success if the spawn itself did not error within 200ms.
				setTimeout(() => resolve(true), 200);
			});
		} catch {
			return false;
		}
	}
};
208
+
209
+ //#endregion
210
+ //#region src/notifications/providers/cli.ts
211
/**
 * CLI fallback provider: writes the notification to stderr in a
 * grep-friendly `[pai-notify:<event>]` format. Always succeeds.
 */
var CliProvider = class {
	channelId = "cli";
	async send(payload, _config) {
		const titlePart = payload.title ? ` ${payload.title}:` : "";
		const line = `[pai-notify:${payload.event}]${titlePart} ${payload.message}\n`;
		process.stderr.write(line);
		return true;
	}
};
220
+
221
+ //#endregion
222
+ //#region src/notifications/router.ts
223
/**
 * Channel-id → provider instance table used by routeNotification().
 * Note: the "voice" channel deliberately reuses WhatsAppProvider — voice
 * notifications are delivered as WhatsApp voice notes (the provider attaches
 * a TTS voice name when mode === "voice").
 */
const PROVIDERS = {
	ntfy: new NtfyProvider(),
	whatsapp: new WhatsAppProvider(),
	macos: new MacOsProvider(),
	voice: new WhatsAppProvider(),
	cli: new CliProvider()
};
230
/**
 * Resolve which channels should receive a notification for the given
 * event type under the current config.
 *
 * Mode overrides:
 *   "off"      → no channels
 *   "auto"     → routing table entry for the event, filtered to enabled
 *                channels ("voice" is excluded — it is not directly routable)
 *   "voice"    → whatsapp (provider enables TTS)
 *   "whatsapp" → whatsapp
 *   "ntfy"     → ntfy
 *   "macos"    → macos
 *   "cli"      → cli
 *
 * Explicit (non-auto) modes bypass the per-channel enabled flag: setting
 * the mode is a direct user override.
 */
function resolveChannels(config, event) {
	const { mode, channels, routing } = config;
	if (mode === "off") return [];
	if (mode !== "auto") {
		const modeToChannel = {
			voice: "whatsapp",
			whatsapp: "whatsapp",
			ntfy: "ntfy",
			macos: "macos",
			cli: "cli"
		};
		// Fixed: the original read the channel config and then had
		// `if (cfg && !cfg.enabled) return [ch]; return [ch];` — both branches
		// identical, so the enabled check was dead code. Behavior is unchanged:
		// explicit modes always return their mapped channel.
		const ch = modeToChannel[mode];
		return ch ? [ch] : [];
	}
	return (routing[event] ?? []).filter((ch) => {
		if (ch === "voice") return false;
		return channels[ch]?.enabled === true;
	});
}
266
/**
 * Route a notification to all channels resolved for its event type.
 *
 * Sends to every channel in parallel. Individual channel failures are
 * non-fatal and logged to stderr.
 *
 * @param payload The notification to send
 * @param config  The current notification config (from daemon state)
 * @returns Summary of attempted/succeeded/failed channels and active mode
 */
async function routeNotification(payload, config) {
	const channels = resolveChannels(config, payload.event);
	if (channels.length === 0) return {
		channelsAttempted: [],
		channelsSucceeded: [],
		channelsFailed: [],
		mode: config.mode
	};
	const results = await Promise.allSettled(channels.map(async (ch) => {
		const ok = await PROVIDERS[ch].send(payload, config);
		if (!ok) process.stderr.write(`[pai-notify] Channel ${ch} failed for event ${payload.event}\n`);
		return ok;
	}));
	const succeeded = [];
	const failed = [];
	// Fixed: the original recovered the channel name for rejected promises via
	// results.indexOf(r) — O(n²) and dependent on reference identity.
	// Promise.allSettled preserves input order, so index i maps directly to
	// channels[i] for fulfilled and rejected results alike.
	results.forEach((r, i) => {
		if (r.status === "fulfilled" && r.value) succeeded.push(channels[i]);
		else failed.push(channels[i]);
	});
	return {
		channelsAttempted: channels,
		channelsSucceeded: succeeded,
		channelsFailed: failed,
		mode: config.mode
	};
}
303
+
304
+ //#endregion
305
+ //#region src/daemon/daemon.ts
306
+ /**
307
+ * daemon.ts — The persistent PAI Daemon
308
+ *
309
+ * Provides shared database access, tool dispatch, and periodic index scheduling
310
+ * for multiple concurrent Claude Code sessions via a Unix Domain Socket.
311
+ *
312
+ * Architecture:
313
+ * MCP shims (Claude sessions) → Unix socket → PAI Daemon
314
+ * ├── registry.db (shared, WAL, always SQLite)
315
+ * ├── federation (SQLite or Postgres/pgvector)
316
+ * ├── Embedding model (singleton)
317
+ * └── Index scheduler (periodic)
318
+ *
319
+ * IPC protocol: NDJSON over Unix Domain Socket
320
+ *
321
+ * Request (shim → daemon):
322
+ * { "id": "uuid", "method": "tool_name_or_special", "params": {} }
323
+ *
324
+ * Response (daemon → shim):
325
+ * { "id": "uuid", "ok": true, "result": <any> }
326
+ * { "id": "uuid", "ok": false, "error": "message" }
327
+ *
328
+ * Special methods:
329
+ * status — Return daemon status (uptime, index state, db stats)
330
+ * index_now — Trigger immediate index run (non-blocking)
331
+ *
332
+ * All other methods are dispatched to the corresponding PAI tool function.
333
+ *
334
+ * Design notes:
335
+ * - Registry stays in SQLite (small, simple metadata).
336
+ * - Federation backend is configurable: SQLite (default) or Postgres/pgvector.
337
+ * - Auto-fallback: if Postgres is configured but unavailable, falls back to SQLite.
338
+ * - Index writes guarded by indexInProgress flag (not a mutex — index is idempotent).
339
+ * - Embedding model loaded lazily on first semantic/hybrid request, then kept alive.
340
+ * - Scheduler runs indexAll() every indexIntervalSecs (default 5 minutes).
341
+ */
342
var daemon_exports = /* @__PURE__ */ __exportAll({ serve: () => serve });
// ---- Daemon-wide mutable state (single process, single event loop — no locking) ----
let registryDb; // registry.db handle (always SQLite, opened in serve())
let storageBackend; // federation storage backend (SQLite or Postgres)
let daemonConfig; // config object passed to serve()
let startTime = Date.now(); // for uptime reporting in the `status` IPC method
let indexInProgress = false; // guard flag against overlapping index runs
let lastIndexTime = 0; // ms epoch of last completed index pass (0 = never)
let indexSchedulerTimer = null; // setInterval handle for the index scheduler
let embedInProgress = false; // guard flag against overlapping embed passes
let lastEmbedTime = 0; // ms epoch of last completed embed pass (0 = never)
let embedSchedulerTimer = null; // setInterval handle for the embed scheduler
/** Mutable notification config — loaded from disk at startup, patchable at runtime */
let notificationConfig;
/**
 * Set to true when a SIGTERM/SIGINT is received so that long-running loops
 * (embed, index) can detect the signal and exit their inner loops before the
 * pool/backend is closed. Checked by embedChunksWithBackend() via the
 * `shouldStop` callback passed from runEmbed().
 */
let shutdownRequested = false;
362
/**
 * Run a full index pass. Guards against overlapping runs (indexInProgress)
 * and defers when an embed pass currently holds the storage backend.
 * Called both by the scheduler and by the index_now IPC method.
 *
 * NOTE: The SQLite path hands the raw better-sqlite3 DB to indexAll();
 * the Postgres path goes through the backend-aware indexer instead.
 */
async function runIndex() {
	if (indexInProgress) {
		process.stderr.write("[pai-daemon] Index already in progress, skipping.\n");
		return;
	}
	if (embedInProgress) {
		process.stderr.write("[pai-daemon] Embed in progress, deferring index run.\n");
		return;
	}
	indexInProgress = true;
	const startedAt = Date.now();
	try {
		process.stderr.write("[pai-daemon] Starting scheduled index run...\n");
		if (storageBackend.backendType === "sqlite") {
			const { SQLiteBackend } = await import("./sqlite-CHUrNtbI.mjs");
			// Only the SQLite backend exposes a raw DB handle for the legacy indexer.
			if (storageBackend instanceof SQLiteBackend) {
				const { projects, result } = await indexAll(storageBackend.getRawDb(), registryDb);
				const elapsed = Date.now() - startedAt;
				lastIndexTime = Date.now();
				process.stderr.write(`[pai-daemon] Index complete: ${projects} projects, ${result.filesProcessed} files, ${result.chunksCreated} chunks (${elapsed}ms)\n`);
			}
		} else {
			const { indexAllWithBackend } = await import("./indexer-backend-BXaocO5r.mjs");
			const { projects, result } = await indexAllWithBackend(storageBackend, registryDb);
			const elapsed = Date.now() - startedAt;
			lastIndexTime = Date.now();
			process.stderr.write(`[pai-daemon] Index complete (postgres): ${projects} projects, ${result.filesProcessed} files, ${result.chunksCreated} chunks (${elapsed}ms)\n`);
		}
	} catch (e) {
		const msg = e instanceof Error ? e.message : String(e);
		process.stderr.write(`[pai-daemon] Index error: ${msg}\n`);
	} finally {
		indexInProgress = false;
	}
}
406
/**
 * Start the periodic index scheduler: one warm-up run 2s after startup,
 * then a repeating run every daemonConfig.indexIntervalSecs.
 * The interval is unref'd so it never keeps the process alive by itself.
 */
function startIndexScheduler() {
	const everySecs = daemonConfig.indexIntervalSecs;
	process.stderr.write(`[pai-daemon] Index scheduler: every ${everySecs}s\n`);
	const safeRun = (label) => {
		runIndex().catch((e) => {
			process.stderr.write(`[pai-daemon] ${label} index error: ${e}\n`);
		});
	};
	setTimeout(() => safeRun("Startup"), 2e3);
	indexSchedulerTimer = setInterval(() => safeRun("Scheduled"), everySecs * 1e3);
	indexSchedulerTimer.unref?.();
}
424
/**
 * Run an embedding pass for all unembedded chunks (Postgres backend only).
 * Guards against overlapping runs (embedInProgress) and defers while an
 * index run is in progress to avoid contention on the backend.
 */
async function runEmbed() {
	if (embedInProgress) {
		process.stderr.write("[pai-daemon] Embed already in progress, skipping.\n");
		return;
	}
	if (indexInProgress) {
		process.stderr.write("[pai-daemon] Index in progress, deferring embed pass.\n");
		return;
	}
	if (storageBackend.backendType !== "postgres") return;
	embedInProgress = true;
	const startedAt = Date.now();
	try {
		process.stderr.write("[pai-daemon] Starting scheduled embed pass...\n");
		const { embedChunksWithBackend } = await import("./indexer-backend-BXaocO5r.mjs");
		// The shouldStop callback lets the embed loop bail out once shutdown begins.
		const count = await embedChunksWithBackend(storageBackend, () => shutdownRequested);
		const elapsed = Date.now() - startedAt;
		lastEmbedTime = Date.now();
		process.stderr.write(`[pai-daemon] Embed pass complete: ${count} chunks embedded (${elapsed}ms)\n`);
	} catch (e) {
		const msg = e instanceof Error ? e.message : String(e);
		process.stderr.write(`[pai-daemon] Embed error: ${msg}\n`);
	} finally {
		embedInProgress = false;
	}
}
455
/**
 * Start the periodic embed scheduler.
 * First run fires 30 seconds after startup (after the 2-second index
 * warm-up), then repeats every daemonConfig.embedIntervalSecs.
 * The interval is unref'd so it never keeps the process alive by itself.
 */
function startEmbedScheduler() {
	const everySecs = daemonConfig.embedIntervalSecs;
	process.stderr.write(`[pai-daemon] Embed scheduler: every ${everySecs}s\n`);
	const safeRun = (label) => {
		runEmbed().catch((e) => {
			process.stderr.write(`[pai-daemon] ${label} embed error: ${e}\n`);
		});
	};
	setTimeout(() => safeRun("Startup"), 3e4);
	embedSchedulerTimer = setInterval(() => safeRun("Scheduled"), everySecs * 1e3);
	embedSchedulerTimer.unref?.();
}
474
/**
 * Dispatch an IPC tool call to the matching PAI tool function.
 * Throws on unknown method names; otherwise returns the tool result.
 */
async function dispatchTool(method, params) {
	const p = params;
	const handlers = {
		memory_search: () => toolMemorySearch(registryDb, storageBackend, p),
		memory_get: () => toolMemoryGet(registryDb, p),
		project_info: () => toolProjectInfo(registryDb, p),
		project_list: () => toolProjectList(registryDb, p),
		session_list: () => toolSessionList(registryDb, p),
		registry_search: () => toolRegistrySearch(registryDb, p),
		project_detect: () => toolProjectDetect(registryDb, p),
		project_health: () => toolProjectHealth(registryDb, p),
		project_todo: () => toolProjectTodo(registryDb, p),
		topic_check: () => detectTopicShift(registryDb, storageBackend, p),
		session_auto_route: () => toolSessionRoute(registryDb, storageBackend, p)
	};
	// Object.hasOwn guards against inherited keys like "toString".
	if (!Object.hasOwn(handlers, method)) throw new Error(`Unknown method: ${method}`);
	return handlers[method]();
}
495
/**
 * Serialize a response object as one NDJSON line and write it to the
 * socket. Failures (peer already gone, unserializable payload) are
 * deliberately ignored — the daemon must never crash because a shim
 * disconnected early.
 */
function sendResponse(socket, response) {
	try {
		socket.write(`${JSON.stringify(response)}\n`);
	} catch {
		// Best-effort write; nothing useful to do on failure.
	}
}
500
/**
 * Handle a single decoded IPC request and write exactly one NDJSON
 * response to the socket. Special methods (status, index_now,
 * notification_*) are served inline; every other method is forwarded to
 * dispatchTool(). The socket is always ended after the response.
 */
async function handleRequest(request, socket) {
	const { id, method, params } = request;
	if (method === "status") {
		// Gather DB stats best-effort; a failing backend must not break `status`.
		const dbStats = await (async () => {
			try {
				const fedStats = await storageBackend.getStats();
				const projects = registryDb.prepare("SELECT COUNT(*) AS n FROM projects").get().n;
				return {
					files: fedStats.files,
					chunks: fedStats.chunks,
					projects
				};
			} catch {
				return null;
			}
		})();
		sendResponse(socket, {
			id,
			ok: true,
			result: {
				uptime: Math.floor((Date.now() - startTime) / 1e3),
				indexInProgress,
				lastIndexTime: lastIndexTime ? new Date(lastIndexTime).toISOString() : null,
				indexIntervalSecs: daemonConfig.indexIntervalSecs,
				embedInProgress,
				lastEmbedTime: lastEmbedTime ? new Date(lastEmbedTime).toISOString() : null,
				embedIntervalSecs: daemonConfig.embedIntervalSecs,
				socketPath: daemonConfig.socketPath,
				storageBackend: storageBackend.backendType,
				db: dbStats
			}
		});
		socket.end();
		return;
	}
	if (method === "index_now") {
		// Fire-and-forget: the index runs in the background, reply immediately.
		runIndex().catch((e) => {
			process.stderr.write(`[pai-daemon] index_now error: ${e}\n`);
		});
		sendResponse(socket, {
			id,
			ok: true,
			result: { triggered: true }
		});
		socket.end();
		return;
	}
	if (method === "notification_get_config") {
		sendResponse(socket, {
			id,
			ok: true,
			result: {
				config: notificationConfig,
				// "voice" is not a directly routable channel, so it is excluded
				// from the activeChannels summary.
				activeChannels: Object.entries(notificationConfig.channels).filter(([ch, cfg]) => ch !== "voice" && cfg.enabled).map(([ch]) => ch)
			}
		});
		socket.end();
		return;
	}
	if (method === "notification_set_config") {
		try {
			const p = params;
			// patchNotificationConfig persists to disk and returns the merged
			// config, which replaces the in-memory copy.
			notificationConfig = patchNotificationConfig({
				mode: p.mode,
				channels: p.channels,
				routing: p.routing
			});
			sendResponse(socket, {
				id,
				ok: true,
				result: { config: notificationConfig }
			});
		} catch (e) {
			sendResponse(socket, {
				id,
				ok: false,
				error: e instanceof Error ? e.message : String(e)
			});
		}
		socket.end();
		return;
	}
	if (method === "notification_send") {
		const p = params;
		if (!p.message) {
			sendResponse(socket, {
				id,
				ok: false,
				error: "notification_send: message is required"
			});
			socket.end();
			return;
		}
		// Asynchronous delivery: the response carries the per-channel
		// routing result once all providers have settled.
		routeNotification({
			event: p.event ?? "info",
			message: p.message,
			title: p.title
		}, notificationConfig).then((result) => {
			sendResponse(socket, {
				id,
				ok: true,
				result
			});
			socket.end();
		}).catch((e) => {
			sendResponse(socket, {
				id,
				ok: false,
				error: e instanceof Error ? e.message : String(e)
			});
			socket.end();
		});
		return;
	}
	// Everything else is a plain tool call dispatched by name.
	try {
		sendResponse(socket, {
			id,
			ok: true,
			result: await dispatchTool(method, params)
		});
	} catch (e) {
		sendResponse(socket, {
			id,
			ok: false,
			error: e instanceof Error ? e.message : String(e)
		});
	}
	socket.end();
}
632
/**
 * Check whether an existing socket file is actually served by a live
 * process. Resolves true if something accepts a connection within 500ms,
 * false on connect error or timeout. Never rejects.
 */
function isSocketLive(path) {
	return new Promise((resolve) => {
		const probe = connect(path);
		const timer = setTimeout(() => {
			probe.destroy();
			resolve(false);
		}, 500);
		probe.once("connect", () => {
			clearTimeout(timer);
			probe.end();
			resolve(true);
		});
		probe.once("error", () => {
			clearTimeout(timer);
			resolve(false);
		});
	});
}
654
/**
 * Start the Unix Domain Socket IPC server.
 *
 * If a socket file already exists: abort startup when another daemon is
 * live on it, otherwise remove the stale file and bind fresh. Each client
 * connection is parsed as NDJSON — every complete line becomes one
 * handleRequest() call.
 */
async function startIpcServer(socketPath) {
	if (existsSync(socketPath)) {
		if (await isSocketLive(socketPath)) throw new Error("Another daemon is already running — socket is live. Aborting startup.");
		try {
			unlinkSync(socketPath);
			process.stderr.write("[pai-daemon] Removed stale socket file.\n");
		} catch {}
	}
	const server = createServer((socket) => {
		let buffer = "";
		socket.on("data", (chunk) => {
			buffer += chunk.toString();
			let nl;
			// Drain every complete line currently buffered (NDJSON framing).
			while ((nl = buffer.indexOf("\n")) !== -1) {
				const line = buffer.slice(0, nl);
				buffer = buffer.slice(nl + 1);
				if (line.trim() === "") continue;
				let request;
				try {
					request = JSON.parse(line);
				} catch {
					// Malformed frame: reply with a synthetic id, then drop the peer.
					sendResponse(socket, {
						id: "?",
						ok: false,
						error: "Invalid JSON"
					});
					socket.destroy();
					return;
				}
				// handleRequest sends its own response; this catch covers
				// unexpected rejections escaping the handler.
				handleRequest(request, socket).catch((e) => {
					const msg = e instanceof Error ? e.message : String(e);
					sendResponse(socket, {
						id: request.id,
						ok: false,
						error: msg
					});
					socket.destroy();
				});
			}
		});
		// Peer resets are routine (shims disconnect at will); ignore them.
		socket.on("error", () => {});
	});
	server.on("error", (e) => {
		process.stderr.write(`[pai-daemon] IPC server error: ${e}\n`);
	});
	server.listen(socketPath, () => {
		process.stderr.write(`[pai-daemon] IPC server listening on ${socketPath}\n`);
	});
	return server;
}
707
/**
 * Daemon entry point: open databases, start the schedulers and the IPC
 * server, then park forever. Installs SIGINT/SIGTERM handlers that drain
 * in-progress index/embed work (up to 10s), close the backend, remove the
 * socket file, and exit.
 *
 * @param config Daemon configuration (socket path, storage backend,
 *               scheduler intervals, embedding model)
 */
async function serve(config) {
	daemonConfig = config;
	startTime = Date.now();
	notificationConfig = loadNotificationConfig();
	process.stderr.write("[pai-daemon] Starting daemon...\n");
	process.stderr.write(`[pai-daemon] Socket: ${config.socketPath}\n`);
	process.stderr.write(`[pai-daemon] Storage backend: ${config.storageBackend}\n`);
	process.stderr.write(`[pai-daemon] Notification mode: ${notificationConfig.mode}\n`);
	try {
		// Lower scheduling priority — the daemon is a background service.
		// May fail without privileges; best-effort only.
		setPriority(process.pid, 10);
	} catch {}
	configureEmbeddingModel(config.embeddingModel);
	try {
		registryDb = openRegistry();
		process.stderr.write("[pai-daemon] Registry database opened.\n");
	} catch (e) {
		const msg = e instanceof Error ? e.message : String(e);
		process.stderr.write(`[pai-daemon] Fatal: Could not open registry: ${msg}\n`);
		process.exit(1);
	}
	try {
		storageBackend = await createStorageBackend(config);
		process.stderr.write(`[pai-daemon] Federation backend: ${storageBackend.backendType}\n`);
	} catch (e) {
		const msg = e instanceof Error ? e.message : String(e);
		process.stderr.write(`[pai-daemon] Fatal: Could not open federation storage: ${msg}\n`);
		process.exit(1);
	}
	startIndexScheduler();
	// The embed pass only applies to the Postgres/pgvector backend.
	if (storageBackend.backendType === "postgres") startEmbedScheduler();
	else process.stderr.write("[pai-daemon] Embed scheduler: disabled (SQLite backend)\n");
	const server = await startIpcServer(config.socketPath);
	const shutdown = async (signal) => {
		process.stderr.write(`\n[pai-daemon] ${signal} received. Stopping.\n`);
		// Signals long-running loops (see shutdownRequested above) to bail out.
		shutdownRequested = true;
		if (indexSchedulerTimer) clearInterval(indexSchedulerTimer);
		if (embedSchedulerTimer) clearInterval(embedSchedulerTimer);
		server.close();
		const SHUTDOWN_TIMEOUT_MS = 1e4;
		const POLL_INTERVAL_MS = 100;
		const deadline = Date.now() + SHUTDOWN_TIMEOUT_MS;
		// Drain in-flight index/embed work before closing the backend beneath it.
		if (indexInProgress || embedInProgress) {
			process.stderr.write(`[pai-daemon] Waiting for in-progress operations to finish (index=${indexInProgress}, embed=${embedInProgress})...\n`);
			while ((indexInProgress || embedInProgress) && Date.now() < deadline) await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS));
			if (indexInProgress || embedInProgress) process.stderr.write("[pai-daemon] Shutdown timeout reached — forcing exit.\n");
			else process.stderr.write("[pai-daemon] In-progress operations finished.\n");
		}
		try {
			await storageBackend.close();
		} catch {}
		try {
			unlinkSync(config.socketPath);
		} catch {}
		process.exit(0);
	};
	process.on("SIGINT", () => {
		shutdown("SIGINT").catch(() => process.exit(0));
	});
	process.on("SIGTERM", () => {
		shutdown("SIGTERM").catch(() => process.exit(0));
	});
	// Park the event loop forever; only a signal terminates the daemon.
	await new Promise(() => {});
}
770
+
771
+ //#endregion
772
+ export { serve as n, daemon_exports as t };
773
+ //# sourceMappingURL=daemon-v5O897D4.mjs.map