@tekmidian/pai 0.5.7 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +72 -1
- package/README.md +87 -1
- package/dist/{auto-route-BG6I_4B1.mjs → auto-route-C-DrW6BL.mjs} +3 -3
- package/dist/{auto-route-BG6I_4B1.mjs.map → auto-route-C-DrW6BL.mjs.map} +1 -1
- package/dist/cli/index.mjs +1482 -1628
- package/dist/cli/index.mjs.map +1 -1
- package/dist/clusters-JIDQW65f.mjs +201 -0
- package/dist/clusters-JIDQW65f.mjs.map +1 -0
- package/dist/{config-Cf92lGX_.mjs → config-BuhHWyOK.mjs} +21 -6
- package/dist/config-BuhHWyOK.mjs.map +1 -0
- package/dist/daemon/index.mjs +11 -8
- package/dist/daemon/index.mjs.map +1 -1
- package/dist/{daemon-2ND5WO2j.mjs → daemon-D3hYb5_C.mjs} +669 -218
- package/dist/daemon-D3hYb5_C.mjs.map +1 -0
- package/dist/daemon-mcp/index.mjs +4597 -4
- package/dist/daemon-mcp/index.mjs.map +1 -1
- package/dist/db-DdUperSl.mjs +110 -0
- package/dist/db-DdUperSl.mjs.map +1 -0
- package/dist/{detect-BU3Nx_2L.mjs → detect-CdaA48EI.mjs} +1 -1
- package/dist/{detect-BU3Nx_2L.mjs.map → detect-CdaA48EI.mjs.map} +1 -1
- package/dist/{detector-Bp-2SM3x.mjs → detector-jGBuYQJM.mjs} +2 -2
- package/dist/{detector-Bp-2SM3x.mjs.map → detector-jGBuYQJM.mjs.map} +1 -1
- package/dist/{factory-Bzcy70G9.mjs → factory-Ygqe_bVZ.mjs} +7 -5
- package/dist/{factory-Bzcy70G9.mjs.map → factory-Ygqe_bVZ.mjs.map} +1 -1
- package/dist/helpers-BEST-4Gx.mjs +420 -0
- package/dist/helpers-BEST-4Gx.mjs.map +1 -0
- package/dist/hooks/capture-all-events.mjs +2 -2
- package/dist/hooks/capture-all-events.mjs.map +3 -3
- package/dist/hooks/capture-session-summary.mjs +38 -0
- package/dist/hooks/capture-session-summary.mjs.map +3 -3
- package/dist/hooks/cleanup-session-files.mjs +6 -12
- package/dist/hooks/cleanup-session-files.mjs.map +4 -4
- package/dist/hooks/context-compression-hook.mjs +93 -104
- package/dist/hooks/context-compression-hook.mjs.map +4 -4
- package/dist/hooks/initialize-session.mjs +14 -11
- package/dist/hooks/initialize-session.mjs.map +4 -4
- package/dist/hooks/inject-observations.mjs +220 -0
- package/dist/hooks/inject-observations.mjs.map +7 -0
- package/dist/hooks/load-core-context.mjs +2 -2
- package/dist/hooks/load-core-context.mjs.map +3 -3
- package/dist/hooks/load-project-context.mjs +90 -91
- package/dist/hooks/load-project-context.mjs.map +4 -4
- package/dist/hooks/observe.mjs +354 -0
- package/dist/hooks/observe.mjs.map +7 -0
- package/dist/hooks/stop-hook.mjs +94 -107
- package/dist/hooks/stop-hook.mjs.map +4 -4
- package/dist/hooks/sync-todo-to-md.mjs +31 -33
- package/dist/hooks/sync-todo-to-md.mjs.map +4 -4
- package/dist/index.d.mts +30 -7
- package/dist/index.d.mts.map +1 -1
- package/dist/index.mjs +5 -8
- package/dist/indexer-D53l5d1U.mjs +1 -0
- package/dist/{indexer-backend-CIMXedqk.mjs → indexer-backend-jcJFsmB4.mjs} +37 -127
- package/dist/indexer-backend-jcJFsmB4.mjs.map +1 -0
- package/dist/{ipc-client-Bjg_a1dc.mjs → ipc-client-CoyUHPod.mjs} +2 -7
- package/dist/{ipc-client-Bjg_a1dc.mjs.map → ipc-client-CoyUHPod.mjs.map} +1 -1
- package/dist/latent-ideas-bTJo6Omd.mjs +191 -0
- package/dist/latent-ideas-bTJo6Omd.mjs.map +1 -0
- package/dist/neighborhood-BYYbEkUJ.mjs +135 -0
- package/dist/neighborhood-BYYbEkUJ.mjs.map +1 -0
- package/dist/note-context-BK24bX8Y.mjs +126 -0
- package/dist/note-context-BK24bX8Y.mjs.map +1 -0
- package/dist/postgres-CKf-EDtS.mjs +846 -0
- package/dist/postgres-CKf-EDtS.mjs.map +1 -0
- package/dist/{reranker-D7bRAHi6.mjs → reranker-CMNZcfVx.mjs} +1 -1
- package/dist/{reranker-D7bRAHi6.mjs.map → reranker-CMNZcfVx.mjs.map} +1 -1
- package/dist/{search-_oHfguA5.mjs → search-DC1qhkKn.mjs} +2 -58
- package/dist/search-DC1qhkKn.mjs.map +1 -0
- package/dist/{sqlite-WWBq7_2C.mjs → sqlite-l-s9xPjY.mjs} +160 -3
- package/dist/sqlite-l-s9xPjY.mjs.map +1 -0
- package/dist/state-C6_vqz7w.mjs +102 -0
- package/dist/state-C6_vqz7w.mjs.map +1 -0
- package/dist/stop-words-BaMEGVeY.mjs +326 -0
- package/dist/stop-words-BaMEGVeY.mjs.map +1 -0
- package/dist/{indexer-CMPOiY1r.mjs → sync-BOsnEj2-.mjs} +14 -216
- package/dist/sync-BOsnEj2-.mjs.map +1 -0
- package/dist/themes-BvYF0W8T.mjs +148 -0
- package/dist/themes-BvYF0W8T.mjs.map +1 -0
- package/dist/{tools-DV_lsiCc.mjs → tools-DcaJlYDN.mjs} +162 -273
- package/dist/tools-DcaJlYDN.mjs.map +1 -0
- package/dist/trace-CRx9lPuc.mjs +137 -0
- package/dist/trace-CRx9lPuc.mjs.map +1 -0
- package/dist/{vault-indexer-k-kUlaZ-.mjs → vault-indexer-Bi2cRmn7.mjs} +134 -132
- package/dist/vault-indexer-Bi2cRmn7.mjs.map +1 -0
- package/dist/zettelkasten-cdajbnPr.mjs +708 -0
- package/dist/zettelkasten-cdajbnPr.mjs.map +1 -0
- package/package.json +1 -2
- package/src/hooks/ts/lib/project-utils/index.ts +50 -0
- package/src/hooks/ts/lib/project-utils/notify.ts +75 -0
- package/src/hooks/ts/lib/project-utils/paths.ts +218 -0
- package/src/hooks/ts/lib/project-utils/session-notes.ts +363 -0
- package/src/hooks/ts/lib/project-utils/todo.ts +178 -0
- package/src/hooks/ts/lib/project-utils/tokens.ts +39 -0
- package/src/hooks/ts/lib/project-utils.ts +40 -1018
- package/src/hooks/ts/post-tool-use/observe.ts +327 -0
- package/src/hooks/ts/session-end/capture-session-summary.ts +41 -0
- package/src/hooks/ts/session-start/inject-observations.ts +254 -0
- package/dist/chunker-CbnBe0s0.mjs +0 -191
- package/dist/chunker-CbnBe0s0.mjs.map +0 -1
- package/dist/config-Cf92lGX_.mjs.map +0 -1
- package/dist/daemon-2ND5WO2j.mjs.map +0 -1
- package/dist/db-Dp8VXIMR.mjs +0 -212
- package/dist/db-Dp8VXIMR.mjs.map +0 -1
- package/dist/indexer-CMPOiY1r.mjs.map +0 -1
- package/dist/indexer-backend-CIMXedqk.mjs.map +0 -1
- package/dist/mcp/index.d.mts +0 -1
- package/dist/mcp/index.mjs +0 -500
- package/dist/mcp/index.mjs.map +0 -1
- package/dist/postgres-FXrHDPcE.mjs +0 -358
- package/dist/postgres-FXrHDPcE.mjs.map +0 -1
- package/dist/schemas-BFIgGntb.mjs +0 -3405
- package/dist/schemas-BFIgGntb.mjs.map +0 -1
- package/dist/search-_oHfguA5.mjs.map +0 -1
- package/dist/sqlite-WWBq7_2C.mjs.map +0 -1
- package/dist/tools-DV_lsiCc.mjs.map +0 -1
- package/dist/vault-indexer-k-kUlaZ-.mjs.map +0 -1
- package/dist/zettelkasten-e-a4rW_6.mjs +0 -901
- package/dist/zettelkasten-e-a4rW_6.mjs.map +0 -1
- package/templates/README.md +0 -181
- package/templates/skills/CORE/Aesthetic.md +0 -333
- package/templates/skills/CORE/CONSTITUTION.md +0 -1502
- package/templates/skills/CORE/HistorySystem.md +0 -427
- package/templates/skills/CORE/HookSystem.md +0 -1082
- package/templates/skills/CORE/Prompting.md +0 -509
- package/templates/skills/CORE/ProsodyAgentTemplate.md +0 -53
- package/templates/skills/CORE/ProsodyGuide.md +0 -416
- package/templates/skills/CORE/SKILL.md +0 -741
- package/templates/skills/CORE/SkillSystem.md +0 -213
- package/templates/skills/CORE/TerminalTabs.md +0 -119
- package/templates/skills/CORE/VOICE.md +0 -106
- package/templates/skills/createskill-skill.template.md +0 -78
- package/templates/skills/history-system.template.md +0 -371
- package/templates/skills/hook-system.template.md +0 -913
- package/templates/skills/sessions-skill.template.md +0 -102
- package/templates/skills/skill-system.template.md +0 -214
- package/templates/skills/terminal-tabs.template.md +0 -120
- package/templates/templates.md +0 -20
|
@@ -0,0 +1,846 @@
|
|
|
1
|
+
import { t as STOP_WORDS } from "./stop-words-BaMEGVeY.mjs";
|
|
2
|
+
import { readFileSync } from "node:fs";
|
|
3
|
+
import { dirname, join } from "node:path";
|
|
4
|
+
import { fileURLToPath } from "node:url";
|
|
5
|
+
import pg from "pg";
|
|
6
|
+
|
|
7
|
+
//#region src/storage/postgres/helpers.ts
|
|
8
|
+
/**
|
|
9
|
+
* Internal helper utilities for the Postgres storage backend.
|
|
10
|
+
*/
|
|
11
|
+
/**
|
|
12
|
+
* Convert a Buffer of Float32 LE bytes (as stored in SQLite) to number[].
|
|
13
|
+
*/
|
|
14
|
+
/**
 * Decode a Buffer of little-endian Float32 values (the encoding used by the
 * SQLite backend for embeddings) into a plain number[].
 *
 * Throws a RangeError (from readFloatLE) if the buffer length is not a
 * multiple of 4, same as reading past the end would.
 */
function bufferToVector(buf) {
  const vector = [];
  let offset = 0;
  while (offset < buf.length) {
    vector.push(buf.readFloatLE(offset));
    offset += 4;
  }
  return vector;
}
|
|
19
|
+
/**
|
|
20
|
+
* Convert a free-text query to a Postgres tsquery string.
|
|
21
|
+
*
|
|
22
|
+
* Uses OR (|) semantics so that a chunk matching ANY query term is returned,
|
|
23
|
+
* ranked by ts_rank (which scores higher when more terms match). AND (&)
|
|
24
|
+
* semantics are too strict for multi-word queries because all terms rarely
|
|
25
|
+
* co-occur in a single chunk.
|
|
26
|
+
*
|
|
27
|
+
* Example: "Synchrotech interview follow-up Gilles"
|
|
28
|
+
* → "synchrotech | interview | follow | gilles"
|
|
29
|
+
*/
|
|
30
|
+
/**
 * Convert a free-text query to a Postgres tsquery string.
 *
 * Uses OR (|) semantics so that a chunk matching ANY query term is returned,
 * ranked by ts_rank (which scores higher when more terms match). AND (&)
 * semantics are too strict for multi-word queries because all terms rarely
 * co-occur in a single chunk.
 *
 * Example: "Synchrotech interview follow-up Gilles"
 *   → "synchrotech | interview | follow | gilles"
 *
 * Note: the previous `.replace(/'/g, "''")` SQL-literal quote doubling was
 * removed — it was dead code (tokens come from splitting on \p{P}, which
 * already removes apostrophes) and wrong in principle, since the result is
 * bound as a query parameter, never spliced into a SQL string. We still strip
 * tsquery operator characters such as "|" (a math symbol, not \p{P}, so it
 * survives the split).
 */
function buildPgTsQuery(query) {
  const tokens = query
    .toLowerCase()
    .split(/[\s\p{P}]+/u)
    .filter(Boolean)
    .filter((t) => t.length >= 2)
    .filter((t) => !STOP_WORDS.has(t))
    .map((t) => t.replace(/[&|!():]/g, ""))
    .filter(Boolean);
  if (tokens.length === 0) {
    // Fallback for queries made entirely of stop words / 1-char tokens:
    // keep plain alphanumeric runs so the search still returns something.
    return query.replace(/[^a-z0-9]/gi, " ").trim().split(/\s+/).filter(Boolean).join(" | ") || "";
  }
  return tokens.join(" | ");
}
|
|
35
|
+
|
|
36
|
+
//#endregion
|
|
37
|
+
//#region src/storage/postgres/search.ts
|
|
38
|
+
/**
|
|
39
|
+
* Full-text keyword search using Postgres tsvector/tsquery with 'simple' dictionary.
|
|
40
|
+
*/
|
|
41
|
+
/**
 * Full-text keyword search using Postgres tsvector/tsquery with the 'simple'
 * dictionary. Returns [] when the query yields no usable tsquery, and also
 * on any query error (logged to stderr) — search is best-effort.
 */
async function searchKeyword(pool, query, opts) {
  const maxResults = opts?.maxResults ?? 10;
  const tsQuery = buildPgTsQuery(query);
  if (!tsQuery) return [];
  const conditions = ["fts_vector @@ to_tsquery('simple', $1)"];
  const params = [tsQuery];
  let next = 2;
  // Append an optional `col IN (...)` facet filter with fresh placeholders.
  const addFilter = (column, values) => {
    if (!values || values.length === 0) return;
    const slots = values.map(() => `$${next++}`);
    conditions.push(`${column} IN (${slots.join(", ")})`);
    params.push(...values);
  };
  addFilter("project_id", opts?.projectIds);
  addFilter("source", opts?.sources);
  addFilter("tier", opts?.tiers);
  params.push(maxResults);
  const sql = `
		SELECT
			project_id,
			path,
			start_line,
			end_line,
			text AS snippet,
			tier,
			source,
			ts_rank(fts_vector, to_tsquery('simple', $1)) AS rank_score
		FROM pai_chunks
		WHERE ${conditions.join(" AND ")}
		ORDER BY rank_score DESC
		LIMIT $${next}
	`;
  try {
    const { rows } = await pool.query(sql, params);
    return rows.map((row) => ({
      projectId: row.project_id,
      path: row.path,
      startLine: row.start_line,
      endLine: row.end_line,
      snippet: row.snippet,
      score: row.rank_score,
      tier: row.tier,
      source: row.source
    }));
  } catch (e) {
    process.stderr.write(`[pai-postgres] searchKeyword error: ${e}\n`);
    return [];
  }
}
|
|
96
|
+
/**
|
|
97
|
+
* Semantic vector similarity search using pgvector cosine distance (<=>).
|
|
98
|
+
*/
|
|
99
|
+
/**
 * Semantic vector similarity search using pgvector cosine distance (<=>).
 * score = 1 - cosine_distance; rows scoring below opts.minScore are dropped.
 * Returns [] on any query error (logged to stderr) — search is best-effort.
 */
async function searchSemantic(pool, queryEmbedding, opts) {
  const maxResults = opts?.maxResults ?? 10;
  const conditions = ["embedding IS NOT NULL"];
  const params = [];
  let next = 1;
  // pgvector accepts the textual "[x,y,...]" form; cast to ::vector in SQL.
  params.push("[" + Array.from(queryEmbedding).join(",") + "]");
  const vecParam = `$${next++}`;
  // Append an optional `col IN (...)` facet filter with fresh placeholders.
  const addFilter = (column, values) => {
    if (!values || values.length === 0) return;
    const slots = values.map(() => `$${next++}`);
    conditions.push(`${column} IN (${slots.join(", ")})`);
    params.push(...values);
  };
  addFilter("project_id", opts?.projectIds);
  addFilter("source", opts?.sources);
  addFilter("tier", opts?.tiers);
  params.push(maxResults);
  const sql = `
		SELECT
			project_id,
			path,
			start_line,
			end_line,
			text AS snippet,
			tier,
			source,
			1 - (embedding <=> ${vecParam}::vector) AS cosine_similarity
		FROM pai_chunks
		WHERE ${conditions.join(" AND ")}
		ORDER BY embedding <=> ${vecParam}::vector
		LIMIT $${next}
	`;
  try {
    const result = await pool.query(sql, params);
    const minScore = opts?.minScore ?? -Infinity;
    return result.rows
      .map((row) => ({
        projectId: row.project_id,
        path: row.path,
        startLine: row.start_line,
        endLine: row.end_line,
        snippet: row.snippet,
        score: row.cosine_similarity,
        tier: row.tier,
        source: row.source
      }))
      .filter((r) => r.score >= minScore);
  } catch (e) {
    process.stderr.write(`[pai-postgres] searchSemantic error: ${e}\n`);
    return [];
  }
}
|
|
157
|
+
|
|
158
|
+
//#endregion
|
|
159
|
+
//#region src/storage/postgres/vault.ts
|
|
160
|
+
/** Insert or update a vault file record, keyed by vault_path. */
async function upsertVaultFile(pool, file) {
  const sql = `INSERT INTO vault_files (vault_path, inode, device, hash, title, indexed_at)
		VALUES ($1, $2, $3, $4, $5, $6)
		ON CONFLICT (vault_path) DO UPDATE SET
			inode = EXCLUDED.inode, device = EXCLUDED.device,
			hash = EXCLUDED.hash, title = EXCLUDED.title,
			indexed_at = EXCLUDED.indexed_at`;
  const { vaultPath, inode, device, hash, title, indexedAt } = file;
  await pool.query(sql, [vaultPath, inode, device, hash, title, indexedAt]);
}
|
|
175
|
+
/**
 * Remove a vault file and every row that references it, atomically.
 * Rolls back and rethrows on any failure; always releases the client.
 */
async function deleteVaultFile(pool, vaultPath) {
  // Delete dependents first, then the file row itself.
  const statements = [
    "DELETE FROM vault_links WHERE source_path = $1",
    "DELETE FROM vault_health WHERE vault_path = $1",
    "DELETE FROM vault_name_index WHERE vault_path = $1",
    "DELETE FROM vault_aliases WHERE vault_path = $1 OR canonical_path = $1",
    "DELETE FROM vault_files WHERE vault_path = $1"
  ];
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    for (const stmt of statements) {
      await client.query(stmt, [vaultPath]);
    }
    await client.query("COMMIT");
  } catch (e) {
    await client.query("ROLLBACK");
    throw e;
  } finally {
    client.release();
  }
}
|
|
192
|
+
/**
 * Map a vault_files row (snake_case columns) to a camelCase object.
 * inode/device/indexed_at are wrapped in Number() because pg returns
 * bigint columns as strings.
 */
function mapVaultFileRow({ vault_path, inode, device, hash, title, indexed_at }) {
  return {
    vaultPath: vault_path,
    inode: Number(inode),
    device: Number(device),
    hash,
    title,
    indexedAt: Number(indexed_at)
  };
}
|
|
202
|
+
/** Fetch one vault file record by exact path; null when absent. */
async function getVaultFile(pool, vaultPath) {
  const res = await pool.query(
    "SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files WHERE vault_path = $1",
    [vaultPath]
  );
  if (res.rows.length === 0) return null;
  return mapVaultFileRow(res.rows[0]);
}
|
|
206
|
+
async function getVaultFileByInode(pool, inode, device) {
|
|
207
|
+
const r = await pool.query("SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files WHERE inode = $1 AND device = $2 LIMIT 1", [inode, device]);
|
|
208
|
+
return r.rows.length === 0 ? null : mapVaultFileRow(r.rows[0]);
|
|
209
|
+
}
|
|
210
|
+
async function getAllVaultFiles(pool) {
|
|
211
|
+
return (await pool.query("SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files")).rows.map(mapVaultFileRow);
|
|
212
|
+
}
|
|
213
|
+
async function getRecentVaultFiles(pool, sinceMs) {
|
|
214
|
+
return (await pool.query("SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files WHERE indexed_at > $1", [sinceMs])).rows.map(mapVaultFileRow);
|
|
215
|
+
}
|
|
216
|
+
/** Total number of indexed vault files (COUNT(*) cast to text by pg). */
async function countVaultFiles(pool) {
  const result = await pool.query("SELECT COUNT(*)::text AS n FROM vault_files");
  const raw = result.rows[0]?.n ?? "0";
  return Number.parseInt(raw, 10);
}
|
|
220
|
+
async function countVaultFilesWithPrefix(pool, prefix) {
|
|
221
|
+
const r = await pool.query("SELECT COUNT(*) AS n FROM vault_files WHERE vault_path LIKE $1", [`${prefix}%`]);
|
|
222
|
+
return Number(r.rows[0]?.n ?? 0);
|
|
223
|
+
}
|
|
224
|
+
async function countVaultFilesAfter(pool, sinceMs) {
|
|
225
|
+
const r = await pool.query("SELECT COUNT(*) AS n FROM vault_files WHERE indexed_at > $1", [sinceMs]);
|
|
226
|
+
return Number(r.rows[0]?.n ?? 0);
|
|
227
|
+
}
|
|
228
|
+
async function getVaultFilesByPaths(pool, paths) {
|
|
229
|
+
if (paths.length === 0) return [];
|
|
230
|
+
const placeholders = paths.map((_, i) => `$${i + 1}`).join(", ");
|
|
231
|
+
return (await pool.query(`SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files WHERE vault_path IN (${placeholders})`, paths)).rows.map(mapVaultFileRow);
|
|
232
|
+
}
|
|
233
|
+
async function getVaultFilesByPathsAfter(pool, paths, sinceMs) {
|
|
234
|
+
if (paths.length === 0) return [];
|
|
235
|
+
const placeholders = paths.map((_, i) => `$${i + 1}`).join(", ");
|
|
236
|
+
return (await pool.query(`SELECT vault_path, inode, device, hash, title, indexed_at FROM vault_files WHERE vault_path IN (${placeholders}) AND indexed_at >= $${paths.length + 1} ORDER BY indexed_at ASC`, [...paths, sinceMs])).rows.map(mapVaultFileRow);
|
|
237
|
+
}
|
|
238
|
+
/** All indexed vault file paths, in table order. */
async function getAllVaultFilePaths(pool) {
  const result = await pool.query("SELECT vault_path FROM vault_files");
  return result.rows.map(({ vault_path }) => vault_path);
}
|
|
241
|
+
async function getVaultFilePathsWithPrefix(pool, prefix) {
|
|
242
|
+
return (await pool.query("SELECT vault_path FROM vault_files WHERE vault_path LIKE $1", [`${prefix}%`])).rows.map((row) => row.vault_path);
|
|
243
|
+
}
|
|
244
|
+
async function getVaultFilePathsAfter(pool, sinceMs) {
|
|
245
|
+
return (await pool.query("SELECT vault_path FROM vault_files WHERE indexed_at > $1", [sinceMs])).rows.map((row) => row.vault_path);
|
|
246
|
+
}
|
|
247
|
+
/**
 * Insert or update alias → canonical-path mappings in one transaction.
 * No-op for an empty list. Rolls back and rethrows on failure.
 */
async function upsertVaultAliases(pool, aliases) {
  if (aliases.length === 0) return;
  const sql = `INSERT INTO vault_aliases (vault_path, canonical_path, inode, device)
		VALUES ($1, $2, $3, $4)
		ON CONFLICT (vault_path) DO UPDATE SET
			canonical_path = EXCLUDED.canonical_path,
			inode = EXCLUDED.inode, device = EXCLUDED.device`;
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    for (const alias of aliases) {
      await client.query(sql, [alias.vaultPath, alias.canonicalPath, alias.inode, alias.device]);
    }
    await client.query("COMMIT");
  } catch (e) {
    await client.query("ROLLBACK");
    throw e;
  } finally {
    client.release();
  }
}
|
|
270
|
+
/** Remove every alias that resolves to the given canonical path. */
async function deleteVaultAliases(pool, canonicalPath) {
  const sql = "DELETE FROM vault_aliases WHERE canonical_path = $1";
  await pool.query(sql, [canonicalPath]);
}
|
|
273
|
+
/** Resolve an alias path to its canonical-path record; null when unknown. */
async function getVaultAlias(pool, vaultPath) {
  const { rows } = await pool.query(
    "SELECT canonical_path FROM vault_aliases WHERE vault_path = $1",
    [vaultPath]
  );
  if (rows.length === 0) return null;
  return { canonicalPath: rows[0].canonical_path };
}
|
|
277
|
+
/** Map a vault_links row (snake_case columns) to a camelCase link object. */
function mapVaultLinkRow({ source_path, target_raw, target_path, link_type, line_number }) {
  return {
    sourcePath: source_path,
    targetRaw: target_raw,
    targetPath: target_path,
    linkType: link_type,
    lineNumber: line_number
  };
}
|
|
286
|
+
/**
 * Atomically replace all outgoing links for the given source paths:
 * delete existing rows for those sources, then bulk-insert `links` in
 * batches of 500 tuples, upserting on (source_path, target_raw, line_number).
 * Rolls back and rethrows on failure.
 */
async function replaceLinksForSources(pool, sourcePaths, links) {
  const BATCH = 500;
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    if (sourcePaths.length > 0) {
      await client.query("DELETE FROM vault_links WHERE source_path = ANY($1::text[])", [sourcePaths]);
    }
    for (let start = 0; start < links.length; start += BATCH) {
      const batch = links.slice(start, start + BATCH);
      const tuples = [];
      const params = [];
      for (const [j, link] of batch.entries()) {
        const base = j * 5; // 5 columns per tuple
        tuples.push(`($${base + 1}, $${base + 2}, $${base + 3}, $${base + 4}, $${base + 5})`);
        params.push(link.sourcePath, link.targetRaw, link.targetPath, link.linkType, link.lineNumber);
      }
      await client.query(
        `INSERT INTO vault_links (source_path, target_raw, target_path, link_type, line_number)
				VALUES ${tuples.join(", ")}
				ON CONFLICT (source_path, target_raw, line_number) DO UPDATE SET
					target_path = EXCLUDED.target_path, link_type = EXCLUDED.link_type`,
        params
      );
    }
    await client.query("COMMIT");
  } catch (e) {
    await client.query("ROLLBACK");
    throw e;
  } finally {
    client.release();
  }
}
|
|
313
|
+
async function getLinksFromSource(pool, sourcePath) {
|
|
314
|
+
return (await pool.query("SELECT source_path, target_raw, target_path, link_type, line_number FROM vault_links WHERE source_path = $1", [sourcePath])).rows.map(mapVaultLinkRow);
|
|
315
|
+
}
|
|
316
|
+
async function getLinksToTarget(pool, targetPath) {
|
|
317
|
+
return (await pool.query("SELECT source_path, target_raw, target_path, link_type, line_number FROM vault_links WHERE target_path = $1", [targetPath])).rows.map(mapVaultLinkRow);
|
|
318
|
+
}
|
|
319
|
+
async function getVaultLinkGraph(pool) {
|
|
320
|
+
return (await pool.query("SELECT source_path, target_path FROM vault_links WHERE target_path IS NOT NULL")).rows;
|
|
321
|
+
}
|
|
322
|
+
/** Links whose target could not be resolved (target_path IS NULL). */
async function getDeadLinks(pool) {
  const { rows } = await pool.query("SELECT source_path, target_raw FROM vault_links WHERE target_path IS NULL");
  return rows.map(({ source_path, target_raw }) => ({
    sourcePath: source_path,
    targetRaw: target_raw
  }));
}
|
|
328
|
+
async function getDeadLinksWithLineNumbers(pool) {
|
|
329
|
+
return (await pool.query("SELECT source_path, target_raw, line_number FROM vault_links WHERE target_path IS NULL")).rows.map((row) => ({
|
|
330
|
+
sourcePath: row.source_path,
|
|
331
|
+
targetRaw: row.target_raw,
|
|
332
|
+
lineNumber: row.line_number
|
|
333
|
+
}));
|
|
334
|
+
}
|
|
335
|
+
async function getDeadLinksWithPrefix(pool, prefix) {
|
|
336
|
+
return (await pool.query("SELECT source_path, target_raw, line_number FROM vault_links WHERE target_path IS NULL AND source_path LIKE $1", [`${prefix}%`])).rows.map((row) => ({
|
|
337
|
+
sourcePath: row.source_path,
|
|
338
|
+
targetRaw: row.target_raw,
|
|
339
|
+
lineNumber: row.line_number
|
|
340
|
+
}));
|
|
341
|
+
}
|
|
342
|
+
async function getDeadLinksAfter(pool, sinceMs) {
|
|
343
|
+
return (await pool.query("SELECT source_path, target_raw, line_number FROM vault_links WHERE target_path IS NULL AND source_path IN (SELECT vault_path FROM vault_files WHERE indexed_at > $1)", [sinceMs])).rows.map((row) => ({
|
|
344
|
+
sourcePath: row.source_path,
|
|
345
|
+
targetRaw: row.target_raw,
|
|
346
|
+
lineNumber: row.line_number
|
|
347
|
+
}));
|
|
348
|
+
}
|
|
349
|
+
async function countVaultLinksWithPrefix(pool, prefix) {
|
|
350
|
+
const r = await pool.query("SELECT COUNT(*) AS n FROM vault_links WHERE source_path LIKE $1", [`${prefix}%`]);
|
|
351
|
+
return Number(r.rows[0]?.n ?? 0);
|
|
352
|
+
}
|
|
353
|
+
async function countVaultLinksAfter(pool, sinceMs) {
|
|
354
|
+
const r = await pool.query("SELECT COUNT(*) AS n FROM vault_links WHERE source_path IN (SELECT vault_path FROM vault_files WHERE indexed_at > $1)", [sinceMs]);
|
|
355
|
+
return Number(r.rows[0]?.n ?? 0);
|
|
356
|
+
}
|
|
357
|
+
async function getVaultLinksFromPaths(pool, sourcePaths) {
|
|
358
|
+
if (sourcePaths.length === 0) return [];
|
|
359
|
+
const placeholders = sourcePaths.map((_, i) => `$${i + 1}`).join(", ");
|
|
360
|
+
return (await pool.query(`SELECT source_path, target_raw, target_path, link_type, line_number FROM vault_links WHERE source_path IN (${placeholders}) AND target_path IS NOT NULL`, sourcePaths)).rows.map(mapVaultLinkRow);
|
|
361
|
+
}
|
|
362
|
+
async function getVaultLinkEdges(pool) {
|
|
363
|
+
return (await pool.query("SELECT DISTINCT source_path AS source, target_path AS target FROM vault_links WHERE target_path IS NOT NULL")).rows;
|
|
364
|
+
}
|
|
365
|
+
async function getVaultLinkEdgesWithPrefix(pool, prefix) {
|
|
366
|
+
return (await pool.query("SELECT DISTINCT source_path AS source, target_path AS target FROM vault_links WHERE target_path IS NOT NULL AND source_path LIKE $1", [`${prefix}%`])).rows;
|
|
367
|
+
}
|
|
368
|
+
async function getVaultLinkEdgesAfter(pool, sinceMs) {
|
|
369
|
+
return (await pool.query("SELECT DISTINCT source_path AS source, target_path AS target FROM vault_links WHERE target_path IS NOT NULL AND source_path IN (SELECT vault_path FROM vault_files WHERE indexed_at > $1)", [sinceMs])).rows;
|
|
370
|
+
}
|
|
371
|
+
/**
 * Map a vault_health row to camelCase. is_orphan is persisted as a 0/1
 * integer; computed_at may arrive as a bigint string, hence Number().
 */
function mapVaultHealthRow({ vault_path, inbound_count, outbound_count, dead_link_count, is_orphan, computed_at }) {
  return {
    vaultPath: vault_path,
    inboundCount: inbound_count,
    outboundCount: outbound_count,
    deadLinkCount: dead_link_count,
    isOrphan: is_orphan === 1,
    computedAt: Number(computed_at)
  };
}
|
|
381
|
+
/**
 * Insert or refresh per-file link-health stats in one transaction.
 * isOrphan is persisted as a 0/1 integer. No-op for an empty list.
 * Rolls back and rethrows on failure.
 */
async function upsertVaultHealth(pool, rows) {
  if (rows.length === 0) return;
  const sql = `INSERT INTO vault_health (vault_path, inbound_count, outbound_count, dead_link_count, is_orphan, computed_at)
		VALUES ($1, $2, $3, $4, $5, $6)
		ON CONFLICT (vault_path) DO UPDATE SET
			inbound_count = EXCLUDED.inbound_count,
			outbound_count = EXCLUDED.outbound_count,
			dead_link_count = EXCLUDED.dead_link_count,
			is_orphan = EXCLUDED.is_orphan,
			computed_at = EXCLUDED.computed_at`;
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    for (const h of rows) {
      await client.query(sql, [
        h.vaultPath,
        h.inboundCount,
        h.outboundCount,
        h.deadLinkCount,
        h.isOrphan ? 1 : 0,
        h.computedAt
      ]);
    }
    await client.query("COMMIT");
  } catch (e) {
    await client.query("ROLLBACK");
    throw e;
  } finally {
    client.release();
  }
}
|
|
409
|
+
async function getVaultHealth(pool, vaultPath) {
|
|
410
|
+
const r = await pool.query("SELECT vault_path, inbound_count, outbound_count, dead_link_count, is_orphan, computed_at FROM vault_health WHERE vault_path = $1", [vaultPath]);
|
|
411
|
+
return r.rows.length === 0 ? null : mapVaultHealthRow(r.rows[0]);
|
|
412
|
+
}
|
|
413
|
+
async function getOrphans(pool) {
|
|
414
|
+
return (await pool.query("SELECT vault_path, inbound_count, outbound_count, dead_link_count, is_orphan, computed_at FROM vault_health WHERE is_orphan = 1")).rows.map((row) => ({
|
|
415
|
+
...mapVaultHealthRow(row),
|
|
416
|
+
isOrphan: true
|
|
417
|
+
}));
|
|
418
|
+
}
|
|
419
|
+
async function getOrphansWithPrefix(pool, prefix) {
|
|
420
|
+
return (await pool.query("SELECT vault_path FROM vault_health WHERE is_orphan = 1 AND vault_path LIKE $1", [`${prefix}%`])).rows.map((row) => row.vault_path);
|
|
421
|
+
}
|
|
422
|
+
async function getOrphansAfter(pool, sinceMs) {
|
|
423
|
+
return (await pool.query("SELECT vh.vault_path FROM vault_health vh JOIN vault_files vf ON vh.vault_path = vf.vault_path WHERE vh.is_orphan = 1 AND vf.indexed_at > $1", [sinceMs])).rows.map((row) => row.vault_path);
|
|
424
|
+
}
|
|
425
|
+
async function getLowConnectivity(pool) {
|
|
426
|
+
return (await pool.query("SELECT vault_path FROM vault_health WHERE inbound_count + outbound_count <= 1")).rows.map((row) => row.vault_path);
|
|
427
|
+
}
|
|
428
|
+
async function getLowConnectivityWithPrefix(pool, prefix) {
|
|
429
|
+
return (await pool.query("SELECT vault_path FROM vault_health WHERE inbound_count + outbound_count <= 1 AND vault_path LIKE $1", [`${prefix}%`])).rows.map((row) => row.vault_path);
|
|
430
|
+
}
|
|
431
|
+
async function getLowConnectivityAfter(pool, sinceMs) {
|
|
432
|
+
return (await pool.query("SELECT vh.vault_path FROM vault_health vh JOIN vault_files vf ON vh.vault_path = vf.vault_path WHERE vh.inbound_count + vh.outbound_count <= 1 AND vf.indexed_at > $1", [sinceMs])).rows.map((row) => row.vault_path);
|
|
433
|
+
}
|
|
434
|
+
async function upsertNameIndex(pool, entries) {
|
|
435
|
+
if (entries.length === 0) return;
|
|
436
|
+
const client = await pool.connect();
|
|
437
|
+
try {
|
|
438
|
+
await client.query("BEGIN");
|
|
439
|
+
for (const e of entries) await client.query(`INSERT INTO vault_name_index (name, vault_path)
|
|
440
|
+
VALUES ($1, $2) ON CONFLICT (name, vault_path) DO NOTHING`, [e.name, e.vaultPath]);
|
|
441
|
+
await client.query("COMMIT");
|
|
442
|
+
} catch (e_) {
|
|
443
|
+
await client.query("ROLLBACK");
|
|
444
|
+
throw e_;
|
|
445
|
+
} finally {
|
|
446
|
+
client.release();
|
|
447
|
+
}
|
|
448
|
+
}
|
|
449
|
+
/**
 * Rebuild the (name → vault_path) resolution index from scratch: wipe the
 * table, then bulk-insert entries in batches of 500 tuples, all inside a
 * single transaction. Rolls back and rethrows on failure.
 */
async function replaceNameIndex(pool, entries) {
  const BATCH = 500;
  const client = await pool.connect();
  try {
    await client.query("BEGIN");
    await client.query("DELETE FROM vault_name_index");
    for (let start = 0; start < entries.length; start += BATCH) {
      const batch = entries.slice(start, start + BATCH);
      const tuples = [];
      const params = [];
      for (const [j, entry] of batch.entries()) {
        tuples.push(`($${j * 2 + 1}, $${j * 2 + 2})`);
        params.push(entry.name, entry.vaultPath);
      }
      await client.query(`INSERT INTO vault_name_index (name, vault_path) VALUES ${tuples.join(", ")}`, params);
    }
    await client.query("COMMIT");
  } catch (e) {
    await client.query("ROLLBACK");
    throw e;
  } finally {
    client.release();
  }
}
|
|
473
|
+
/** Exact-name lookup in the name index; returns every matching path. */
async function resolveVaultName(pool, name) {
  const result = await pool.query("SELECT vault_path FROM vault_name_index WHERE name = $1", [name]);
  return result.rows.map(({ vault_path }) => vault_path);
}
|
|
476
|
+
/**
 * Case-insensitive substring search over indexed names.
 * NOTE(review): `query` is embedded in a LIKE pattern without escaping
 * `%`/`_`, so those characters act as wildcards — confirm this is intended.
 */
async function searchVaultNameIndex(pool, query, limit = 100) {
  const result = await pool.query(
    "SELECT DISTINCT vault_path FROM vault_name_index WHERE lower(name) LIKE lower($1) LIMIT $2",
    [`%${query}%`, limit]
  );
  return result.rows.map(({ vault_path }) => vault_path);
}
|
|
479
|
+
|
|
480
|
+
//#endregion
|
|
481
|
+
//#region src/storage/postgres/backend.ts
|
|
482
|
+
/**
|
|
483
|
+
* PostgresBackend — implements StorageBackend using PostgreSQL + pgvector.
|
|
484
|
+
*
|
|
485
|
+
* Vector similarity: pgvector's <=> cosine distance operator
|
|
486
|
+
* Full-text search: PostgreSQL tsvector/tsquery (replaces SQLite FTS5)
|
|
487
|
+
* Connection pooling: node-postgres Pool
|
|
488
|
+
*
|
|
489
|
+
* Schema is auto-initialized on first connection if tables don't exist.
|
|
490
|
+
* Per-user database isolation: each macOS user gets their own database (pai_<username>).
|
|
491
|
+
*/
|
|
492
|
+
const { Pool: PgPool } = pg;
var PostgresBackend = class {
	// Discriminator used by callers to tell storage backends apart.
	backendType = "postgres";
	// node-postgres connection pool; created in the constructor.
	pool;
	/**
	 * Ensure the per-user database exists and has the required schema.
	 * Connects to the default 'postgres' database to CREATE DATABASE if needed,
	 * then connects to the target database to apply init.sql schema.
	 * Safe to call multiple times (fully idempotent).
	 */
	static async ensureDatabase(config) {
		const connStr = config.connectionString ?? `postgresql://${config.user ?? "pai"}:${config.password ?? "pai"}@${config.host ?? "localhost"}:${config.port ?? 5432}/${config.database ?? "pai"}`;
		// Database name is everything after the leading "/" of the URL path.
		// NOTE(review): targetDb is interpolated (quoted but unescaped) into
		// CREATE DATABASE below — confirm names can never contain a '"'.
		const targetDb = new URL(connStr).pathname.slice(1);
		const adminUrl = new URL(connStr);
		adminUrl.pathname = "/postgres";
		const adminPool = new PgPool({
			connectionString: adminUrl.toString(),
			max: 1,
			connectionTimeoutMillis: 5e3
		});
		try {
			// CREATE DATABASE has no IF NOT EXISTS, so probe pg_database first.
			if ((await adminPool.query("SELECT 1 FROM pg_database WHERE datname = $1", [targetDb])).rowCount === 0) {
				await adminPool.query(`CREATE DATABASE "${targetDb}"`);
				process.stderr.write(`[pai-postgres] Created database: ${targetDb}\n`);
			}
		} finally {
			await adminPool.end();
		}
		const targetPool = new PgPool({
			connectionString: connStr,
			max: 1,
			connectionTimeoutMillis: 5e3
		});
		try {
			// 'pai_chunks' doubles as the schema sentinel: absent => apply init.sql.
			if ((await targetPool.query("SELECT 1 FROM information_schema.tables WHERE table_name = 'pai_chunks'")).rowCount === 0) {
				const __dirname = dirname(fileURLToPath(import.meta.url));
				const initSqlPath = join(__dirname, "../../docker/init.sql");
				let initSql;
				try {
					initSql = readFileSync(initSqlPath, "utf-8");
				} catch {
					// Fallback for layouts where dist/ sits one level shallower.
					initSql = readFileSync(join(__dirname, "../docker/init.sql"), "utf-8");
				}
				await targetPool.query(initSql);
				process.stderr.write(`[pai-postgres] Applied schema to database: ${targetDb}\n`);
			}
		} finally {
			await targetPool.end();
		}
	}
	/**
	 * Build the connection pool from an explicit connection string or from
	 * individual fields, falling back to pai:pai@localhost:5432/pai.
	 */
	constructor(config) {
		this.pool = new PgPool({
			connectionString: config.connectionString ?? `postgresql://${config.user ?? "pai"}:${config.password ?? "pai"}@${config.host ?? "localhost"}:${config.port ?? 5432}/${config.database ?? "pai"}`,
			max: config.maxConnections ?? 5,
			connectionTimeoutMillis: config.connectionTimeoutMs ?? 5e3,
			idleTimeoutMillis: 3e4
		});
		// Log idle-client errors; an unhandled pool 'error' would crash the process.
		this.pool.on("error", (err) => {
			process.stderr.write(`[pai-postgres] Pool error: ${err.message}\n`);
		});
	}
	/** Drain and close all pooled connections. */
	async close() {
		await this.pool.end();
	}
	/**
	 * Expose the underlying pg.Pool for callers that need direct query access
	 * (e.g. the daemon's observation IPC methods).
	 */
	getPool() {
		return this.pool;
	}
	/** Total row counts for pai_files and pai_chunks. */
	async getStats() {
		const client = await this.pool.connect();
		try {
			// COUNT(*) is cast to text so the driver returns it losslessly.
			const filesResult = await client.query("SELECT COUNT(*)::text AS n FROM pai_files");
			const chunksResult = await client.query("SELECT COUNT(*)::text AS n FROM pai_chunks");
			return {
				files: parseInt(filesResult.rows[0]?.n ?? "0", 10),
				chunks: parseInt(chunksResult.rows[0]?.n ?? "0", 10)
			};
		} finally {
			client.release();
		}
	}
	/**
	 * Test the connection by running a trivial query.
	 * Returns null on success, error message on failure.
	 */
	async testConnection() {
		let client = null;
		try {
			client = await this.pool.connect();
			await client.query("SELECT 1");
			return null;
		} catch (e) {
			return e instanceof Error ? e.message : String(e);
		} finally {
			client?.release();
		}
	}
	/** Stored content hash for one indexed file, or undefined if unknown. */
	async getFileHash(projectId, path) {
		return (await this.pool.query("SELECT hash FROM pai_files WHERE project_id = $1 AND path = $2", [projectId, path])).rows[0]?.hash;
	}
	/** Insert or refresh one file metadata row, keyed on (project_id, path). */
	async upsertFile(file) {
		await this.pool.query(`INSERT INTO pai_files (project_id, path, source, tier, hash, mtime, size)
VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (project_id, path) DO UPDATE SET
source = EXCLUDED.source,
tier = EXCLUDED.tier,
hash = EXCLUDED.hash,
mtime = EXCLUDED.mtime,
size = EXCLUDED.size`, [
			file.projectId,
			file.path,
			file.source,
			file.tier,
			file.hash,
			file.mtime,
			file.size
		]);
	}
	/** IDs of all chunks currently stored for one file. */
	async getChunkIds(projectId, path) {
		return (await this.pool.query("SELECT id FROM pai_chunks WHERE project_id = $1 AND path = $2", [projectId, path])).rows.map((r) => r.id);
	}
	/** Delete every chunk belonging to one file. */
	async deleteChunksForFile(projectId, path) {
		await this.pool.query("DELETE FROM pai_chunks WHERE project_id = $1 AND path = $2", [projectId, path]);
	}
	/**
	 * Transactionally upsert a batch of chunk rows (no-op on empty input).
	 * The fts_vector column is recomputed from the chunk text on every write.
	 */
	async insertChunks(chunks) {
		if (chunks.length === 0) return;
		const client = await this.pool.connect();
		try {
			await client.query("BEGIN");
			for (const c of chunks) {
				// PostgreSQL text columns reject NUL bytes; strip them first.
				const safeText = c.text.replace(/\0/g, "");
				await client.query(`INSERT INTO pai_chunks
(id, project_id, source, tier, path, start_line, end_line, hash, text, updated_at, fts_vector)
VALUES
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10,
to_tsvector('simple', $9))
ON CONFLICT (id) DO UPDATE SET
project_id = EXCLUDED.project_id,
source = EXCLUDED.source,
tier = EXCLUDED.tier,
path = EXCLUDED.path,
start_line = EXCLUDED.start_line,
end_line = EXCLUDED.end_line,
hash = EXCLUDED.hash,
text = EXCLUDED.text,
updated_at = EXCLUDED.updated_at,
fts_vector = EXCLUDED.fts_vector`, [
					c.id,
					c.projectId,
					c.source,
					c.tier,
					c.path,
					c.startLine,
					c.endLine,
					c.hash,
					safeText,
					c.updatedAt
				]);
			}
			await client.query("COMMIT");
		} catch (e) {
			await client.query("ROLLBACK");
			throw e;
		} finally {
			client.release();
		}
	}
	/** Distinct file paths that currently have chunks for a project. */
	async getDistinctChunkPaths(projectId) {
		return (await this.pool.query("SELECT DISTINCT path FROM pai_chunks WHERE project_id = $1", [projectId])).rows.map((r) => r.path);
	}
	/** Transactionally remove chunks and file rows for the given paths. */
	async deletePaths(projectId, paths) {
		if (paths.length === 0) return;
		const client = await this.pool.connect();
		try {
			await client.query("BEGIN");
			for (const path of paths) {
				await client.query("DELETE FROM pai_chunks WHERE project_id = $1 AND path = $2", [projectId, path]);
				await client.query("DELETE FROM pai_files WHERE project_id = $1 AND path = $2", [projectId, path]);
			}
			await client.query("COMMIT");
		} catch (e) {
			await client.query("ROLLBACK");
			throw e;
		} finally {
			client.release();
		}
	}
	/**
	 * Chunks still lacking an embedding, optionally scoped to one project.
	 * (Despite the name, returns full rows: id, text, project_id, path.)
	 */
	async getUnembeddedChunkIds(projectId) {
		if (projectId !== void 0) return (await this.pool.query("SELECT id, text, project_id, path FROM pai_chunks WHERE embedding IS NULL AND project_id = $1 ORDER BY id", [projectId])).rows;
		return (await this.pool.query("SELECT id, text, project_id, path FROM pai_chunks WHERE embedding IS NULL ORDER BY id")).rows;
	}
	/** Store one chunk's embedding, converting the buffer to pgvector's "[a,b,...]" literal. */
	async updateEmbedding(chunkId, embedding) {
		const vecStr = "[" + bufferToVector(embedding).join(",") + "]";
		await this.pool.query("UPDATE pai_chunks SET embedding = $1::vector WHERE id = $2", [vecStr, chunkId]);
	}
	// --- Search: delegates to the module-level query helpers. ---
	async searchKeyword(query, opts) {
		return searchKeyword(this.pool, query, opts);
	}
	async searchSemantic(queryEmbedding, opts) {
		return searchSemantic(this.pool, queryEmbedding, opts);
	}
	// --- Vault files: thin delegations to module-level helpers. ---
	async upsertVaultFile(file) {
		return upsertVaultFile(this.pool, file);
	}
	async deleteVaultFile(vaultPath) {
		return deleteVaultFile(this.pool, vaultPath);
	}
	async getVaultFile(vaultPath) {
		return getVaultFile(this.pool, vaultPath);
	}
	async getVaultFileByInode(inode, device) {
		return getVaultFileByInode(this.pool, inode, device);
	}
	async getAllVaultFiles() {
		return getAllVaultFiles(this.pool);
	}
	async getRecentVaultFiles(sinceMs) {
		return getRecentVaultFiles(this.pool, sinceMs);
	}
	async countVaultFiles() {
		return countVaultFiles(this.pool);
	}
	async countVaultFilesWithPrefix(prefix) {
		return countVaultFilesWithPrefix(this.pool, prefix);
	}
	async countVaultFilesAfter(sinceMs) {
		return countVaultFilesAfter(this.pool, sinceMs);
	}
	async getVaultFilesByPaths(paths) {
		return getVaultFilesByPaths(this.pool, paths);
	}
	async getVaultFilesByPathsAfter(paths, sinceMs) {
		return getVaultFilesByPathsAfter(this.pool, paths, sinceMs);
	}
	async getAllVaultFilePaths() {
		return getAllVaultFilePaths(this.pool);
	}
	async getVaultFilePathsWithPrefix(prefix) {
		return getVaultFilePathsWithPrefix(this.pool, prefix);
	}
	async getVaultFilePathsAfter(sinceMs) {
		return getVaultFilePathsAfter(this.pool, sinceMs);
	}
	// --- Vault aliases. ---
	async upsertVaultAliases(aliases) {
		return upsertVaultAliases(this.pool, aliases);
	}
	async deleteVaultAliases(canonicalPath) {
		return deleteVaultAliases(this.pool, canonicalPath);
	}
	async getVaultAlias(vaultPath) {
		return getVaultAlias(this.pool, vaultPath);
	}
	// --- Vault link graph. ---
	async replaceLinksForSources(sourcePaths, links) {
		return replaceLinksForSources(this.pool, sourcePaths, links);
	}
	async getLinksFromSource(sourcePath) {
		return getLinksFromSource(this.pool, sourcePath);
	}
	async getLinksToTarget(targetPath) {
		return getLinksToTarget(this.pool, targetPath);
	}
	async getVaultLinkGraph() {
		return getVaultLinkGraph(this.pool);
	}
	async getDeadLinks() {
		return getDeadLinks(this.pool);
	}
	async getDeadLinksWithLineNumbers() {
		return getDeadLinksWithLineNumbers(this.pool);
	}
	async getDeadLinksWithPrefix(prefix) {
		return getDeadLinksWithPrefix(this.pool, prefix);
	}
	async getDeadLinksAfter(sinceMs) {
		return getDeadLinksAfter(this.pool, sinceMs);
	}
	async countVaultLinksWithPrefix(prefix) {
		return countVaultLinksWithPrefix(this.pool, prefix);
	}
	async countVaultLinksAfter(sinceMs) {
		return countVaultLinksAfter(this.pool, sinceMs);
	}
	async getVaultLinksFromPaths(sourcePaths) {
		return getVaultLinksFromPaths(this.pool, sourcePaths);
	}
	async getVaultLinkEdges() {
		return getVaultLinkEdges(this.pool);
	}
	async getVaultLinkEdgesWithPrefix(prefix) {
		return getVaultLinkEdgesWithPrefix(this.pool, prefix);
	}
	async getVaultLinkEdgesAfter(sinceMs) {
		return getVaultLinkEdgesAfter(this.pool, sinceMs);
	}
	// --- Vault health / connectivity. ---
	async upsertVaultHealth(rows) {
		return upsertVaultHealth(this.pool, rows);
	}
	async getVaultHealth(vaultPath) {
		return getVaultHealth(this.pool, vaultPath);
	}
	async getOrphans() {
		return getOrphans(this.pool);
	}
	async getOrphansWithPrefix(prefix) {
		return getOrphansWithPrefix(this.pool, prefix);
	}
	async getOrphansAfter(sinceMs) {
		return getOrphansAfter(this.pool, sinceMs);
	}
	async getLowConnectivity() {
		return getLowConnectivity(this.pool);
	}
	async getLowConnectivityWithPrefix(prefix) {
		return getLowConnectivityWithPrefix(this.pool, prefix);
	}
	async getLowConnectivityAfter(sinceMs) {
		return getLowConnectivityAfter(this.pool, sinceMs);
	}
	// --- Vault name index. ---
	async upsertNameIndex(entries) {
		return upsertNameIndex(this.pool, entries);
	}
	async replaceNameIndex(entries) {
		return replaceNameIndex(this.pool, entries);
	}
	async resolveVaultName(name) {
		return resolveVaultName(this.pool, name);
	}
	async searchVaultNameIndex(query, limit) {
		return searchVaultNameIndex(this.pool, query, limit);
	}
	// NOTE(review): the three methods below query `memory_chunks`, while every
	// other chunk method in this class uses `pai_chunks` — confirm that
	// memory_chunks exists in init.sql (as a table or view) and is not a stale
	// table name.
	async getChunksWithEmbeddings(projectId, limit) {
		return (await this.pool.query(`SELECT path, text, embedding FROM memory_chunks WHERE project_id = $1 AND embedding IS NOT NULL ORDER BY path, start_line LIMIT $2`, [projectId, limit])).rows;
	}
	async getChunksForPath(projectId, path, limit = 20) {
		return (await this.pool.query(`SELECT text, embedding FROM memory_chunks WHERE project_id = $1 AND path = $2 AND embedding IS NOT NULL ORDER BY start_line LIMIT $3`, [
			projectId,
			path,
			limit
		])).rows;
	}
	async searchChunksByText(projectId, query, limit) {
		return (await this.pool.query(`SELECT DISTINCT path, text FROM memory_chunks WHERE project_id = $1 AND lower(text) LIKE lower($2) LIMIT $3`, [
			projectId,
			`%${query}%`,
			limit
		])).rows;
	}
};
|
|
843
|
+
|
|
844
|
+
//#endregion
|
|
845
|
+
export { PostgresBackend };
|
|
846
|
+
//# sourceMappingURL=postgres-CKf-EDtS.mjs.map
|