@ophan/cli 0.0.1 → 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +49 -59
- package/dist/auth.js +3 -3
- package/dist/index.js +381 -22
- package/dist/sync.js +300 -58
- package/dist/watch.js +190 -34
- package/ophan_logo.png +0 -0
- package/package.json +7 -3
- package/dist/sync.test.js +0 -288
- package/dist/test-utils.js +0 -161
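
The central runtime change in dist/sync.js is a new parallelChunks helper that pushes chunked Supabase requests through a p-limit concurrency gate (limit of 3) instead of a sequential loop. A minimal sketch of that pattern in plain JavaScript follows; the helper name, chunk size of 500, and concurrency bound come from the diff below, while the upsertRows callback is a hypothetical placeholder:

    import pLimit from "p-limit";

    // At most 3 requests in flight at once (the same bound the diff gives supabaseLimit).
    const limit = pLimit(3);

    // Split rows into fixed-size chunks and schedule every chunk immediately;
    // p-limit queues the callbacks so only 3 run concurrently.
    async function parallelChunks(rows, chunkSize, fn) {
        const chunks = [];
        for (let i = 0; i < rows.length; i += chunkSize) {
            chunks.push(rows.slice(i, i + chunkSize));
        }
        await Promise.all(chunks.map((chunk) => limit(() => fn(chunk))));
    }

    // Hypothetical usage: push 2,000 rows in chunks of 500, three requests at a time.
    // await parallelChunks(rows, 500, (chunk) => upsertRows(chunk));
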
package/dist/sync.js
CHANGED
@@ -36,13 +36,67 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+exports.getGitBranch = getGitBranch;
+exports.getGitCommitHash = getGitCommitHash;
+exports.getDefaultBranch = getDefaultBranch;
 exports.syncToSupabase = syncToSupabase;
 exports.pullFromSupabase = pullFromSupabase;
 const better_sqlite3_1 = __importDefault(require("better-sqlite3"));
 const child_process_1 = require("child_process");
 const path = __importStar(require("path"));
 const fs = __importStar(require("fs"));
+const p_limit_1 = __importDefault(require("p-limit"));
 const core_1 = require("@ophan/core");
+const supabaseLimit = (0, p_limit_1.default)(3);
+/** Run chunked Supabase operations in parallel with bounded concurrency. */
+async function parallelChunks(rows, chunkSize, fn) {
+    const chunks = [];
+    for (let i = 0; i < rows.length; i += chunkSize) {
+        chunks.push(rows.slice(i, i + chunkSize));
+    }
+    await Promise.all(chunks.map((chunk) => supabaseLimit(() => fn(chunk))));
+}
+function getGitBranch(rootPath) {
+    try {
+        return ((0, child_process_1.execSync)("git rev-parse --abbrev-ref HEAD", {
+            cwd: rootPath,
+            encoding: "utf8",
+            stdio: ["pipe", "pipe", "pipe"],
+        }).trim() || null);
+    }
+    catch {
+        return null;
+    }
+}
+function getGitCommitHash(rootPath) {
+    try {
+        return ((0, child_process_1.execSync)("git rev-parse HEAD", {
+            cwd: rootPath,
+            encoding: "utf8",
+            stdio: ["pipe", "pipe", "pipe"],
+        }).trim() || null);
+    }
+    catch {
+        return null;
+    }
+}
+function getDefaultBranch(rootPath) {
+    try {
+        const ref = (0, child_process_1.execSync)("git symbolic-ref refs/remotes/origin/HEAD", {
+            cwd: rootPath,
+            encoding: "utf8",
+            stdio: ["pipe", "pipe", "pipe"],
+        }).trim();
+        // ref = "refs/remotes/origin/main" → extract "main"
+        const match = ref.match(/refs\/remotes\/origin\/(.+)/);
+        if (match)
+            return match[1];
+    }
+    catch {
+        // No remote, shallow clone, or not a git repo
+    }
+    return "main";
+}
 function getGitRemoteUrl(rootPath) {
     try {
         return ((0, child_process_1.execSync)("git remote get-url origin", {
@@ -58,7 +112,7 @@ function getGitRemoteUrl(rootPath) {
 async function syncToSupabase(rootPath, supabase, userId, onProgress) {
     const dbPath = path.join(rootPath, ".ophan", "index.db");
     if (!fs.existsSync(dbPath)) {
-        throw new Error(`No analysis database found at ${dbPath}\n Run \`ophan
+        throw new Error(`No analysis database found at ${dbPath}\n Run \`npx @ophan/cli init\` first.`);
     }
     const db = new better_sqlite3_1.default(dbPath);
     // Ensure sync_meta table exists (may not if DB was created by older version)
@@ -86,12 +140,42 @@ async function syncToSupabase(rootPath, supabase, userId, onProgress) {
             name: repoName,
             remote_url: remoteUrl,
         }, { onConflict: "user_id,name" })
-            .select("id")
+            .select("id, docs_branch")
             .single();
         if (repoError) {
             throw new Error(`Failed to register repo: ${repoError.message}`);
         }
         const repoId = repo.id;
+        const docsBranch = repo.docs_branch ?? "main";
+        // Determine whether to sync architecture data (communities, edges, summaries).
+        // Gate: only sync when on the configured docs branch.
+        // Non-git repos (branch = null) bypass the gate — always sync.
+        const currentBranch = getGitBranch(rootPath);
+        const syncArchitecture = currentBranch === null || currentBranch === docsBranch;
+        if (!syncArchitecture) {
+            onProgress?.(`Skipping architecture sync (on '${currentBranch}', docs branch is '${docsBranch}')`);
+        }
+        // 1.5. Pull practices from Supabase → local SQLite
+        onProgress?.("Syncing practices...");
+        const { data: practiceRows } = await supabase
+            .from("repo_practices")
+            .select("practice_id, rule, severity")
+            .eq("repo_id", repoId);
+        const practicesSynced = practiceRows?.length ?? 0;
+        if (practiceRows && practiceRows.length > 0) {
+            (0, core_1.importPractices)(db, practiceRows.map((r) => ({
+                id: r.practice_id,
+                rule: r.rule,
+                severity: r.severity,
+            })));
+        }
+        else {
+            // No practices defined — clear local table
+            try {
+                db.prepare("DELETE FROM practices").run();
+            }
+            catch { }
+        }
         // 2. Push unsynced analysis (now includes analysis_type + schema_version)
         onProgress?.("Pushing analysis...");
         const unsynced = db
@@ -110,16 +194,15 @@ async function syncToSupabase(rootPath, supabase, userId, onProgress) {
             language: row.language,
             entity_type: row.entity_type,
         }));
-        // Batch upsert in chunks of 500
-
-            const chunk = rows.slice(i, i + 500);
+        // Batch upsert in chunks of 500 with bounded concurrency
+        await parallelChunks(rows, 500, async (chunk) => {
             const { error } = await supabase
                 .from("function_analysis")
                 .upsert(chunk, { onConflict: "content_hash,repo_id,analysis_type" });
             if (error) {
                 throw new Error(`Failed to push analysis: ${error.message}`);
             }
-        }
+        });
         // Mark as synced locally
         const now = Math.floor(Date.now() / 1000);
         const markSynced = db.prepare("UPDATE function_analysis SET synced_at = ? WHERE content_hash = ? AND analysis_type = ?");
@@ -140,28 +223,37 @@ async function syncToSupabase(rootPath, supabase, userId, onProgress) {
             throw new Error(`Failed to clear locations: ${delError.message}`);
         }
         const locations = db
-            .prepare(`SELECT file_path, function_name, content_hash, language, entity_type
-                      FROM file_functions
+            .prepare(`SELECT file_path, function_name, content_hash, language, entity_type, start_line
+                      FROM file_functions
+                      GROUP BY file_path, function_name`)
             .all();
         if (locations.length > 0) {
+            // Convert absolute paths to repo-relative for cloud storage
+            // (local DB stores absolute paths for file I/O, but cloud paths
+            // must be portable across machines)
+            const rootPrefix = rootPath.endsWith(path.sep)
+                ? rootPath
+                : rootPath + path.sep;
             const locationRows = locations.map((row) => ({
                 repo_id: repoId,
                 user_id: userId,
-                file_path: row.file_path
+                file_path: row.file_path.startsWith(rootPrefix)
+                    ? row.file_path.slice(rootPrefix.length)
+                    : row.file_path,
                 function_name: row.function_name,
                 content_hash: row.content_hash,
                 language: row.language,
                 entity_type: row.entity_type,
+                start_line: row.start_line,
             }));
-
-                const chunk = locationRows.slice(i, i + 500);
+            await parallelChunks(locationRows, 500, async (chunk) => {
                 const { error } = await supabase
                     .from("function_locations")
                     .insert(chunk);
                 if (error) {
                     throw new Error(`Failed to push locations: ${error.message}`);
                 }
-            }
+            });
         }
         // 4. Process GC tombstones
         onProgress?.("Processing garbage collection...");
@@ -169,30 +261,36 @@ async function syncToSupabase(rootPath, supabase, userId, onProgress) {
             .prepare("SELECT content_hash, analysis_type FROM function_gc WHERE synced_at IS NULL")
             .all();
         if (gcRows.length > 0) {
-
-
-
-
-
-
-
-
-
+            // Batch GC deletes using .in() for efficiency
+            const withType = gcRows.filter((r) => r.analysis_type);
+            const withoutType = gcRows.filter((r) => !r.analysis_type);
+            // Delete analysis rows with specific type (group by type)
+            if (withType.length > 0) {
+                const byType = new Map();
+                for (const row of withType) {
+                    const hashes = byType.get(row.analysis_type) || [];
+                    hashes.push(row.content_hash);
+                    byType.set(row.analysis_type, hashes);
                 }
-
-
-
-
-                    .delete()
-                    .eq("content_hash", row.content_hash)
-                    .eq("repo_id", repoId);
-            }
-            await supabase
-                .from("function_locations")
-                .delete()
-                .eq("content_hash", row.content_hash)
-                .eq("repo_id", repoId);
+                await Promise.all([...byType.entries()].map(([type, hashes]) => parallelChunks(hashes, 50, async (chunk) => {
+                    await supabase.from("function_analysis").delete()
+                        .eq("repo_id", repoId).eq("analysis_type", type).in("content_hash", chunk);
+                })));
             }
+            // Delete analysis rows without type (legacy GC entries)
+            if (withoutType.length > 0) {
+                const legacyHashes = withoutType.map((r) => r.content_hash);
+                await parallelChunks(legacyHashes, 50, async (chunk) => {
+                    await supabase.from("function_analysis").delete()
+                        .eq("repo_id", repoId).in("content_hash", chunk);
+                });
+            }
+            // Delete location rows for all GC'd hashes
+            const allGcHashes = gcRows.map((r) => r.content_hash);
+            await parallelChunks(allGcHashes, 50, async (chunk) => {
+                await supabase.from("function_locations").delete()
+                    .eq("repo_id", repoId).in("content_hash", chunk);
+            });
             // Mark GC rows as synced locally, then delete them
             const now = Math.floor(Date.now() / 1000);
             db.transaction(() => {
@@ -200,10 +298,145 @@ async function syncToSupabase(rootPath, supabase, userId, onProgress) {
                 db.prepare("DELETE FROM function_gc WHERE synced_at IS NOT NULL").run();
             })();
         }
+        // 5. Replace communities (full sync, like function_locations)
+        // Steps 5-7 are gated by syncArchitecture (only sync on docs branch)
+        let communitiesSynced = 0;
+        if (!syncArchitecture) {
+            return {
+                pushed: unsynced.length,
+                locations: locations.length,
+                gcProcessed: gcRows.length,
+                practices: practicesSynced,
+                communities: 0,
+                communityEdges: 0,
+                summaries: 0,
+                skippedArchitecture: true,
+                commitHash: null,
+            };
+        }
+        const hasCommunities = db.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='communities'").get();
+        if (hasCommunities) {
+            onProgress?.("Syncing community memberships...");
+            const { error: commDelError } = await supabase
+                .from("communities")
+                .delete()
+                .eq("repo_id", repoId);
+            if (commDelError) {
+                throw new Error(`Failed to clear communities: ${commDelError.message}`);
+            }
+            const communityRows = db.prepare("SELECT content_hash, level, community_id, algorithm FROM communities").all();
+            if (communityRows.length > 0) {
+                const rows = communityRows.map((row) => ({
+                    content_hash: row.content_hash,
+                    level: row.level,
+                    community_id: row.community_id,
+                    algorithm: row.algorithm,
+                    repo_id: repoId,
+                    user_id: userId,
+                }));
+                await parallelChunks(rows, 500, async (chunk) => {
+                    const { error } = await supabase.from("communities").insert(chunk);
+                    if (error) {
+                        throw new Error(`Failed to push communities: ${error.message}`);
+                    }
+                });
+                communitiesSynced = communityRows.length;
+            }
+        }
+        // 6. Replace community_edges (full sync, like communities)
+        let communityEdgesSynced = 0;
+        const hasCommunityEdges = db.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='community_edges'").get();
+        if (hasCommunityEdges) {
+            onProgress?.("Syncing community edges...");
+            const { error: ceDelError } = await supabase
+                .from("community_edges")
+                .delete()
+                .eq("repo_id", repoId);
+            if (ceDelError) {
+                throw new Error(`Failed to clear community edges: ${ceDelError.message}`);
+            }
+            const ceRows = db.prepare("SELECT source_community, target_community, algorithm, weight, edge_count FROM community_edges").all();
+            if (ceRows.length > 0) {
+                const rows = ceRows.map((row) => ({
+                    source_community: row.source_community,
+                    target_community: row.target_community,
+                    algorithm: row.algorithm,
+                    repo_id: repoId,
+                    user_id: userId,
+                    weight: row.weight,
+                    edge_count: row.edge_count,
+                }));
+                await parallelChunks(rows, 500, async (chunk) => {
+                    const { error } = await supabase.from("community_edges").insert(chunk);
+                    if (error) {
+                        throw new Error(`Failed to push community edges: ${error.message}`);
+                    }
+                });
+                communityEdgesSynced = ceRows.length;
+            }
+        }
+        // 7. Replace community summaries (full sync, like communities)
+        let summariesSynced = 0;
+        const hasSummaries = db.prepare("SELECT name FROM sqlite_master WHERE type='table' AND name='community_summaries'").get();
+        if (hasSummaries) {
+            onProgress?.("Syncing community summaries...");
+            const { error: sumDelError } = await supabase
+                .from("community_summaries")
+                .delete()
+                .eq("repo_id", repoId);
+            if (sumDelError) {
+                throw new Error(`Failed to clear summaries: ${sumDelError.message}`);
+            }
+            const summaryRows = db.prepare("SELECT community_id, level, algorithm, input_hash, summary, model_version, created_at FROM community_summaries").all();
+            if (summaryRows.length > 0) {
+                const rows = summaryRows.map((row) => ({
+                    community_id: row.community_id,
+                    level: row.level,
+                    algorithm: row.algorithm,
+                    repo_id: repoId,
+                    user_id: userId,
+                    input_hash: row.input_hash,
+                    summary: JSON.parse(row.summary),
+                    model_version: row.model_version,
+                }));
+                await parallelChunks(rows, 500, async (chunk) => {
+                    const { error } = await supabase
+                        .from("community_summaries")
+                        .insert(chunk);
+                    if (error) {
+                        throw new Error(`Failed to push summaries: ${error.message}`);
+                    }
+                });
+                summariesSynced = summaryRows.length;
+            }
+        }
+        // 8. Record docs sync commit hash (git repos only)
+        const commitHash = getGitCommitHash(rootPath);
+        if (commitHash && currentBranch) {
+            await supabase.from("docs_sync_history").insert({
+                repo_id: repoId,
+                user_id: userId,
+                commit_hash: commitHash,
+                branch: currentBranch,
+            });
+            await supabase
+                .from("repos")
+                .update({
+                    last_docs_sync_commit: commitHash,
+                    last_docs_sync_at: new Date().toISOString(),
+                })
+                .eq("id", repoId);
+        }
         return {
             pushed: unsynced.length,
             locations: locations.length,
             gcProcessed: gcRows.length,
+            practices: practicesSynced,
+            communities: communitiesSynced,
+            communityEdges: communityEdgesSynced,
+            summaries: summariesSynced,
+            skippedArchitecture: false,
+            commitHash,
         };
     }
     finally {
@@ -221,32 +454,41 @@ async function pullFromSupabase(rootPath, supabase, userId, repoId, missingHashe
     const dbPath = path.join(rootPath, ".ophan", "index.db");
     onProgress?.(`Pulling ${missingHashes.length} cached analyses from cloud...`);
     const allRows = [];
-    // Batch queries in chunks of
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    // Batch queries in chunks of 50 with bounded concurrency
+    // (SHA256 hashes are 64 chars each; 50 × 64 ≈ 3.2KB fits within URI limits)
+    const pullChunks = [];
+    for (let i = 0; i < missingHashes.length; i += 50) {
+        pullChunks.push(missingHashes.slice(i, i + 50));
+    }
+    await Promise.all(pullChunks.map((chunk) => supabaseLimit(async () => {
+        try {
+            const { data, error } = await supabase
+                .from("function_analysis")
+                .select("content_hash, analysis_type, analysis, model_version, schema_version, language, entity_type")
+                .eq("repo_id", repoId)
+                .in("content_hash", chunk);
+            if (error) {
+                onProgress?.(`Warning: pull failed: ${error.message}`);
+                return;
+            }
+            if (data) {
+                for (const row of data) {
+                    allRows.push({
+                        content_hash: row.content_hash,
+                        analysis_type: row.analysis_type,
+                        analysis: typeof row.analysis === "string" ? row.analysis : JSON.stringify(row.analysis),
+                        model_version: row.model_version,
+                        schema_version: row.schema_version ?? 1,
+                        language: row.language ?? "typescript",
+                        entity_type: row.entity_type ?? "function",
+                    });
+                }
             }
         }
-
+        catch (err) {
+            onProgress?.(`Warning: pull request failed: ${err?.message ?? String(err)}`);
+        }
+    })));
     if (allRows.length === 0)
         return { pulled: 0 };
     const imported = (0, core_1.importAnalysis)(dbPath, allRows);
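
For orientation, a sketch of how a caller might consume the expanded result object that syncToSupabase now returns; the field names come from the diff above, while the import path, client wiring, and logging are assumptions:

    // Hypothetical caller; assumes an authenticated Supabase client and user id.
    import { syncToSupabase } from "./dist/sync.js"; // illustrative path, not a documented entry point

    async function runSync(rootPath, supabase, userId) {
        const result = await syncToSupabase(rootPath, supabase, userId, (msg) => console.log(msg));
        console.log(`pushed ${result.pushed} analyses, ${result.locations} locations, ${result.gcProcessed} GC'd`);
        console.log(`practices pulled: ${result.practices}`);
        if (result.skippedArchitecture) {
            // Communities, edges, and summaries are skipped when the current
            // branch is not the repo's configured docs branch.
            console.log("architecture sync skipped on this branch");
        }
        else {
            console.log(`communities: ${result.communities}, edges: ${result.communityEdges}, summaries: ${result.summaries}`);
            console.log(`docs sync recorded for commit ${result.commitHash ?? "(not a git repo)"}`);
        }
        return result;
    }
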