brain-cache 0.1.0 → 0.3.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +15 -29
- package/dist/{askCodebase-ECDSSTQ6.js → askCodebase-DTII3Y6P.js} +8 -8
- package/dist/buildContext-JKYV7CCP.js +14 -0
- package/dist/{chunk-PDQXJSH4.js → chunk-3SFDFUEX.js} +5 -1
- package/dist/{chunk-OKWMQNH6.js → chunk-5FXXZBZV.js} +1 -1
- package/dist/{chunk-XXWJ57QP.js → chunk-6MACVOTO.js} +2 -2
- package/dist/{chunk-7JLSJNKU.js → chunk-ABKGOJTC.js} +7 -7
- package/dist/{chunk-ZLB4VJQK.js → chunk-BF5UDEIF.js} +1 -1
- package/dist/{chunk-WCNMLSL2.js → chunk-GR6QXZ4J.js} +6 -8
- package/dist/{chunk-PA4BZBWS.js → chunk-MSI4MDIM.js} +1 -1
- package/dist/{chunk-P7WSTGLE.js → chunk-V4ARVFRG.js} +1 -1
- package/dist/cli.js +8 -8
- package/dist/{doctor-5775VUMA.js → doctor-3RIVSSNB.js} +3 -3
- package/dist/{embedder-KRANITVN.js → embedder-2UG2GDQO.js} +2 -2
- package/dist/{init-TRPFEOHF.js → init-SXC4MWOR.js} +26 -4
- package/dist/mcp.js +195 -177
- package/dist/{search-WKKGPNLV.js → search-BF7QY64J.js} +6 -6
- package/dist/{status-2SOIQ3LX.js → status-JYNMLSXZ.js} +3 -3
- package/dist/{workflows-MJLEPCZY.js → workflows-TWA2GDHJ.js} +194 -176
- package/package.json +1 -1
- package/dist/buildContext-6755TRND.js +0 -14
package/dist/{workflows-MJLEPCZY.js → workflows-TWA2GDHJ.js}
CHANGED

```diff
@@ -4,13 +4,13 @@ import {
 } from "./chunk-GGOUKACO.js";
 import {
   countChunkTokens
-} from "./chunk-OKWMQNH6.js";
+} from "./chunk-5FXXZBZV.js";
 import {
   embedBatchWithRetry
-} from "./chunk-WCNMLSL2.js";
+} from "./chunk-GR6QXZ4J.js";
 import {
   isOllamaRunning
-} from "./chunk-P7WSTGLE.js";
+} from "./chunk-V4ARVFRG.js";
 import {
   createVectorIndexIfNeeded,
   deleteChunksByFilePath,
```
```diff
@@ -20,18 +20,19 @@ import {
   readFileHashes,
   writeFileHashes,
   writeIndexState
-} from "./chunk-XXWJ57QP.js";
+} from "./chunk-6MACVOTO.js";
 import {
   readProfile
-} from "./chunk-PA4BZBWS.js";
+} from "./chunk-MSI4MDIM.js";
 import {
   DEFAULT_BATCH_SIZE,
   DEFAULT_EMBEDDING_DIMENSION,
   EMBEDDING_DIMENSIONS,
   EMBED_MAX_TOKENS,
   FILE_READ_CONCURRENCY,
-  childLogger
-} from "./chunk-PDQXJSH4.js";
+  childLogger,
+  setLogLevel
+} from "./chunk-3SFDFUEX.js";
 
 // src/workflows/index.ts
 import { resolve } from "path";
```
```diff
@@ -259,201 +260,218 @@ function hashContent(content) {
 }
 async function runIndex(targetPath, opts) {
   const force = opts?.force ?? false;
-  const
-
-
-
-
-
-
-
-
-
-
-
-
+  const previousLogLevel = process.env.BRAIN_CACHE_LOG ?? "warn";
+  setLogLevel("silent");
+  const originalStderrWrite = process.stderr.write.bind(process.stderr);
+  process.stderr.write = ((chunk, ...args) => {
+    const str = typeof chunk === "string" ? chunk : chunk.toString();
+    if (/^\[[\d\-T:Z]+ WARN lance/.test(str) || /^\[[\d\-T:Z]+ INFO lance/.test(str)) {
+      return true;
+    }
+    return originalStderrWrite(chunk, ...args);
+  });
+  try {
+    const rootDir = resolve(targetPath ?? ".");
+    const profile = await readProfile();
+    if (profile === null) {
+      throw new Error("No profile found. Run 'brain-cache init' first.");
+    }
+    const running = await isOllamaRunning();
+    if (!running) {
+      throw new Error("Ollama is not running. Start it with 'ollama serve' or run 'brain-cache init'.");
+    }
+    const dim = EMBEDDING_DIMENSIONS[profile.embeddingModel] ?? DEFAULT_EMBEDDING_DIMENSION;
+    if (!(profile.embeddingModel in EMBEDDING_DIMENSIONS)) {
+      process.stderr.write(
+        `Warning: Unknown embedding model '${profile.embeddingModel}', defaulting to ${DEFAULT_EMBEDDING_DIMENSION} dimensions.
 `
-
-
-
-
-
-
+      );
+    }
+    const db = await openDatabase(rootDir);
+    const table = await openOrCreateChunkTable(db, rootDir, profile.embeddingModel, dim);
+    const files = await crawlSourceFiles(rootDir);
+    process.stderr.write(`brain-cache: found ${files.length} source files
 `);
-
-
+    if (files.length === 0) {
+      process.stderr.write(`No source files found in ${rootDir}
 `);
-
-
-
-
-
-
-
-
-
-
-
+      return;
+    }
+    const contentMap = /* @__PURE__ */ new Map();
+    const currentHashes = {};
+    for (let groupStart = 0; groupStart < files.length; groupStart += FILE_READ_CONCURRENCY) {
+      const group = files.slice(groupStart, groupStart + FILE_READ_CONCURRENCY);
+      const results = await Promise.all(
+        group.map(async (filePath) => {
+          const content = await readFile2(filePath, "utf-8");
+          return { filePath, content, hash: hashContent(content) };
+        })
+      );
+      for (const { filePath, content, hash } of results) {
+        contentMap.set(filePath, content);
+        currentHashes[filePath] = hash;
+      }
+    }
+    const storedHashes = force ? {} : await readFileHashes(rootDir);
+    const crawledSet = new Set(files);
+    const newFiles = [];
+    const changedFiles = [];
+    const removedFiles = [];
+    const unchangedFiles = [];
+    for (const filePath of files) {
+      const currentHash = currentHashes[filePath];
+      if (!(filePath in storedHashes)) {
+        newFiles.push(filePath);
+      } else if (storedHashes[filePath] !== currentHash) {
+        changedFiles.push(filePath);
+      } else {
+        unchangedFiles.push(filePath);
+      }
+    }
+    for (const filePath of Object.keys(storedHashes)) {
+      if (!crawledSet.has(filePath)) {
+        removedFiles.push(filePath);
+      }
+    }
+    process.stderr.write(
+      `brain-cache: incremental index -- ${newFiles.length} new, ${changedFiles.length} changed, ${removedFiles.length} removed (${unchangedFiles.length} unchanged)
+`
     );
-  for (const
-
-    currentHashes[filePath] = hash;
+    for (const filePath of [...removedFiles, ...changedFiles]) {
+      await deleteChunksByFilePath(table, filePath);
     }
-
-
-
-  const newFiles = [];
-  const changedFiles = [];
-  const removedFiles = [];
-  const unchangedFiles = [];
-  for (const filePath of files) {
-    const currentHash = currentHashes[filePath];
-    if (!(filePath in storedHashes)) {
-      newFiles.push(filePath);
-    } else if (storedHashes[filePath] !== currentHash) {
-      changedFiles.push(filePath);
-    } else {
-      unchangedFiles.push(filePath);
+    const updatedHashes = { ...storedHashes };
+    for (const filePath of removedFiles) {
+      delete updatedHashes[filePath];
     }
-
-
-
-
+    const filesToProcess = [...newFiles, ...changedFiles];
+    if (filesToProcess.length === 0) {
+      process.stderr.write(`brain-cache: nothing to re-index
+`);
+      for (const filePath of files) {
+        updatedHashes[filePath] = currentHashes[filePath];
+      }
+      await writeFileHashes(rootDir, updatedHashes);
+      const totalFiles2 = unchangedFiles.length;
+      const chunkCount2 = await table.countRows();
+      await writeIndexState(rootDir, {
+        version: 1,
+        embeddingModel: profile.embeddingModel,
+        dimension: dim,
+        indexedAt: (/* @__PURE__ */ new Date()).toISOString(),
+        fileCount: totalFiles2,
+        chunkCount: chunkCount2
+      });
+      process.stderr.write(
+        `brain-cache: indexing complete
+  Files: ${totalFiles2}
+  Chunks: ${chunkCount2}
+  Model: ${profile.embeddingModel}
+  Stored in: ${rootDir}/.brain-cache/
+`
+      );
+      return;
     }
-
-
-
+    let totalRawTokens = 0;
+    let totalChunkTokens = 0;
+    let totalChunks = 0;
+    let processedFiles = 0;
+    let processedChunks = 0;
+    let skippedChunks = 0;
+    for (let groupStart = 0; groupStart < filesToProcess.length; groupStart += FILE_READ_CONCURRENCY) {
+      const group = filesToProcess.slice(groupStart, groupStart + FILE_READ_CONCURRENCY);
+      const groupChunks = [];
+      for (const filePath of group) {
+        const content = contentMap.get(filePath);
+        totalRawTokens += countChunkTokens(content);
+        const chunks = chunkFile(filePath, content);
+        groupChunks.push(...chunks);
+      }
+      processedFiles += group.length;
+      totalChunks += groupChunks.length;
+      if (processedFiles % 10 === 0 || groupStart + FILE_READ_CONCURRENCY >= filesToProcess.length) {
+        process.stderr.write(`brain-cache: chunked ${processedFiles}/${filesToProcess.length} files
+`);
+      }
+      for (let offset = 0; offset < groupChunks.length; offset += DEFAULT_BATCH_SIZE) {
+        const batch = groupChunks.slice(offset, offset + DEFAULT_BATCH_SIZE);
+        const embeddableBatch = batch.filter((chunk) => {
+          const tokens = countChunkTokens(chunk.content);
+          if (tokens > EMBED_MAX_TOKENS) {
+            skippedChunks++;
+            return false;
+          }
+          return true;
+        });
+        if (embeddableBatch.length === 0) continue;
+        const texts = embeddableBatch.map((chunk) => chunk.content);
+        totalChunkTokens += texts.reduce((sum, t) => sum + countChunkTokens(t), 0);
+        const { embeddings: vectors, skipped } = await embedBatchWithRetry(profile.embeddingModel, texts, dim);
+        skippedChunks += skipped;
+        const rows = embeddableBatch.map((chunk, i) => ({
+          id: chunk.id,
+          file_path: chunk.filePath,
+          chunk_type: chunk.chunkType,
+          scope: chunk.scope,
+          name: chunk.name,
+          content: chunk.content,
+          start_line: chunk.startLine,
+          end_line: chunk.endLine,
+          vector: vectors[i]
+        }));
+        await insertChunks(table, rows);
+        processedChunks += batch.length;
+        process.stderr.write(
+          `brain-cache: embedding ${processedChunks}/${totalChunks} chunks (${Math.round(processedChunks / totalChunks * 100)}%)
 `
-
-
-
-
-
-  for (const filePath of removedFiles) {
-    delete updatedHashes[filePath];
-  }
-  const filesToProcess = [...newFiles, ...changedFiles];
-  if (filesToProcess.length === 0) {
-    process.stderr.write(`brain-cache: nothing to re-index
+        );
+      }
+    }
+    if (skippedChunks > 0) {
+      process.stderr.write(`brain-cache: ${skippedChunks} chunks skipped (too large for model context)
 `);
-
+    }
+    process.stderr.write(
+      `brain-cache: ${totalChunks} chunks from ${filesToProcess.length} files
+`
+    );
+    await createVectorIndexIfNeeded(table, profile.embeddingModel);
+    for (const filePath of filesToProcess) {
+      updatedHashes[filePath] = currentHashes[filePath];
+    }
+    for (const filePath of unchangedFiles) {
       updatedHashes[filePath] = currentHashes[filePath];
     }
     await writeFileHashes(rootDir, updatedHashes);
-  const
-  const
+    const totalFiles = files.length;
+    const chunkCount = await table.countRows();
     await writeIndexState(rootDir, {
       version: 1,
       embeddingModel: profile.embeddingModel,
       dimension: dim,
       indexedAt: (/* @__PURE__ */ new Date()).toISOString(),
-    fileCount:
-    chunkCount
+      fileCount: totalFiles,
+      chunkCount
     });
+    const reductionPct = totalRawTokens > 0 ? Math.round((1 - totalChunkTokens / totalRawTokens) * 100) : 0;
+    const savingsBlock = formatTokenSavings({
+      tokensSent: totalChunkTokens,
+      estimatedWithout: totalRawTokens,
+      reductionPct
+    }).split("\n").map((line) => `  ${line}`).join("\n");
     process.stderr.write(
       `brain-cache: indexing complete
-  Files: ${totalFiles2}
-  Chunks: ${chunkCount2}
-  Model: ${profile.embeddingModel}
-  Stored in: ${rootDir}/.brain-cache/
-`
-    );
-    return;
-  }
-  let totalRawTokens = 0;
-  let totalChunkTokens = 0;
-  let totalChunks = 0;
-  let processedFiles = 0;
-  let processedChunks = 0;
-  for (let groupStart = 0; groupStart < filesToProcess.length; groupStart += FILE_READ_CONCURRENCY) {
-    const group = filesToProcess.slice(groupStart, groupStart + FILE_READ_CONCURRENCY);
-    const groupChunks = [];
-    for (const filePath of group) {
-      const content = contentMap.get(filePath);
-      totalRawTokens += countChunkTokens(content);
-      const chunks = chunkFile(filePath, content);
-      groupChunks.push(...chunks);
-    }
-    processedFiles += group.length;
-    totalChunks += groupChunks.length;
-    if (processedFiles % 10 === 0 || groupStart + FILE_READ_CONCURRENCY >= filesToProcess.length) {
-      process.stderr.write(`brain-cache: chunked ${processedFiles}/${filesToProcess.length} files
-`);
-    }
-    for (let offset = 0; offset < groupChunks.length; offset += DEFAULT_BATCH_SIZE) {
-      const batch = groupChunks.slice(offset, offset + DEFAULT_BATCH_SIZE);
-      const embeddableBatch = batch.filter((chunk) => {
-        const tokens = countChunkTokens(chunk.content);
-        if (tokens > EMBED_MAX_TOKENS) {
-          process.stderr.write(
-            `
-brain-cache: skipping oversized chunk (${tokens} tokens > ${EMBED_MAX_TOKENS} limit): ${chunk.filePath} lines ${chunk.startLine}-${chunk.endLine}
-`
-          );
-          return false;
-        }
-        return true;
-      });
-      if (embeddableBatch.length === 0) continue;
-      const texts = embeddableBatch.map((chunk) => chunk.content);
-      totalChunkTokens += texts.reduce((sum, t) => sum + countChunkTokens(t), 0);
-      const vectors = await embedBatchWithRetry(profile.embeddingModel, texts, dim);
-      const rows = embeddableBatch.map((chunk, i) => ({
-        id: chunk.id,
-        file_path: chunk.filePath,
-        chunk_type: chunk.chunkType,
-        scope: chunk.scope,
-        name: chunk.name,
-        content: chunk.content,
-        start_line: chunk.startLine,
-        end_line: chunk.endLine,
-        vector: vectors[i]
-      }));
-      await insertChunks(table, rows);
-      processedChunks += batch.length;
-      process.stderr.write(
-        `\rbrain-cache: embedding ${processedChunks}/${totalChunks} chunks (${Math.round(processedChunks / totalChunks * 100)}%)`
-      );
-    }
-  }
-  process.stderr.write("\n");
-  process.stderr.write(
-    `brain-cache: ${totalChunks} chunks from ${filesToProcess.length} files
-`
-  );
-  await createVectorIndexIfNeeded(table, profile.embeddingModel);
-  for (const filePath of filesToProcess) {
-    updatedHashes[filePath] = currentHashes[filePath];
-  }
-  for (const filePath of unchangedFiles) {
-    updatedHashes[filePath] = currentHashes[filePath];
-  }
-  await writeFileHashes(rootDir, updatedHashes);
-  const totalFiles = files.length;
-  const chunkCount = await table.countRows();
-  await writeIndexState(rootDir, {
-    version: 1,
-    embeddingModel: profile.embeddingModel,
-    dimension: dim,
-    indexedAt: (/* @__PURE__ */ new Date()).toISOString(),
-    fileCount: totalFiles,
-    chunkCount
-  });
-  const reductionPct = totalRawTokens > 0 ? Math.round((1 - totalChunkTokens / totalRawTokens) * 100) : 0;
-  const savingsBlock = formatTokenSavings({
-    tokensSent: totalChunkTokens,
-    estimatedWithout: totalRawTokens,
-    reductionPct
-  }).split("\n").map((line) => `  ${line}`).join("\n");
-  process.stderr.write(
-    `brain-cache: indexing complete
   Files: ${totalFiles}
   Chunks: ${totalChunks}
   Model: ${profile.embeddingModel}
 ${savingsBlock}
   Stored in: ${rootDir}/.brain-cache/
 `
-
+    );
+  } finally {
+    setLogLevel(previousLogLevel);
+    process.stderr.write = originalStderrWrite;
+  }
 }
 export {
   runIndex
```
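
The most invasive change in this file is the logging shim wrapped around `runIndex`: LanceDB writes timestamped `WARN lance.../INFO lance...` lines straight to stderr, and 0.3.0 silences them by swapping out `process.stderr.write` for the duration of the run, restoring it (and the previous log level) in a `finally` block. A minimal sketch of that pattern follows; the regex shape and the restore-in-`finally` structure come from the diff above, while `withLanceLogsSilenced` and `doWork` are illustrative names, not exports of the package:

```ts
// Sketch of the stderr-filtering pattern used by runIndex in 0.3.0.
// The wrapper function itself is hypothetical; the bundle inlines this logic.
async function withLanceLogsSilenced<T>(doWork: () => Promise<T>): Promise<T> {
  const originalWrite = process.stderr.write.bind(process.stderr);
  // Matches lines like "[2024-01-01T00:00:00Z WARN lance...] ..."
  const lanceLine = /^\[[\d\-T:Z]+ (WARN|INFO) lance/;
  process.stderr.write = ((chunk: any, encoding?: any, callback?: any) => {
    const str = typeof chunk === "string" ? chunk : chunk.toString();
    if (lanceLine.test(str)) {
      return true; // swallow the line but report a successful write
    }
    return originalWrite(chunk, encoding, callback);
  }) as typeof process.stderr.write;
  try {
    return await doWork();
  } finally {
    process.stderr.write = originalWrite; // always restore, even on throw
  }
}
```

Restoring in `finally` matters here because `runIndex` throws on a missing profile or a stopped Ollama; without it, one failed run would leave stderr filtered for the rest of the process.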

package/package.json
CHANGED

package/dist/buildContext-6755TRND.js
DELETED
```diff
@@ -1,14 +0,0 @@
-#!/usr/bin/env node
-import {
-  runBuildContext
-} from "./chunk-7JLSJNKU.js";
-import "./chunk-OKWMQNH6.js";
-import "./chunk-ZLB4VJQK.js";
-import "./chunk-WCNMLSL2.js";
-import "./chunk-P7WSTGLE.js";
-import "./chunk-XXWJ57QP.js";
-import "./chunk-PA4BZBWS.js";
-import "./chunk-PDQXJSH4.js";
-export {
-  runBuildContext
-};
```