brain-cache 0.1.0

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -0,0 +1,62 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ getOllamaVersion,
4
+ isOllamaInstalled,
5
+ isOllamaRunning,
6
+ modelMatches
7
+ } from "./chunk-P7WSTGLE.js";
8
+ import {
9
+ detectCapabilities,
10
+ readProfile
11
+ } from "./chunk-PA4BZBWS.js";
12
+ import {
13
+ PROFILE_PATH
14
+ } from "./chunk-PDQXJSH4.js";
15
+
16
+ // src/workflows/doctor.ts
17
+ import ollama from "ollama";
18
// src/workflows/doctor.ts
/**
 * Diagnose the local brain-cache setup: compare the saved hardware profile
 * against a fresh detection pass and report Ollama install/run/model status
 * to stderr.
 *
 * @throws {Error} when no saved profile exists — the user must run
 *   'brain-cache init' first.
 */
async function runDoctor() {
  const saved = await readProfile();
  if (!saved) {
    throw new Error("No profile found. Run 'brain-cache init' first.");
  }
  // Re-detect hardware so the report can show saved vs. live capabilities.
  const live = await detectCapabilities();
  const installed = await isOllamaInstalled();
  let running = false;
  let version = null;
  if (installed) {
    running = await isOllamaRunning();
    version = await getOllamaVersion();
  }
  // The embedding model counts as loaded only when the server is up and its
  // model list contains a name matching the saved profile's model.
  let modelPresent = false;
  if (running) {
    const { models } = await ollama.list();
    modelPresent = models.some((entry) => modelMatches(entry.name, saved.embeddingModel));
  }
  const fmtVram = (gib) => (gib === null ? "N/A" : `${gib} GiB`);
  // Actionable hint only when the server is reachable but the model is missing.
  const fixHint = running && !modelPresent
    ? `\nFix: run 'brain-cache init' to pull the model, or:\nollama pull ${saved.embeddingModel}\n`
    : "";
  // NOTE(review): the diff rendering this was reconstructed from may have
  // stripped leading indentation inside the report text — verify spacing
  // against the published package output.
  const report = [
    "brain-cache doctor",
    "\u2500".repeat(18),
    `Saved profile: ${PROFILE_PATH}`,
    `GPU: ${saved.gpuVendor} (${fmtVram(saved.vramGiB)})`,
    `VRAM tier: ${saved.vramTier}`,
    `Embedding model: ${saved.embeddingModel}`,
    `Platform: ${saved.platform}`,
    `Detected at: ${saved.detectedAt}`,
    "",
    "Live detection:",
    `GPU: ${live.gpuVendor} (${fmtVram(live.vramGiB)})`,
    `VRAM tier: ${live.vramTier}`,
    "",
    "Ollama:",
    `Installed: ${installed ? "yes" : "no"}`,
    `Running: ${running ? "yes" : "no"}`,
    `Version: ${version ?? "unknown"}`,
    `Model loaded: ${modelPresent ? "yes" : "no"}`,
  ];
  process.stderr.write(report.join("\n") + "\n" + fixHint);
}
60
+ export {
61
+ runDoctor
62
+ };
@@ -0,0 +1,10 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ embedBatch,
4
+ embedBatchWithRetry
5
+ } from "./chunk-WCNMLSL2.js";
6
+ import "./chunk-PDQXJSH4.js";
7
+ export {
8
+ embedBatch,
9
+ embedBatchWithRetry
10
+ };
@@ -0,0 +1,89 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ getOllamaVersion,
4
+ isOllamaInstalled,
5
+ isOllamaRunning,
6
+ pullModelIfMissing,
7
+ startOllama
8
+ } from "./chunk-P7WSTGLE.js";
9
+ import {
10
+ detectCapabilities,
11
+ writeProfile
12
+ } from "./chunk-PA4BZBWS.js";
13
+ import "./chunk-PDQXJSH4.js";
14
+
15
+ // src/workflows/init.ts
16
+ import { existsSync, readFileSync, writeFileSync, appendFileSync } from "fs";
17
// src/workflows/init.ts
/**
 * Initialize brain-cache: detect hardware, ensure Ollama is installed and
 * running, pull and warm the selected embedding model, persist the profile,
 * and seed CLAUDE.md with the Brain-Cache MCP Tools section (idempotently).
 *
 * Progress and errors are written to stderr.
 *
 * @throws {Error} when Ollama is not installed, or cannot be started.
 */
async function runInit() {
  process.stderr.write("brain-cache: detecting hardware capabilities...\n");
  const profile = await detectCapabilities();
  const vram = profile.vramGiB === null ? "N/A" : `${profile.vramGiB} GiB`;
  process.stderr.write(`GPU: ${profile.gpuVendor} (${vram}) | Tier: ${profile.vramTier} | Model: ${profile.embeddingModel}\n`);
  if (profile.vramTier === "none") {
    process.stderr.write("Warning: No GPU detected. Embeddings will run on CPU (slower). Consider using a machine with a GPU for faster indexing.\n");
  }
  // Hard requirement: no Ollama binary means nothing else can proceed.
  if (!(await isOllamaInstalled())) {
    process.stderr.write("Error: Ollama is not installed.\n\nInstall Ollama:\n macOS: brew install ollama\n Linux: curl -fsSL https://ollama.com/install.sh | sh\n\nThen run: brain-cache init\n");
    throw new Error("Ollama is not installed. Install from https://ollama.com then retry brain-cache init.");
  }
  // Best-effort autostart of the server; bail out if that fails too.
  if (!(await isOllamaRunning())) {
    process.stderr.write("Ollama is not running. Starting...\n");
    if (!(await startOllama())) {
      process.stderr.write("Error: Could not start Ollama. Run 'ollama serve' manually, then retry 'brain-cache init'.\n");
      throw new Error("Could not start Ollama. Run 'ollama serve' manually, then retry 'brain-cache init'.");
    }
  }
  const fullProfile = { ...profile, ollamaVersion: await getOllamaVersion() };
  await pullModelIfMissing(fullProfile.embeddingModel);
  process.stderr.write(`brain-cache: warming model ${fullProfile.embeddingModel} into VRAM...\n`);
  // Lazy-load the embedder chunk so init doesn't pay its cost up front; a
  // single tiny embed call forces the model into memory.
  const { embedBatchWithRetry } = await import("./embedder-KRANITVN.js");
  await embedBatchWithRetry(fullProfile.embeddingModel, ["warmup"]);
  process.stderr.write("brain-cache: model warm.\n");
  await writeProfile(fullProfile);
  process.stderr.write(`brain-cache initialized successfully.\nProfile: ~/.brain-cache/profile.json\nEmbedding model: ${fullProfile.embeddingModel}\nVRAM tier: ${fullProfile.vramTier}\n`);
  // Seed (or append to) CLAUDE.md so agent tooling prefers the MCP tools.
  const claudeMdPath = "CLAUDE.md";
  const brainCacheSection = [
    "",
    "## Brain-Cache MCP Tools",
    "",
    'When answering "how does X work", "explain the architecture", or any question requiring multi-file reasoning, call `mcp__brain-cache__build_context` with the question BEFORE reading individual files. It returns pre-built, relevant context with token savings.',
    "",
    "When locating functions, symbols, types, or definitions, call `mcp__brain-cache__search_codebase` instead of using Grep or file-find tools. It searches the local vector index and returns ranked results.",
    "",
  ].join("\n");
  if (!existsSync(claudeMdPath)) {
    // New file: drop the leading blank line so the heading starts at the top.
    writeFileSync(claudeMdPath, brainCacheSection.trimStart());
    process.stderr.write("brain-cache: created CLAUDE.md with Brain-Cache MCP Tools section.\n");
  } else if (readFileSync(claudeMdPath, "utf-8").includes("## Brain-Cache MCP Tools")) {
    process.stderr.write("brain-cache: CLAUDE.md already contains Brain-Cache MCP Tools section, skipping.\n");
  } else {
    appendFileSync(claudeMdPath, brainCacheSection);
    process.stderr.write("brain-cache: appended Brain-Cache MCP Tools section to CLAUDE.md.\n");
  }
}
87
+ export {
88
+ runInit
89
+ };
package/dist/mcp.d.ts ADDED
@@ -0,0 +1,2 @@
1
+
2
+ export { }