brain-cache 0.4.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31) hide show
  1. package/.claude/skills/brain-cache/SKILL.md +52 -0
  2. package/README.md +49 -100
  3. package/dist/{askCodebase-BZIXS3EV.js → askCodebase-EE32B7BP.js} +9 -9
  4. package/dist/buildContext-GWVDAYH6.js +14 -0
  5. package/dist/{chunk-Y7BU7IYX.js → chunk-3HQRTLBH.js} +70 -6
  6. package/dist/{chunk-ZKVZTDND.js → chunk-4IOR54GU.js} +2 -1
  7. package/dist/chunk-6C2OYMKD.js +16 -0
  8. package/dist/{workflows-KYCBR7TC.js → chunk-CY34XQ2O.js} +115 -24
  9. package/dist/chunk-DFFMV3RR.js +171 -0
  10. package/dist/{chunk-PJQNHMQH.js → chunk-DPH5X5HL.js} +1 -1
  11. package/dist/{chunk-FQL4HV4R.js → chunk-HRJ3OT6Q.js} +1 -1
  12. package/dist/chunk-KMRPAVMM.js +967 -0
  13. package/dist/{chunk-KQZSBRRH.js → chunk-RKPICQU7.js} +1 -1
  14. package/dist/{chunk-EEC7KYPY.js → chunk-TXLCXXKY.js} +7 -8
  15. package/dist/claude-md-section-K47HUTE4.js +38 -0
  16. package/dist/cli.js +13 -9
  17. package/dist/{doctor-KRNLXE4R.js → doctor-FCET2MNJ.js} +3 -3
  18. package/dist/{embedder-ZLHAZZUI.js → embedder-HVEXDJAU.js} +2 -2
  19. package/dist/{init-QNN5H3DR.js → init-2E4JMZZC.js} +71 -6
  20. package/dist/mcp.js +1450 -130
  21. package/dist/{search-O4CFAH45.js → search-7ISZ7EXI.js} +16 -15
  22. package/dist/{status-7MT4IROA.js → status-VKTSG2SN.js} +3 -3
  23. package/dist/statusline-script-NFUDFOWK.js +95 -0
  24. package/dist/watch-QPMAB62P.js +128 -0
  25. package/dist/workflows-MWEY7OAI.js +14 -0
  26. package/package.json +5 -1
  27. package/dist/buildContext-APWOPZMJ.js +0 -14
  28. package/dist/chunk-JZQWPHAQ.js +0 -103
  29. package/dist/chunk-SBSMKI4B.js +0 -109
  30. package/dist/chunk-ZGYLHFHJ.js +0 -17
  31. package/dist/claude-md-section-6ZJ3TMO4.js +0 -34
@@ -0,0 +1,52 @@
1
+ ---
2
+ name: brain-cache
3
+ description: "Local codebase embeddings that save tokens and money. Use brain-cache MCP tools instead of reading files or grepping — they return better results with fewer tokens sent to Claude."
4
+ allowed-tools: Bash, Read, Grep
5
+ ---
6
+
7
+ ## What brain-cache does
8
+
9
+ brain-cache indexes your codebase locally using Ollama embeddings — no data leaves your machine. When you need to understand code, it retrieves only the relevant parts and fits them to a token budget before sending anything to Claude. This means Claude sees better, more focused context while you spend fewer tokens per query.
10
+
11
+ Use brain-cache tools before reading files or using Grep/Glob for codebase questions. They return better, token-efficient results.
12
+
13
+ ## Tool routing
14
+
15
+ | Query type | Tool | NOT this |
16
+ |-----------|------|---------|
17
+ | Locate a function, type, or symbol | `search_codebase` | `build_context` |
18
+ | Understand how specific code works across files | `build_context` | file reads |
19
+ | Diagnose brain-cache failures | `doctor` | -- |
20
+ | Reindex the project | `index_repo` | -- |
21
+
22
+ ## search_codebase (locate code)
23
+
24
+ Call `mcp__brain-cache__search_codebase` to find functions, types, definitions, or implementations by meaning rather than keyword match.
25
+
26
+ Use for: "Where is X defined?", "Find the auth middleware", "Which file handles request validation?"
27
+
28
+ Do NOT use for understanding how code works — use `build_context` once you have located the symbol.
29
+
30
+ ## build_context (understand behavior)
31
+
32
+ Call `mcp__brain-cache__build_context` with a focused question about how specific code works. It retrieves semantically relevant code, deduplicates results, and fits them to a token budget.
33
+
34
+ Use for: "How does X work?", "What does this function do?", debugging unfamiliar code paths.
35
+
36
+ Do NOT use for locating symbols — use `search_codebase` first to find where code lives.
37
+
38
+ Do NOT use just to get a file overview — ask a specific behavioral question.
39
+
40
+ ## index_repo (reindex)
41
+
42
+ Call `mcp__brain-cache__index_repo` only when the user explicitly asks to reindex, or after major code changes such as a large refactor or pulling a significant upstream diff.
43
+
44
+ Do not call proactively. Do not call at the start of each session.
45
+
46
+ ## doctor (diagnose issues)
47
+
48
+ Call `mcp__brain-cache__doctor` when any brain-cache tool fails or returns unexpected results. It checks index health and Ollama connectivity and tells you what to fix.
49
+
50
+ ## Status line
51
+
52
+ brain-cache displays cumulative token savings in the Claude Code status bar. After tool calls you will see `brain-cache ↓{pct}% {n} saved` — this confirms cost savings are working. If the status bar shows idle, no tools have been called yet in the current session.
package/README.md CHANGED
@@ -1,65 +1,42 @@
1
1
  # brain-cache
2
2
 
3
- > Stop sending your entire repo to Claude.
3
+ > Your local GPU finally has a job.
4
4
 
5
- brain-cache is an MCP server that gives Claude local, indexed access to your codebase — so it finds what matters instead of reading everything.
6
-
7
- → ~90% fewer tokens sent to Claude
8
- → Sharper, grounded answers
9
- → No data leaves your machine
5
+ brain-cache is a local AI runtime that sits between your codebase and Claude. It runs embeddings and retrieval on your machine — so Claude only sees what actually matters. Fewer tokens. Better answers. Your API bill stops looking like a mortgage payment.
10
6
 
11
7
  ![brain-cache only sends the parts of your codebase that matter — not everything.](assets/brain-cache.svg)
12
8
 
13
9
  ---
14
10
 
15
- ## Use inside Claude Code (MCP)
16
-
17
- The primary way to use brain-cache is as an MCP server. Run `brain-cache init` once — it auto-configures `.mcp.json` in your project root so Claude Code connects immediately. No manual JSON setup needed.
11
+ ## How it works
18
12
 
19
- Claude then has access to:
20
-
21
- - **`build_context`** Assembles relevant context for any question. Use this instead of reading files.
22
- - **`search_codebase`** Finds functions, types, and symbols by meaning, not keyword. Use this instead of grep.
23
- - **`index_repo`** — Rebuilds the local vector index.
24
- - **`doctor`** — Diagnoses index health and Ollama connectivity.
25
-
26
- No copy/pasting code into prompts. No manual file opens. Claude knows where to look.
13
+ 1. Embeds your query locally via Ollama (fast, free, no API calls)
14
+ 2. Retrieves the most relevant code chunks from its local vector index
15
+ 3. Trims and deduplicates the context to fit a tight token budget
16
+ 4. Hands Claude a clean, minimal context — not your entire repo
27
17
 
28
18
  ---
29
19
 
30
- ## The problem
31
-
32
- When you ask Claude about your codebase, you either:
33
-
34
- - paste huge chunks of code ❌
35
- - rely on vague context ❌
36
- - or let tools send way too much ❌
37
-
38
- Result:
39
-
40
- - worse answers
41
- - hallucinations
42
- - massive token usage
20
+ ## Use inside Claude Code (MCP)
43
21
 
44
- ---
22
+ The primary way to use brain-cache is as an MCP server. Run `brain-cache init` once — it auto-configures `.mcp.json` in your project root so Claude Code connects immediately. No manual JSON setup needed.
45
23
 
46
- ## 🧠 How it works
24
+ Claude then has access to:
47
25
 
48
- brain-cache is the layer between your codebase and Claude.
26
+ - **`build_context`** — Assembles relevant context for any question. Use instead of reading files.
27
+ - **`search_codebase`** — Finds functions, types, and symbols by meaning, not keyword. Use instead of grep.
28
+ - **`index_repo`** — Rebuilds the local vector index.
49
29
 
50
- 1. Your code is indexed locally using Ollama embeddings — nothing leaves your machine
51
- 2. When you ask Claude a question, it calls `build_context` or `search_codebase` automatically
52
- 3. brain-cache retrieves only the relevant files, trims duplicates, and fits them to a token budget
53
- 4. Claude gets tight, useful context — not your entire repo
30
+ Also included: **`doctor`** diagnoses index health and Ollama connectivity.
54
31
 
55
- AI should read the right parts and nothing else. brain-cache is the layer that makes that possible.
32
+ No copy/pasting code into prompts. No manual file opens. Claude knows where to look.
56
33
 
57
34
  ---
58
35
 
59
- ## 🔥 Example
36
+ ## Example
60
37
 
61
38
  ```
62
- > "Explain the overall architecture of this project"
39
+ > "How does the auth middleware work?"
63
40
 
64
41
  brain-cache: context assembled (74 tokens, 97% reduction)
65
42
 
@@ -68,11 +45,11 @@ Estimated without: ~2,795
68
45
  Reduction: 97%
69
46
  ```
70
47
 
71
- Claude gets only what matters answers are sharper and grounded.
48
+ Claude gets only what matters — answers are sharper and grounded.
72
49
 
73
50
  ---
74
51
 
75
- ## Quick start
52
+ ## Quick start
76
53
 
77
54
  **Step 1: Install**
78
55
 
@@ -87,11 +64,11 @@ brain-cache init
87
64
  brain-cache index
88
65
  ```
89
66
 
90
- `brain-cache init` sets up your project: configures `.mcp.json` so Claude Code connects to brain-cache automatically, and appends MCP tool instructions to `CLAUDE.md`. Runs once; idempotent.
67
+ `brain-cache init` sets up your project: configures `.mcp.json` so Claude Code connects to brain-cache automatically, appends MCP tool instructions to `CLAUDE.md`, installs the brain-cache skill to `.claude/skills/brain-cache/SKILL.md`, and installs a status line in Claude Code that shows cumulative token savings. Runs once; idempotent.
91
68
 
92
69
  **Step 3: Use Claude normally**
93
70
 
94
- brain-cache tools are called automatically. You dont change how you work — the context just gets better.
71
+ brain-cache tools are called automatically. You don't change how you work — the context just gets better.
95
72
 
96
73
  > **Advanced:** `init` creates `.mcp.json` automatically. If you need to customise it manually, the expected shape is:
97
74
  > ```json
@@ -107,7 +84,24 @@ brain-cache tools are called automatically. You don’t change how you work —
107
84
 
108
85
  ---
109
86
 
110
- ## 📊 Optional: Token savings footer
87
+ ## Install as Claude Code skill
88
+
89
+ brain-cache ships as a Claude Code skill. After `brain-cache init`, the skill is
90
+ installed at `.claude/skills/brain-cache/SKILL.md` in your project. Claude
91
+ automatically learns when and how to use brain-cache tools.
92
+
93
+ To install manually, copy the `.claude/skills/brain-cache/` directory into your
94
+ project root.
95
+
96
+ ---
97
+
98
+ ## Status line
99
+
100
+ After `brain-cache init`, the status line in Claude Code's bottom bar shows your cumulative token savings session by session. You see the reduction without doing anything different.
101
+
102
+ ---
103
+
104
+ ## Optional: Token savings footer
111
105
 
112
106
  brain-cache returns token usage stats in its tool responses (tokens sent, estimated without, reduction %). By default, Claude decides whether to surface these — no footer is forced.
113
107
 
@@ -119,7 +113,7 @@ When using brain-cache build_context, include the token savings summary from the
119
113
 
120
114
  This keeps it transparent and under your control.
121
115
 
122
- ## 🎛 Tuning how much Claude uses brain-cache
116
+ ## Tuning how much Claude uses brain-cache
123
117
 
124
118
  `brain-cache init` adds a section to your project's `CLAUDE.md` with clear instructions to use brain-cache tools first. This works well for most users.
125
119
 
@@ -134,37 +128,7 @@ Or soften it if you prefer Claude to decide on its own. It's your `CLAUDE.md`
134
128
 
135
129
  ---
136
130
 
137
- ## 🧩 Core capabilities
138
-
139
- - 🧠 Local embeddings via Ollama — no API calls, no data sent out
140
- - 🔍 Semantic vector search over your codebase
141
- - ✂️ Context trimming and deduplication
142
- - 🎯 Token budget optimisation
143
- - 🤖 MCP server for Claude Code integration
144
- - ⚡ CLI for setup, debugging, and admin
145
-
146
- ---
147
-
148
- ## 🧠 Why it’s different
149
-
150
- Most AI coding tools:
151
-
152
- - send too much context
153
- - hide retrieval behind hosted services
154
- - require you to prompt-engineer your way to good answers
155
-
156
- brain-cache is:
157
-
158
- - 🏠 Local-first — embeddings run on your machine
159
- - 🔍 Transparent — you can inspect exactly what context gets sent
160
- - 🎯 Token-aware — every call shows the reduction
161
- - ⚙️ Developer-controlled — no vendor lock-in, no cloud dependency
162
-
163
- Think: **Vite, but for LLM context.**
164
-
165
- ---
166
-
167
- ## 🧪 CLI commands
131
+ ## CLI commands
168
132
 
169
133
  The CLI is the setup and admin interface. Use it to init, index, debug, and diagnose — not as the primary interface.
170
134
 
@@ -174,12 +138,13 @@ brain-cache index Build/rebuild the vector index
174
138
  brain-cache search "auth middleware" Manual search (useful for debugging)
175
139
  brain-cache context "auth flow" Manual context building (useful for debugging)
176
140
  brain-cache ask "how does auth work?" Direct Claude query via CLI
141
+ brain-cache status Show index and system status
177
142
  brain-cache doctor Check system health
178
143
  ```
179
144
 
180
145
  ---
181
146
 
182
- ## 📊 Token savings
147
+ ## Token savings
183
148
 
184
149
  Every call shows exactly what was saved:
185
150
 
@@ -187,41 +152,25 @@ Every call shows exactly what was saved:
187
152
  context: 1,240 tokens (93% reduction)
188
153
  ```
189
154
 
190
- Less noise better reasoning cheaper usage.
191
-
192
- ---
193
-
194
- ## 🧠 Built with GSD
195
-
196
- This project uses the GSD (Get Shit Done) framework — an AI-driven workflow for going from idea → research → plan → execution. brain-cache is both a product of that philosophy and a tool that makes it work better: tight context, better outcomes.
197
-
198
- ---
199
-
200
- ## ⚠️ Status
201
-
202
- Early stage — actively improving:
203
-
204
- - ⏳ reranking (planned)
205
- - ⏳ context compression
206
- - ⏳ live indexing (watch mode)
155
+ Less noise — better reasoning — cheaper usage.
207
156
 
208
157
  ---
209
158
 
210
- ## 🛠 Requirements
159
+ ## Requirements
211
160
 
212
- - Node.js 22+
213
- - Ollama running locally (`nomic-embed-text` model)
161
+ - Node.js >= 22
162
+ - Ollama running locally (`nomic-embed-text` model recommended)
214
163
  - Anthropic API key (for `ask` command only)
215
164
 
216
165
  ---
217
166
 
218
- ## ⭐️ If this is useful
167
+ ## If this is useful
219
168
 
220
169
  Give it a star — or try it on your repo and let me know what breaks.
221
170
 
222
171
  ---
223
172
 
224
- ## 📄 License
173
+ ## License
225
174
 
226
175
  MIT — see LICENSE for details.
227
176
 
@@ -1,19 +1,19 @@
1
1
  #!/usr/bin/env node
2
2
  import {
3
3
  runBuildContext
4
- } from "./chunk-JZQWPHAQ.js";
5
- import "./chunk-ZKVZTDND.js";
4
+ } from "./chunk-KMRPAVMM.js";
5
+ import "./chunk-DFFMV3RR.js";
6
+ import "./chunk-4IOR54GU.js";
7
+ import "./chunk-3HQRTLBH.js";
6
8
  import {
7
9
  formatTokenSavings
8
- } from "./chunk-ZGYLHFHJ.js";
9
- import "./chunk-SBSMKI4B.js";
10
- import "./chunk-KQZSBRRH.js";
11
- import "./chunk-FQL4HV4R.js";
12
- import "./chunk-Y7BU7IYX.js";
13
- import "./chunk-PJQNHMQH.js";
10
+ } from "./chunk-6C2OYMKD.js";
11
+ import "./chunk-RKPICQU7.js";
12
+ import "./chunk-HRJ3OT6Q.js";
13
+ import "./chunk-DPH5X5HL.js";
14
14
  import {
15
15
  childLogger
16
- } from "./chunk-EEC7KYPY.js";
16
+ } from "./chunk-TXLCXXKY.js";
17
17
 
18
18
  // src/workflows/askCodebase.ts
19
19
  import Anthropic from "@anthropic-ai/sdk";
@@ -0,0 +1,14 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ runBuildContext
4
+ } from "./chunk-KMRPAVMM.js";
5
+ import "./chunk-DFFMV3RR.js";
6
+ import "./chunk-4IOR54GU.js";
7
+ import "./chunk-3HQRTLBH.js";
8
+ import "./chunk-RKPICQU7.js";
9
+ import "./chunk-HRJ3OT6Q.js";
10
+ import "./chunk-DPH5X5HL.js";
11
+ import "./chunk-TXLCXXKY.js";
12
+ export {
13
+ runBuildContext
14
+ };
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  import {
3
3
  IndexStateSchema
4
- } from "./chunk-PJQNHMQH.js";
4
+ } from "./chunk-DPH5X5HL.js";
5
5
  import {
6
6
  DEFAULT_EMBEDDING_DIMENSION,
7
7
  EMBEDDING_DIMENSIONS,
@@ -9,7 +9,7 @@ import {
9
9
  PROJECT_DATA_DIR,
10
10
  VECTOR_INDEX_THRESHOLD,
11
11
  childLogger
12
- } from "./chunk-EEC7KYPY.js";
12
+ } from "./chunk-TXLCXXKY.js";
13
13
 
14
14
  // src/services/lancedb.ts
15
15
  import * as lancedb from "@lancedb/lancedb";
@@ -18,6 +18,12 @@ import { Schema, Field, Utf8, Int32, Float32, FixedSizeList } from "apache-arrow
18
18
  import { join } from "path";
19
19
  import { readFile, writeFile, mkdir } from "fs/promises";
20
20
  var log = childLogger("lancedb");
21
+ var _writeMutex = Promise.resolve();
22
+ function withWriteLock(fn) {
23
+ const next = _writeMutex.then(() => fn());
24
+ _writeMutex = next.then(() => void 0, () => void 0);
25
+ return next;
26
+ }
21
27
  function chunkSchema(dim) {
22
28
  return new Schema([
23
29
  new Field("id", new Utf8(), false),
@@ -35,6 +41,16 @@ function chunkSchema(dim) {
35
41
  )
36
42
  ]);
37
43
  }
44
+ function edgeSchema() {
45
+ return new Schema([
46
+ new Field("from_chunk_id", new Utf8(), false),
47
+ new Field("from_file", new Utf8(), false),
48
+ new Field("from_symbol", new Utf8(), true),
49
+ new Field("to_symbol", new Utf8(), false),
50
+ new Field("to_file", new Utf8(), true),
51
+ new Field("edge_type", new Utf8(), false)
52
+ ]);
53
+ }
38
54
  async function openDatabase(projectRoot) {
39
55
  const dataDir = join(projectRoot, PROJECT_DATA_DIR);
40
56
  await mkdir(dataDir, { recursive: true });
@@ -52,6 +68,10 @@ async function openOrCreateChunkTable(db, projectRoot, model, dim) {
52
68
  "Embedding model or dimension changed \u2014 dropping and recreating chunks table"
53
69
  );
54
70
  await db.dropTable("chunks");
71
+ if (tableNames.includes("edges")) {
72
+ await db.dropTable("edges");
73
+ log.warn("Also dropped edges table (stale chunk IDs)");
74
+ }
55
75
  } else {
56
76
  log.info({ model, dim }, "Opened existing chunks table");
57
77
  return db.openTable("chunks");
@@ -67,8 +87,10 @@ async function insertChunks(table, rows) {
67
87
  if (rows.length === 0) {
68
88
  return;
69
89
  }
70
- await table.add(rows);
71
- log.debug({ count: rows.length }, "Inserted chunk rows");
90
+ await withWriteLock(async () => {
91
+ await table.add(rows);
92
+ log.debug({ count: rows.length }, "Inserted chunk rows");
93
+ });
72
94
  }
73
95
  async function createVectorIndexIfNeeded(table, embeddingModel) {
74
96
  const rowCount = await table.countRows();
@@ -135,10 +157,49 @@ async function writeFileHashes(projectRoot, hashes) {
135
157
  }
136
158
  async function deleteChunksByFilePath(table, filePath) {
137
159
  const escaped = filePath.replace(/'/g, "''");
138
- await table.delete(`file_path = '${escaped}'`);
160
+ await withWriteLock(async () => {
161
+ await table.delete(`file_path = '${escaped}'`);
162
+ });
163
+ }
164
+ async function openOrCreateEdgesTable(db, opts) {
165
+ const tableNames = await db.tableNames();
166
+ if (tableNames.includes("edges")) {
167
+ if (opts?.shouldReset) {
168
+ log.warn("Resetting edges table (chunks table was recreated)");
169
+ await db.dropTable("edges");
170
+ } else {
171
+ log.info("Opened existing edges table");
172
+ return db.openTable("edges");
173
+ }
174
+ }
175
+ const schema = edgeSchema();
176
+ const emptyData = lancedb.makeArrowTable([], { schema });
177
+ const table = await db.createTable("edges", emptyData, { mode: "overwrite" });
178
+ log.info("Created new edges table");
179
+ return table;
180
+ }
181
+ async function insertEdges(table, edges) {
182
+ if (edges.length === 0) return;
183
+ const rows = edges.map((e) => ({
184
+ from_chunk_id: e.fromChunkId,
185
+ from_file: e.fromFile,
186
+ from_symbol: e.fromSymbol,
187
+ to_symbol: e.toSymbol,
188
+ to_file: e.toFile,
189
+ edge_type: e.edgeType
190
+ }));
191
+ await withWriteLock(async () => {
192
+ await table.add(rows);
193
+ log.debug({ count: rows.length }, "Inserted edge rows");
194
+ });
195
+ }
196
+ async function queryEdgesFrom(edgesTable, fromChunkId) {
197
+ const escaped = fromChunkId.replace(/'/g, "''");
198
+ return edgesTable.query().where(`from_chunk_id = '${escaped}'`).toArray();
139
199
  }
140
200
 
141
201
  export {
202
+ withWriteLock,
142
203
  openDatabase,
143
204
  openOrCreateChunkTable,
144
205
  insertChunks,
@@ -147,5 +208,8 @@ export {
147
208
  writeIndexState,
148
209
  readFileHashes,
149
210
  writeFileHashes,
150
- deleteChunksByFilePath
211
+ deleteChunksByFilePath,
212
+ openOrCreateEdgesTable,
213
+ insertEdges,
214
+ queryEdgesFrom
151
215
  };
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
  import {
3
3
  childLogger
4
- } from "./chunk-EEC7KYPY.js";
4
+ } from "./chunk-TXLCXXKY.js";
5
5
 
6
6
  // src/services/tokenCounter.ts
7
7
  import { countTokens } from "@anthropic-ai/tokenizer";
@@ -36,5 +36,6 @@ function assembleContext(chunks, opts) {
36
36
 
37
37
  export {
38
38
  countChunkTokens,
39
+ formatChunk,
39
40
  assembleContext
40
41
  };
@@ -0,0 +1,16 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/lib/format.ts
4
+ import dedent from "dedent";
5
+ function formatTokenSavings(input) {
6
+ const fileSuffix = input.filesInContext !== 1 ? "s" : "";
7
+ return [
8
+ `Tokens sent to Claude: ${input.tokensSent.toLocaleString()}`,
9
+ `Estimated without: ~${input.estimatedWithout.toLocaleString()} (${input.filesInContext} file${fileSuffix} + overhead)`,
10
+ `Reduction: ${input.reductionPct}%`
11
+ ].join("\n");
12
+ }
13
+
14
+ export {
15
+ formatTokenSavings
16
+ };