claude-eidetic 0.1.3 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -16,8 +16,7 @@ claude plugin install eidetics/claude-eidetic
16
16
  ```
17
17
 
18
18
  ```bash
19
- export OPENAI_API_KEY=sk-... # for embeddings (default)
20
- export ANTHROPIC_API_KEY=sk-ant-... # for memory extraction (default)
19
+ export OPENAI_API_KEY=sk-... # for embeddings (default)
21
20
  ```
22
21
 
23
22
  Index your codebase once, then search by meaning:
@@ -157,16 +156,13 @@ Add to your `.mcp.json`:
157
156
  "command": "npx",
158
157
  "args": ["-y", "claude-eidetic"],
159
158
  "env": {
160
- "OPENAI_API_KEY": "sk-...",
161
- "ANTHROPIC_API_KEY": "sk-ant-..."
159
+ "OPENAI_API_KEY": "sk-..."
162
160
  }
163
161
  }
164
162
  }
165
163
  }
166
164
  ```
167
165
 
168
- `ANTHROPIC_API_KEY` is needed for the memory LLM (default provider). Omit it if using `MEMORY_LLM_PROVIDER=openai` or `ollama`.
169
-
170
166
  ### Global install
171
167
 
172
168
  ```bash
@@ -184,7 +180,7 @@ npm install && npx tsc && npm start
184
180
  ### Requirements
185
181
 
186
182
  - Node.js >= 20.0.0
187
- - An API key (OpenAI for embeddings, Anthropic for memory extraction, or Ollama for both free)
183
+ - An API key (OpenAI for embeddings, or Ollama for free local embeddings)
188
184
  - Docker (optional): Qdrant auto-provisions via Docker if not already running
189
185
  - C/C++ build tools: required by tree-sitter native bindings (`node-gyp`)
190
186
 
@@ -207,8 +203,7 @@ export MEMORY_LLM_PROVIDER=ollama
207
203
 
208
204
  | Variable | Default | Description |
209
205
  |---|---|---|
210
- | `OPENAI_API_KEY` | _(required for openai)_ | OpenAI API key for embeddings and/or memory |
211
- | `ANTHROPIC_API_KEY` | _(required for anthropic memory)_ | Anthropic API key for memory LLM |
206
+ | `OPENAI_API_KEY` | _(required for openai)_ | OpenAI API key for embeddings |
212
207
  | `EMBEDDING_PROVIDER` | `openai` | `openai`, `ollama`, or `local` |
213
208
  | `EMBEDDING_MODEL` | `text-embedding-3-small` (openai) / `nomic-embed-text` (ollama) | Embedding model name |
214
209
  | `EMBEDDING_BATCH_SIZE` | `100` | Batch size for embedding requests (1-2048) |
@@ -223,10 +218,6 @@ export MEMORY_LLM_PROVIDER=ollama
223
218
  | `EIDETIC_DATA_DIR` | `~/.eidetic/` | Data root for snapshots, memory DB, registry |
224
219
  | `CUSTOM_EXTENSIONS` | `[]` | JSON array of extra file extensions to index (e.g., `[".dart",".arb"]`) |
225
220
  | `CUSTOM_IGNORE_PATTERNS` | `[]` | JSON array of glob patterns to exclude |
226
- | `MEMORY_LLM_PROVIDER` | `anthropic` | `anthropic`, `openai`, or `ollama` |
227
- | `MEMORY_LLM_MODEL` | `claude-haiku-4-5-20251001` (anthropic) / `gpt-4o-mini` (openai) / `llama3.2` (ollama) | Model for memory extraction |
228
- | `MEMORY_LLM_BASE_URL` | _(none)_ | Custom base URL for memory LLM |
229
- | `MEMORY_LLM_API_KEY` | _(none)_ | API key override for memory LLM |
230
221
 
231
222
  </details>
232
223
 
package/dist/config.d.ts CHANGED
@@ -15,11 +15,6 @@ declare const configSchema: z.ZodEffects<z.ZodObject<{
15
15
  eideticDataDir: z.ZodDefault<z.ZodString>;
16
16
  customExtensions: z.ZodEffects<z.ZodDefault<z.ZodArray<z.ZodString, "many">>, string[], unknown>;
17
17
  customIgnorePatterns: z.ZodEffects<z.ZodDefault<z.ZodArray<z.ZodString, "many">>, string[], unknown>;
18
- memoryLlmProvider: z.ZodDefault<z.ZodEnum<["openai", "ollama", "anthropic"]>>;
19
- memoryLlmModel: z.ZodOptional<z.ZodString>;
20
- memoryLlmBaseUrl: z.ZodOptional<z.ZodString>;
21
- memoryLlmApiKey: z.ZodOptional<z.ZodString>;
22
- anthropicApiKey: z.ZodDefault<z.ZodString>;
23
18
  }, "strip", z.ZodTypeAny, {
24
19
  embeddingProvider: "openai" | "ollama" | "local";
25
20
  openaiApiKey: string;
@@ -32,15 +27,10 @@ declare const configSchema: z.ZodEffects<z.ZodObject<{
32
27
  eideticDataDir: string;
33
28
  customExtensions: string[];
34
29
  customIgnorePatterns: string[];
35
- memoryLlmProvider: "openai" | "ollama" | "anthropic";
36
- anthropicApiKey: string;
37
30
  openaiBaseUrl?: string | undefined;
38
31
  embeddingModel?: string | undefined;
39
32
  qdrantApiKey?: string | undefined;
40
33
  milvusToken?: string | undefined;
41
- memoryLlmModel?: string | undefined;
42
- memoryLlmBaseUrl?: string | undefined;
43
- memoryLlmApiKey?: string | undefined;
44
34
  }, {
45
35
  embeddingProvider?: "openai" | "ollama" | "local" | undefined;
46
36
  openaiApiKey?: string | undefined;
@@ -57,14 +47,8 @@ declare const configSchema: z.ZodEffects<z.ZodObject<{
57
47
  eideticDataDir?: string | undefined;
58
48
  customExtensions?: unknown;
59
49
  customIgnorePatterns?: unknown;
60
- memoryLlmProvider?: "openai" | "ollama" | "anthropic" | undefined;
61
- memoryLlmModel?: string | undefined;
62
- memoryLlmBaseUrl?: string | undefined;
63
- memoryLlmApiKey?: string | undefined;
64
- anthropicApiKey?: string | undefined;
65
50
  }>, {
66
51
  embeddingModel: string;
67
- memoryLlmModel: string;
68
52
  embeddingProvider: "openai" | "ollama" | "local";
69
53
  openaiApiKey: string;
70
54
  ollamaBaseUrl: string;
@@ -76,13 +60,9 @@ declare const configSchema: z.ZodEffects<z.ZodObject<{
76
60
  eideticDataDir: string;
77
61
  customExtensions: string[];
78
62
  customIgnorePatterns: string[];
79
- memoryLlmProvider: "openai" | "ollama" | "anthropic";
80
- anthropicApiKey: string;
81
63
  openaiBaseUrl?: string | undefined;
82
64
  qdrantApiKey?: string | undefined;
83
65
  milvusToken?: string | undefined;
84
- memoryLlmBaseUrl?: string | undefined;
85
- memoryLlmApiKey?: string | undefined;
86
66
  }, {
87
67
  embeddingProvider?: "openai" | "ollama" | "local" | undefined;
88
68
  openaiApiKey?: string | undefined;
@@ -99,11 +79,6 @@ declare const configSchema: z.ZodEffects<z.ZodObject<{
99
79
  eideticDataDir?: string | undefined;
100
80
  customExtensions?: unknown;
101
81
  customIgnorePatterns?: unknown;
102
- memoryLlmProvider?: "openai" | "ollama" | "anthropic" | undefined;
103
- memoryLlmModel?: string | undefined;
104
- memoryLlmBaseUrl?: string | undefined;
105
- memoryLlmApiKey?: string | undefined;
106
- anthropicApiKey?: string | undefined;
107
82
  }>;
108
83
  export type Config = z.infer<typeof configSchema>;
109
84
  export declare function loadConfig(): Config;
package/dist/config.js CHANGED
@@ -19,24 +19,12 @@ const configSchema = z
19
19
  eideticDataDir: z.string().default(path.join(os.homedir(), '.eidetic')),
20
20
  customExtensions: z.preprocess((val) => (typeof val === 'string' ? JSON.parse(val) : val), z.array(z.string()).default([])),
21
21
  customIgnorePatterns: z.preprocess((val) => (typeof val === 'string' ? JSON.parse(val) : val), z.array(z.string()).default([])),
22
- memoryLlmProvider: z.enum(['openai', 'ollama', 'anthropic']).default('anthropic'),
23
- memoryLlmModel: z.string().optional(),
24
- memoryLlmBaseUrl: z.string().optional(),
25
- memoryLlmApiKey: z.string().optional(),
26
- anthropicApiKey: z.string().default(''),
27
22
  })
28
23
  .transform((cfg) => ({
29
24
  ...cfg,
30
25
  // Default embedding model depends on provider
31
26
  embeddingModel: cfg.embeddingModel ??
32
27
  (cfg.embeddingProvider === 'ollama' ? 'nomic-embed-text' : 'text-embedding-3-small'),
33
- // Default memory LLM model depends on provider
34
- memoryLlmModel: cfg.memoryLlmModel ??
35
- (cfg.memoryLlmProvider === 'ollama'
36
- ? 'llama3.2'
37
- : cfg.memoryLlmProvider === 'anthropic'
38
- ? 'claude-haiku-4-5-20251001'
39
- : 'gpt-4o-mini'),
40
28
  }));
41
29
  let cachedConfig = null;
42
30
  export function loadConfig() {
@@ -56,11 +44,6 @@ export function loadConfig() {
56
44
  eideticDataDir: process.env.EIDETIC_DATA_DIR,
57
45
  customExtensions: process.env.CUSTOM_EXTENSIONS,
58
46
  customIgnorePatterns: process.env.CUSTOM_IGNORE_PATTERNS,
59
- memoryLlmProvider: process.env.MEMORY_LLM_PROVIDER,
60
- memoryLlmModel: process.env.MEMORY_LLM_MODEL || undefined,
61
- memoryLlmBaseUrl: process.env.MEMORY_LLM_BASE_URL || undefined,
62
- memoryLlmApiKey: process.env.MEMORY_LLM_API_KEY?.trim().replace(/^["']|["']$/g, '') || undefined,
63
- anthropicApiKey: (process.env.ANTHROPIC_API_KEY ?? '').trim().replace(/^["']|["']$/g, ''),
64
47
  };
65
48
  const result = configSchema.safeParse(raw);
66
49
  if (!result.success) {
@@ -148,7 +148,7 @@ export class OpenAIEmbedding {
148
148
  }
149
149
  async callWithRetry(texts) {
150
150
  let currentBatchSize = texts.length;
151
- for (let attempt = 0; attempt <= RETRY_DELAYS.length; attempt++) {
151
+ for (let attempt = 0; attempt < RETRY_DELAYS.length + 1; attempt++) {
152
152
  try {
153
153
  const allResults = [];
154
154
  for (let offset = 0; offset < texts.length; offset += currentBatchSize) {
package/dist/index.js CHANGED
@@ -58,7 +58,7 @@ const WORKFLOW_GUIDANCE = `# Eidetic Code Search Workflow
58
58
  - Stale docs (past TTL) still return results but are flagged \`[STALE]\`
59
59
 
60
60
  **Persistent memory (cross-session developer knowledge):**
61
- - \`add_memory(content="...")\` → extracts facts about coding style, tools, architecture, etc.
61
+ - \`add_memory(facts=[{fact:"...", category:"..."}])\` → stores pre-extracted facts about coding style, tools, architecture, etc.
62
62
  - \`search_memory(query="...")\` → find relevant memories by semantic search
63
63
  - \`list_memories()\` → see all stored memories grouped by category
64
64
  - \`delete_memory(id="...")\` → remove a specific memory
@@ -1,6 +1,6 @@
1
1
  import type { Embedding } from '../embedding/types.js';
2
2
  import type { VectorDB } from '../vectordb/types.js';
3
- import type { MemoryItem, MemoryAction } from './types.js';
3
+ import type { MemoryItem, MemoryAction, ExtractedFact } from './types.js';
4
4
  import { MemoryHistory } from './history.js';
5
5
  export declare class MemoryStore {
6
6
  private embedding;
@@ -9,12 +9,11 @@ export declare class MemoryStore {
9
9
  private initialized;
10
10
  constructor(embedding: Embedding, vectordb: VectorDB, history: MemoryHistory);
11
11
  private ensureCollection;
12
- addMemory(content: string, source?: string): Promise<MemoryAction[]>;
12
+ addMemory(facts: ExtractedFact[], source?: string): Promise<MemoryAction[]>;
13
13
  searchMemory(query: string, limit?: number, category?: string): Promise<MemoryItem[]>;
14
14
  listMemories(category?: string, limit?: number): Promise<MemoryItem[]>;
15
15
  deleteMemory(id: string): Promise<boolean>;
16
16
  getHistory(memoryId: string): import("./history.js").HistoryEntry[];
17
- private extractFacts;
18
17
  private processFact;
19
18
  }
20
19
  //# sourceMappingURL=store.d.ts.map
@@ -1,6 +1,4 @@
1
1
  import { randomUUID } from 'node:crypto';
2
- import { chatCompletion } from './llm.js';
3
- import { buildSystemPrompt, buildExtractionPrompt } from './prompts.js';
4
2
  import { hashMemory, reconcile } from './reconciler.js';
5
3
  const COLLECTION_NAME = 'eidetic_memory';
6
4
  const SEARCH_CANDIDATES = 5;
@@ -29,9 +27,8 @@ export class MemoryStore {
29
27
  }
30
28
  this.initialized = true;
31
29
  }
32
- async addMemory(content, source) {
30
+ async addMemory(facts, source) {
33
31
  await this.ensureCollection();
34
- const facts = await this.extractFacts(content);
35
32
  if (facts.length === 0)
36
33
  return [];
37
34
  const actions = [];
@@ -94,23 +91,6 @@ export class MemoryStore {
94
91
  getHistory(memoryId) {
95
92
  return this.history.getHistory(memoryId);
96
93
  }
97
- async extractFacts(content) {
98
- const userMessage = buildExtractionPrompt(content);
99
- const response = await chatCompletion(buildSystemPrompt(), userMessage);
100
- try {
101
- const parsed = JSON.parse(response);
102
- const facts = parsed.facts;
103
- if (!Array.isArray(facts))
104
- return [];
105
- return facts.filter((f) => typeof f === 'object' &&
106
- f !== null &&
107
- typeof f.fact === 'string' &&
108
- typeof f.category === 'string');
109
- }
110
- catch {
111
- return [];
112
- }
113
- }
114
94
  async processFact(fact, source) {
115
95
  const hash = hashMemory(fact.fact);
116
96
  const vector = await this.embedding.embed(fact.fact);
@@ -3,7 +3,7 @@
3
3
  * Hook entry point for PreCompact and SessionEnd events.
4
4
  *
5
5
  * PreCompact: Parses transcript, writes session note, updates index, spawns background indexer.
6
 - * SessionEnd: Same as PreCompact + runs memory extraction pipeline (semantic facts → Qdrant).
6
+ * SessionEnd: Same as PreCompact (writes session note if not already captured by PreCompact).
7
7
  */
8
8
  export {};
9
9
  //# sourceMappingURL=hook.d.ts.map
@@ -3,7 +3,7 @@
3
3
  * Hook entry point for PreCompact and SessionEnd events.
4
4
  *
5
5
  * PreCompact: Parses transcript, writes session note, updates index, spawns background indexer.
6
 - * SessionEnd: Same as PreCompact + runs memory extraction pipeline (semantic facts → Qdrant).
6
+ * SessionEnd: Same as PreCompact (writes session note if not already captured by PreCompact).
7
7
  */
8
8
  import { z } from 'zod';
9
9
  import path from 'node:path';
@@ -68,14 +68,11 @@ async function main() {
68
68
  updateSessionIndex(notesDir, session, noteFile);
69
69
  spawnBackgroundIndexer(notesDir, INDEX_RUNNER_PATH);
70
70
  }
71
- // Run memory extraction (best-effort — graceful failure if Qdrant unavailable)
72
- const memoryActions = await extractMemories(session);
73
71
  outputSuccess({
74
72
  noteFile,
75
73
  skippedNote,
76
74
  filesModified: session.filesModified.length,
77
75
  tasksCreated: session.tasksCreated.length,
78
- memoriesExtracted: memoryActions,
79
76
  });
80
77
  }
81
78
  else {
@@ -94,61 +91,6 @@ async function main() {
94
91
  outputError(err instanceof Error ? err.message : String(err));
95
92
  }
96
93
  }
97
- /**
98
- * Build content string for memory extraction from an ExtractedSession.
99
- */
100
- function buildMemoryContent(session) {
101
- const parts = [];
102
- if (session.userMessages.length > 0) {
103
- parts.push('User messages:');
104
- session.userMessages.forEach((msg, i) => {
105
- parts.push(`${i + 1}. ${msg}`);
106
- });
107
- }
108
- if (session.filesModified.length > 0) {
109
- parts.push(`\nFiles modified: ${session.filesModified.join(', ')}`);
110
- }
111
- if (session.tasksCreated.length > 0) {
112
- parts.push(`Tasks: ${session.tasksCreated.join(', ')}`);
113
- }
114
- if (session.branch) {
115
- parts.push(`Branch: ${session.branch}`);
116
- }
117
- return parts.join('\n');
118
- }
119
- /**
120
- * Run memory extraction pipeline. Returns count of actions taken.
121
- * Fails gracefully — logs to stderr if Qdrant or LLM unavailable.
122
- */
123
- async function extractMemories(session) {
124
- const content = buildMemoryContent(session);
125
- if (!content.trim())
126
- return 0;
127
- try {
128
- // Dynamic imports to avoid loading heavy deps on every hook invocation
129
- const [{ loadConfig }, { createEmbedding }, { QdrantVectorDB }, { MemoryHistory }, { MemoryStore }, { getMemoryDbPath },] = await Promise.all([
130
- import('../config.js'),
131
- import('../embedding/factory.js'),
132
- import('../vectordb/qdrant.js'),
133
- import('../memory/history.js'),
134
- import('../memory/store.js'),
135
- import('../paths.js'),
136
- ]);
137
- const config = loadConfig();
138
- const embedding = createEmbedding(config);
139
- await embedding.initialize();
140
- const vectordb = new QdrantVectorDB();
141
- const history = new MemoryHistory(getMemoryDbPath());
142
- const memoryStore = new MemoryStore(embedding, vectordb, history);
143
- const actions = await memoryStore.addMemory(content, 'session-end-hook');
144
- process.stderr.write(`[eidetic] Memory extraction: ${actions.length} action(s) (${actions.map((a) => a.event).join(', ') || 'none'})\n`);
145
- return actions.length;
146
- }
147
- catch (err) {
148
- process.stderr.write(`[eidetic] Memory extraction failed (non-fatal): ${err instanceof Error ? err.message : String(err)}\n`);
149
- return 0;
150
- }
151
- }
152
94
  async function readStdin() {
153
95
  const chunks = [];
154
96
  for await (const chunk of process.stdin) {
@@ -0,0 +1,10 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * Inject stored memories at SessionStart.
4
+ * Called by session-start hook to surface previously learned knowledge.
5
+ *
6
+ * Outputs markdown to stdout for hook to capture and inject into session.
7
+ */
8
+ import type { MemoryItem } from '../memory/types.js';
9
+ export declare function formatMemoryContext(memories: MemoryItem[]): string;
10
+ //# sourceMappingURL=memory-inject.d.ts.map
@@ -0,0 +1,86 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * Inject stored memories at SessionStart.
4
+ * Called by session-start hook to surface previously learned knowledge.
5
+ *
6
+ * Outputs markdown to stdout for hook to capture and inject into session.
7
+ */
8
+ import { execSync } from 'node:child_process';
9
+ import path from 'node:path';
10
+ async function main() {
11
+ try {
12
+ // Get cwd from environment (set by Claude Code) or detect from git
13
+ const cwd = process.env.CLAUDE_CWD || process.cwd();
14
+ // Detect project root from git
15
+ const projectPath = detectProjectRoot(cwd);
16
+ if (!projectPath) {
17
+ // Not in a git repo, nothing to inject
18
+ return;
19
+ }
20
+ const projectName = path.basename(projectPath);
21
+ // Dynamic imports — avoid loading heavy deps if not needed
22
+ const [{ loadConfig }, { createEmbedding }, { MemoryHistory }, { MemoryStore }] = await Promise.all([
23
+ import('../config.js'),
24
+ import('../embedding/factory.js'),
25
+ import('../memory/history.js'),
26
+ import('../memory/store.js'),
27
+ ]);
28
+ const config = loadConfig();
29
+ const embedding = createEmbedding(config);
30
+ await embedding.initialize();
31
+ // Respect VECTORDB_PROVIDER — connect without bootstrapping (hook assumes DB is already running)
32
+ let vectordb;
33
+ if (config.vectordbProvider === 'milvus') {
34
+ const { MilvusVectorDB } = await import('../vectordb/milvus.js');
35
+ vectordb = new MilvusVectorDB();
36
+ }
37
+ else {
38
+ const { QdrantVectorDB } = await import('../vectordb/qdrant.js');
39
+ vectordb = new QdrantVectorDB(config.qdrantUrl, config.qdrantApiKey);
40
+ }
41
+ // Quick-exit if no memory collection exists
42
+ const exists = await vectordb.hasCollection('eidetic_memory');
43
+ if (!exists) {
44
+ return;
45
+ }
46
+ const { getMemoryDbPath } = await import('../paths.js');
47
+ const history = new MemoryHistory(getMemoryDbPath());
48
+ const store = new MemoryStore(embedding, vectordb, history);
49
+ const memories = await store.searchMemory(`${projectName} development knowledge`, 7);
50
+ if (memories.length === 0) {
51
+ return;
52
+ }
53
+ process.stdout.write(formatMemoryContext(memories));
54
+ }
55
+ catch (err) {
56
+ // Write to stderr for debugging, but don't break session start
57
+ process.stderr.write(`Memory inject failed: ${String(err)}\n`);
58
+ }
59
+ }
60
+ function detectProjectRoot(cwd) {
61
+ try {
62
+ const result = execSync('git rev-parse --show-toplevel', {
63
+ cwd,
64
+ encoding: 'utf-8',
65
+ stdio: ['pipe', 'pipe', 'pipe'],
66
+ });
67
+ return result.trim();
68
+ }
69
+ catch {
70
+ return null;
71
+ }
72
+ }
73
+ export function formatMemoryContext(memories) {
74
+ const lines = [];
75
+ lines.push('## Remembered Knowledge');
76
+ for (const m of memories) {
77
+ const category = m.category ? `[${m.category}] ` : '';
78
+ lines.push(`- ${category}${m.memory}`);
79
+ }
80
+ lines.push('');
81
+ lines.push('_search_memory(query) for more. add_memory(facts) to save new findings._');
82
+ lines.push('');
83
+ return lines.join('\n');
84
+ }
85
+ void main();
86
+ //# sourceMappingURL=memory-inject.js.map
@@ -229,20 +229,35 @@ export declare const TOOL_DEFINITIONS: readonly [{
229
229
  };
230
230
  }, {
231
231
  readonly name: "add_memory";
232
- readonly description: "Extract and store developer knowledge from text. Uses LLM to identify facts about coding style, tools, architecture, conventions, debugging insights, and workflow preferences. Automatically deduplicates against existing memories.";
232
+ readonly description: "Store pre-extracted developer knowledge facts. Before calling, extract facts yourself from the relevant content. Each fact should be a concise, self-contained statement about coding style, tools, architecture, conventions, debugging insights, or workflow preferences. Automatically deduplicates against existing memories.";
233
233
  readonly inputSchema: {
234
234
  readonly type: "object";
235
235
  readonly properties: {
236
- readonly content: {
237
- readonly type: "string";
238
- readonly description: "Text containing developer knowledge to extract and store (conversation snippets, notes, preferences).";
236
+ readonly facts: {
237
+ readonly type: "array";
238
+ readonly description: "Array of facts to store. Extract these yourself before calling. Each fact must be a concise, self-contained statement.";
239
+ readonly items: {
240
+ readonly type: "object";
241
+ readonly properties: {
242
+ readonly fact: {
243
+ readonly type: "string";
244
+ readonly description: "A concise, self-contained statement of a developer preference or convention.";
245
+ };
246
+ readonly category: {
247
+ readonly type: "string";
248
+ readonly description: "Category: coding_style, tools, architecture, conventions, debugging, workflow, or preferences.";
249
+ readonly enum: readonly ["coding_style", "tools", "architecture", "conventions", "debugging", "workflow", "preferences"];
250
+ };
251
+ };
252
+ readonly required: readonly ["fact", "category"];
253
+ };
239
254
  };
240
255
  readonly source: {
241
256
  readonly type: "string";
242
257
  readonly description: "Optional source identifier (e.g., \"conversation\", \"claude-code\", \"user-note\").";
243
258
  };
244
259
  };
245
- readonly required: readonly ["content"];
260
+ readonly required: readonly ["facts"];
246
261
  };
247
262
  }, {
248
263
  readonly name: "search_memory";
@@ -289,20 +289,43 @@ export const TOOL_DEFINITIONS = [
289
289
  },
290
290
  {
291
291
  name: 'add_memory',
292
- description: 'Extract and store developer knowledge from text. Uses LLM to identify facts about coding style, tools, architecture, conventions, debugging insights, and workflow preferences. Automatically deduplicates against existing memories.',
292
+ description: 'Store pre-extracted developer knowledge facts. Before calling, extract facts yourself from the relevant content. Each fact should be a concise, self-contained statement about coding style, tools, architecture, conventions, debugging insights, or workflow preferences. Automatically deduplicates against existing memories.',
293
293
  inputSchema: {
294
294
  type: 'object',
295
295
  properties: {
296
- content: {
297
- type: 'string',
298
- description: 'Text containing developer knowledge to extract and store (conversation snippets, notes, preferences).',
296
+ facts: {
297
+ type: 'array',
298
+ description: 'Array of facts to store. Extract these yourself before calling. Each fact must be a concise, self-contained statement.',
299
+ items: {
300
+ type: 'object',
301
+ properties: {
302
+ fact: {
303
+ type: 'string',
304
+ description: 'A concise, self-contained statement of a developer preference or convention.',
305
+ },
306
+ category: {
307
+ type: 'string',
308
+ description: 'Category: coding_style, tools, architecture, conventions, debugging, workflow, or preferences.',
309
+ enum: [
310
+ 'coding_style',
311
+ 'tools',
312
+ 'architecture',
313
+ 'conventions',
314
+ 'debugging',
315
+ 'workflow',
316
+ 'preferences',
317
+ ],
318
+ },
319
+ },
320
+ required: ['fact', 'category'],
321
+ },
299
322
  },
300
323
  source: {
301
324
  type: 'string',
302
325
  description: 'Optional source identifier (e.g., "conversation", "claude-code", "user-note").',
303
326
  },
304
327
  },
305
- required: ['content'],
328
+ required: ['facts'],
306
329
  },
307
330
  },
308
331
  {
package/dist/tools.js CHANGED
@@ -257,12 +257,12 @@ export class ToolHandlers {
257
257
  async handleAddMemory(args) {
258
258
  if (!this.memoryStore)
259
259
  return textResult('Error: Memory system not initialized.');
260
- const content = args.content;
261
- if (!content)
262
- return textResult('Error: "content" is required. Provide text containing developer knowledge to extract.');
260
+ const facts = args.facts;
261
+ if (!facts || !Array.isArray(facts) || facts.length === 0)
262
+ return textResult('Error: "facts" is required. Provide an array of pre-extracted facts with fact and category fields.');
263
263
  const source = args.source;
264
264
  try {
265
- const actions = await this.memoryStore.addMemory(content, source);
265
+ const actions = await this.memoryStore.addMemory(facts, source);
266
266
  return textResult(formatMemoryActions(actions));
267
267
  }
268
268
  catch (err) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-eidetic",
3
- "version": "0.1.3",
3
+ "version": "0.1.4",
4
4
  "description": "Semantic code search MCP server — lean, correct, fast",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -37,7 +37,6 @@
37
37
  "release": "bash scripts/release.sh"
38
38
  },
39
39
  "dependencies": {
40
- "@anthropic-ai/sdk": "^0.78.0",
41
40
  "@modelcontextprotocol/sdk": "^1.12.1",
42
41
  "@qdrant/js-client-rest": "^1.13.0",
43
42
  "better-sqlite3": "^12.6.2",
@@ -81,7 +80,7 @@
81
80
  },
82
81
  "repository": {
83
82
  "type": "git",
84
- "url": "https://github.com/eidetics/claude-eidetic"
83
+ "url": "https://github.com/eidetics/claude-eidetic.git"
85
84
  },
86
85
  "license": "MIT"
87
86
  }
@@ -1,2 +0,0 @@
1
- export declare function chatCompletion(systemPrompt: string, userMessage: string): Promise<string>;
2
- //# sourceMappingURL=llm.d.ts.map
@@ -1,56 +0,0 @@
1
- import OpenAI from 'openai';
2
- import Anthropic from '@anthropic-ai/sdk';
3
- import { getConfig } from '../config.js';
4
- import { MemoryError } from '../errors.js';
5
- export async function chatCompletion(systemPrompt, userMessage) {
6
- const config = getConfig();
7
- if (config.memoryLlmProvider === 'anthropic') {
8
- const apiKey = config.memoryLlmApiKey ?? config.anthropicApiKey ?? config.openaiApiKey;
9
- if (!apiKey) {
10
- throw new MemoryError('No API key configured for memory LLM. Set MEMORY_LLM_API_KEY, ANTHROPIC_API_KEY, or OPENAI_API_KEY.');
11
- }
12
- const client = new Anthropic({ apiKey });
13
- try {
14
- const response = await client.messages.create({
15
- model: config.memoryLlmModel,
16
- max_tokens: 2048,
17
- system: systemPrompt,
18
- messages: [{ role: 'user', content: userMessage }],
19
- });
20
- const block = response.content[0];
21
- return block?.type === 'text' ? block.text : '{}';
22
- }
23
- catch (err) {
24
- throw new MemoryError('Memory LLM call failed', err);
25
- }
26
- }
27
- // OpenAI / Ollama path
28
- const apiKey = config.memoryLlmApiKey ?? config.openaiApiKey;
29
- if (!apiKey) {
30
- throw new MemoryError('No API key configured for memory LLM. Set MEMORY_LLM_API_KEY or OPENAI_API_KEY.');
31
- }
32
- let baseURL;
33
- if (config.memoryLlmBaseUrl) {
34
- baseURL = config.memoryLlmBaseUrl;
35
- }
36
- else if (config.memoryLlmProvider === 'ollama') {
37
- baseURL = config.ollamaBaseUrl;
38
- }
39
- const client = new OpenAI({ apiKey, ...(baseURL ? { baseURL } : {}) });
40
- try {
41
- const response = await client.chat.completions.create({
42
- model: config.memoryLlmModel,
43
- messages: [
44
- { role: 'system', content: systemPrompt },
45
- { role: 'user', content: userMessage },
46
- ],
47
- temperature: 0,
48
- response_format: { type: 'json_object' },
49
- });
50
- return response.choices[0]?.message?.content ?? '{}';
51
- }
52
- catch (err) {
53
- throw new MemoryError('Memory LLM call failed', err);
54
- }
55
- }
56
- //# sourceMappingURL=llm.js.map
@@ -1,5 +0,0 @@
1
- export declare const FACT_EXTRACTION_SYSTEM_PROMPT = "You are a developer knowledge extractor. Your job is to extract discrete, factual statements from conversations about software development.\n\nExtract facts about:\n- **coding_style**: Formatting preferences (tabs/spaces, naming conventions, line length), code style rules\n- **tools**: Preferred tools, frameworks, libraries, test runners, bundlers, linters, editors\n- **architecture**: Design patterns, architectural decisions, system design preferences\n- **conventions**: Project conventions, commit message formats, branch naming, PR workflows\n- **debugging**: Solutions to specific bugs, debugging techniques, known issues\n- **workflow**: Development habits, deployment processes, review preferences\n- **preferences**: General preferences, opinions, requirements that don't fit other categories\n\nRules:\n1. Extract only factual, concrete statements \u2014 not vague observations\n2. Each fact should be a single, self-contained statement\n3. Use third person (\"The user prefers...\" or state the fact directly)\n4. If the input contains no extractable facts, return an empty array\n5. Do NOT extract facts about the conversation itself (e.g., \"the user asked about...\")\n6. Do NOT extract temporary or session-specific information\n7. Prefer specific facts over general ones\n\nRespond with JSON: { \"facts\": [{ \"fact\": \"...\", \"category\": \"...\" }] }\n\nCategories: coding_style, tools, architecture, conventions, debugging, workflow, preferences";
2
- export declare const FACT_EXTRACTION_USER_TEMPLATE = "Extract developer knowledge facts from this text:\n\n<text>\n{content}\n</text>";
3
- export declare function buildSystemPrompt(): string;
4
- export declare function buildExtractionPrompt(content: string): string;
5
- //# sourceMappingURL=prompts.d.ts.map
@@ -1,36 +0,0 @@
1
- export const FACT_EXTRACTION_SYSTEM_PROMPT = `You are a developer knowledge extractor. Your job is to extract discrete, factual statements from conversations about software development.
2
-
3
- Extract facts about:
4
- - **coding_style**: Formatting preferences (tabs/spaces, naming conventions, line length), code style rules
5
- - **tools**: Preferred tools, frameworks, libraries, test runners, bundlers, linters, editors
6
- - **architecture**: Design patterns, architectural decisions, system design preferences
7
- - **conventions**: Project conventions, commit message formats, branch naming, PR workflows
8
- - **debugging**: Solutions to specific bugs, debugging techniques, known issues
9
- - **workflow**: Development habits, deployment processes, review preferences
10
- - **preferences**: General preferences, opinions, requirements that don't fit other categories
11
-
12
- Rules:
13
- 1. Extract only factual, concrete statements — not vague observations
14
- 2. Each fact should be a single, self-contained statement
15
- 3. Use third person ("The user prefers..." or state the fact directly)
16
- 4. If the input contains no extractable facts, return an empty array
17
- 5. Do NOT extract facts about the conversation itself (e.g., "the user asked about...")
18
- 6. Do NOT extract temporary or session-specific information
19
- 7. Prefer specific facts over general ones
20
-
21
- Respond with JSON: { "facts": [{ "fact": "...", "category": "..." }] }
22
-
23
- Categories: coding_style, tools, architecture, conventions, debugging, workflow, preferences`;
24
- export const FACT_EXTRACTION_USER_TEMPLATE = `Extract developer knowledge facts from this text:
25
-
26
- <text>
27
- {content}
28
- </text>`;
29
- export function buildSystemPrompt() {
30
- const today = new Date().toISOString().slice(0, 10);
31
- return `${FACT_EXTRACTION_SYSTEM_PROMPT}\n- Today's date is ${today}.`;
32
- }
33
- export function buildExtractionPrompt(content) {
34
- return FACT_EXTRACTION_USER_TEMPLATE.replace('{content}', content);
35
- }
36
- //# sourceMappingURL=prompts.js.map