@coreidentitylabs/open-graph-memory-mcp 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91) hide show
  1. package/.agents/skills/mcp-builder/LICENSE.txt +202 -0
  2. package/.agents/skills/mcp-builder/SKILL.md +236 -0
  3. package/.agents/skills/mcp-builder/reference/evaluation.md +602 -0
  4. package/.agents/skills/mcp-builder/reference/mcp_best_practices.md +249 -0
  5. package/.agents/skills/mcp-builder/reference/node_mcp_server.md +970 -0
  6. package/.agents/skills/mcp-builder/reference/python_mcp_server.md +719 -0
  7. package/.agents/skills/mcp-builder/scripts/connections.py +151 -0
  8. package/.agents/skills/mcp-builder/scripts/evaluation.py +373 -0
  9. package/.agents/skills/mcp-builder/scripts/example_evaluation.xml +22 -0
  10. package/.agents/skills/mcp-builder/scripts/requirements.txt +2 -0
  11. package/.env.example +26 -0
  12. package/Implementation Plan.md +358 -0
  13. package/README.md +187 -0
  14. package/dist/constants.d.ts +34 -0
  15. package/dist/constants.d.ts.map +1 -0
  16. package/dist/constants.js +40 -0
  17. package/dist/constants.js.map +1 -0
  18. package/dist/encoding/embedder.d.ts +12 -0
  19. package/dist/encoding/embedder.d.ts.map +1 -0
  20. package/dist/encoding/embedder.js +85 -0
  21. package/dist/encoding/embedder.js.map +1 -0
  22. package/dist/encoding/pipeline.d.ts +28 -0
  23. package/dist/encoding/pipeline.d.ts.map +1 -0
  24. package/dist/encoding/pipeline.js +146 -0
  25. package/dist/encoding/pipeline.js.map +1 -0
  26. package/dist/evolution/consolidator.d.ts +12 -0
  27. package/dist/evolution/consolidator.d.ts.map +1 -0
  28. package/dist/evolution/consolidator.js +212 -0
  29. package/dist/evolution/consolidator.js.map +1 -0
  30. package/dist/index.d.ts +3 -0
  31. package/dist/index.d.ts.map +1 -0
  32. package/dist/index.js +53 -0
  33. package/dist/index.js.map +1 -0
  34. package/dist/llm/openai-provider.d.ts +23 -0
  35. package/dist/llm/openai-provider.d.ts.map +1 -0
  36. package/dist/llm/openai-provider.js +141 -0
  37. package/dist/llm/openai-provider.js.map +1 -0
  38. package/dist/llm/prompts.d.ts +10 -0
  39. package/dist/llm/prompts.d.ts.map +1 -0
  40. package/dist/llm/prompts.js +63 -0
  41. package/dist/llm/prompts.js.map +1 -0
  42. package/dist/llm/provider.d.ts +7 -0
  43. package/dist/llm/provider.d.ts.map +1 -0
  44. package/dist/llm/provider.js +25 -0
  45. package/dist/llm/provider.js.map +1 -0
  46. package/dist/resources/context-resource.d.ts +8 -0
  47. package/dist/resources/context-resource.d.ts.map +1 -0
  48. package/dist/resources/context-resource.js +51 -0
  49. package/dist/resources/context-resource.js.map +1 -0
  50. package/dist/retrieval/search.d.ts +24 -0
  51. package/dist/retrieval/search.d.ts.map +1 -0
  52. package/dist/retrieval/search.js +143 -0
  53. package/dist/retrieval/search.js.map +1 -0
  54. package/dist/storage/factory.d.ts +10 -0
  55. package/dist/storage/factory.d.ts.map +1 -0
  56. package/dist/storage/factory.js +35 -0
  57. package/dist/storage/factory.js.map +1 -0
  58. package/dist/storage/json-store.d.ts +34 -0
  59. package/dist/storage/json-store.d.ts.map +1 -0
  60. package/dist/storage/json-store.js +248 -0
  61. package/dist/storage/json-store.js.map +1 -0
  62. package/dist/storage/neo4j-store.d.ts +31 -0
  63. package/dist/storage/neo4j-store.d.ts.map +1 -0
  64. package/dist/storage/neo4j-store.js +440 -0
  65. package/dist/storage/neo4j-store.js.map +1 -0
  66. package/dist/tools/memory-tools.d.ts +4 -0
  67. package/dist/tools/memory-tools.d.ts.map +1 -0
  68. package/dist/tools/memory-tools.js +873 -0
  69. package/dist/tools/memory-tools.js.map +1 -0
  70. package/dist/types.d.ts +129 -0
  71. package/dist/types.d.ts.map +1 -0
  72. package/dist/types.js +5 -0
  73. package/dist/types.js.map +1 -0
  74. package/implementation_plan.md.resolved.md +322 -0
  75. package/package.json +43 -0
  76. package/src/constants.ts +52 -0
  77. package/src/encoding/embedder.ts +93 -0
  78. package/src/encoding/pipeline.ts +197 -0
  79. package/src/evolution/consolidator.ts +281 -0
  80. package/src/index.ts +67 -0
  81. package/src/llm/openai-provider.ts +208 -0
  82. package/src/llm/prompts.ts +66 -0
  83. package/src/llm/provider.ts +37 -0
  84. package/src/resources/context-resource.ts +74 -0
  85. package/src/retrieval/search.ts +203 -0
  86. package/src/storage/factory.ts +48 -0
  87. package/src/storage/json-store.ts +325 -0
  88. package/src/storage/neo4j-store.ts +564 -0
  89. package/src/tools/memory-tools.ts +1067 -0
  90. package/src/types.ts +207 -0
  91. package/tsconfig.json +21 -0
@@ -0,0 +1,208 @@
1
+ // =============================================================================
2
+ // OpenAI-Compatible LLM Provider
3
+ // =============================================================================
4
+ // Works with: OpenAI, Azure OpenAI, Ollama (OpenAI-compat mode), and any
5
+ // provider exposing the OpenAI chat completions & embeddings API.
6
+ // =============================================================================
7
+
8
+ import type {
9
+ LLMProvider,
10
+ ExtractionResult,
11
+ ExtractedEntity,
12
+ ExtractedRelation,
13
+ } from "../types.js";
14
+ import {
15
+ getExtractionSystemPrompt,
16
+ getExtractionUserPrompt,
17
+ } from "./prompts.js";
18
+
19
// Minimal subset of the OpenAI chat-completions message shape used by
// this provider (role + plain-text content only).
interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

// Minimal subset of the /chat/completions response body that this file
// actually reads: the first choice's message content.
interface ChatCompletionResponse {
  choices: Array<{
    message: {
      content: string;
    };
  }>;
}

// Minimal subset of the /embeddings response body: one vector per input,
// in the same order as the request's `input` array.
interface EmbeddingResponse {
  data: Array<{
    embedding: number[];
  }>;
}
37
+
38
+ export class OpenAIProvider implements LLMProvider {
39
+ private apiKey: string;
40
+ private baseUrl: string;
41
+ private chatModel: string;
42
+ private embeddingModel: string;
43
+
44
+ constructor(
45
+ apiKey: string,
46
+ baseUrl: string,
47
+ chatModel: string,
48
+ embeddingModel: string,
49
+ ) {
50
+ this.apiKey = apiKey;
51
+ this.baseUrl = baseUrl.replace(/\/$/, ""); // strip trailing slash
52
+ this.chatModel = chatModel;
53
+ this.embeddingModel = embeddingModel;
54
+ }
55
+
56
+ /**
57
+ * Extract entities and relationships from text using the chat model.
58
+ */
59
+ async extractEntitiesAndRelations(
60
+ text: string,
61
+ existingEntities: string[],
62
+ ): Promise<ExtractionResult> {
63
+ const messages: ChatMessage[] = [
64
+ { role: "system", content: getExtractionSystemPrompt(existingEntities) },
65
+ { role: "user", content: getExtractionUserPrompt(text) },
66
+ ];
67
+
68
+ const responseText = await this.chatCompletion(messages);
69
+
70
+ try {
71
+ // Clean response — strip markdown code fences if present
72
+ let cleaned = responseText.trim();
73
+ if (cleaned.startsWith("```")) {
74
+ cleaned = cleaned
75
+ .replace(/^```(?:json)?\s*\n?/, "")
76
+ .replace(/\n?```\s*$/, "");
77
+ }
78
+
79
+ const parsed = JSON.parse(cleaned) as {
80
+ entities?: unknown[];
81
+ relations?: unknown[];
82
+ };
83
+
84
+ const entities: ExtractedEntity[] = (parsed.entities ?? [])
85
+ .map((e: unknown) => {
86
+ const entity = e as Record<string, unknown>;
87
+ return {
88
+ name: String(entity.name ?? ""),
89
+ type: validateNodeType(String(entity.type ?? "entity")),
90
+ description: String(entity.description ?? ""),
91
+ metadata: (entity.metadata as Record<string, unknown>) ?? {},
92
+ };
93
+ })
94
+ .filter((e) => e.name.length > 0);
95
+
96
+ const relations: ExtractedRelation[] = (parsed.relations ?? [])
97
+ .map((r: unknown) => {
98
+ const rel = r as Record<string, unknown>;
99
+ return {
100
+ source: String(rel.source ?? ""),
101
+ target: String(rel.target ?? ""),
102
+ relation: String(rel.relation ?? "related_to"),
103
+ description: rel.description ? String(rel.description) : undefined,
104
+ weight: typeof rel.weight === "number" ? rel.weight : 0.8,
105
+ };
106
+ })
107
+ .filter((r) => r.source.length > 0 && r.target.length > 0);
108
+
109
+ return { entities, relations };
110
+ } catch (parseError) {
111
+ console.error(
112
+ `[open-memory] Failed to parse LLM extraction response:`,
113
+ parseError,
114
+ );
115
+ console.error(`[open-memory] Raw response:`, responseText);
116
+ return { entities: [], relations: [] };
117
+ }
118
+ }
119
+
120
+ /**
121
+ * Generate a single embedding vector.
122
+ */
123
+ async generateEmbedding(text: string): Promise<number[]> {
124
+ const result = await this.embeddingRequest([text]);
125
+ return result[0] ?? [];
126
+ }
127
+
128
+ /**
129
+ * Generate embeddings for multiple texts in a single batch.
130
+ */
131
+ async generateEmbeddingBatch(texts: string[]): Promise<number[][]> {
132
+ return this.embeddingRequest(texts);
133
+ }
134
+
135
+ // ---------------------------------------------------------------------------
136
+ // HTTP Helpers
137
+ // ---------------------------------------------------------------------------
138
+
139
+ private async chatCompletion(messages: ChatMessage[]): Promise<string> {
140
+ const url = `${this.baseUrl}/chat/completions`;
141
+
142
+ const response = await fetch(url, {
143
+ method: "POST",
144
+ headers: {
145
+ "Content-Type": "application/json",
146
+ Authorization: `Bearer ${this.apiKey}`,
147
+ },
148
+ body: JSON.stringify({
149
+ model: this.chatModel,
150
+ messages,
151
+ temperature: 0.1,
152
+ max_tokens: 4096,
153
+ }),
154
+ });
155
+
156
+ if (!response.ok) {
157
+ const errorBody = await response.text();
158
+ throw new Error(
159
+ `LLM chat completion failed (${response.status}): ${errorBody}`,
160
+ );
161
+ }
162
+
163
+ const data = (await response.json()) as ChatCompletionResponse;
164
+ return data.choices[0]?.message?.content ?? "";
165
+ }
166
+
167
+ private async embeddingRequest(inputs: string[]): Promise<number[][]> {
168
+ const url = `${this.baseUrl}/embeddings`;
169
+
170
+ const response = await fetch(url, {
171
+ method: "POST",
172
+ headers: {
173
+ "Content-Type": "application/json",
174
+ Authorization: `Bearer ${this.apiKey}`,
175
+ },
176
+ body: JSON.stringify({
177
+ model: this.embeddingModel,
178
+ input: inputs,
179
+ }),
180
+ });
181
+
182
+ if (!response.ok) {
183
+ const errorBody = await response.text();
184
+ throw new Error(
185
+ `LLM embedding request failed (${response.status}): ${errorBody}`,
186
+ );
187
+ }
188
+
189
+ const data = (await response.json()) as EmbeddingResponse;
190
+ return data.data.map((d) => d.embedding);
191
+ }
192
+ }
193
+
194
+ // ---------------------------------------------------------------------------
195
+ // Helpers
196
+ // ---------------------------------------------------------------------------
197
+
198
+ function validateNodeType(type: string): ExtractedEntity["type"] {
199
+ const valid = [
200
+ "entity",
201
+ "concept",
202
+ "event",
203
+ "code_pattern",
204
+ "decision",
205
+ "conversation",
206
+ ];
207
+ return valid.includes(type) ? (type as ExtractedEntity["type"]) : "entity";
208
+ }
@@ -0,0 +1,66 @@
1
+ // =============================================================================
2
+ // Extraction Prompts — Optimized for developer conversations
3
+ // =============================================================================
4
+
5
+ /**
6
+ * System prompt for entity and relationship extraction from developer conversations.
7
+ * Instructs the LLM to return structured JSON with entities and relations.
8
+ */
9
+ export function getExtractionSystemPrompt(existingEntities: string[]): string {
10
+ const existingList =
11
+ existingEntities.length > 0
12
+ ? `\n\nKnown entities already in memory (use these exact names for entity resolution):\n${existingEntities.map((e) => `- ${e}`).join("\n")}`
13
+ : "";
14
+
15
+ return `You are a specialized entity and relationship extraction system for developer conversations.
16
+
17
+ Your task is to extract structured entities and relationships from the given text.
18
+
19
+ ## Entity Types
20
+ - **entity**: People, teams, organizations, tools, libraries, frameworks, services, APIs, databases
21
+ - **concept**: Abstract ideas, design patterns, paradigms, methodologies
22
+ - **event**: Meetings, deployments, incidents, releases, decisions made at a point in time
23
+ - **code_pattern**: Recurring code structures, architectural patterns, implementation approaches
24
+ - **decision**: Technical decisions with rationale (e.g., "chose React Query over SWR because...")
25
+ - **conversation**: Conversation summaries or session references
26
+
27
+ ## Extraction Rules
28
+ 1. Extract SPECIFIC entities, not generic terms (e.g., "React Query" not "library")
29
+ 2. For each entity, provide a concise but informative description
30
+ 3. For relationships, use clear verb-based relation types (uses, depends_on, replaced_by, decided_to, part_of, implements, configures, deployed_to, etc.)
31
+ 4. If an entity matches one of the known entities below, use the EXACT same name
32
+ 5. Capture temporal information when present (e.g., "decided last week" → include in metadata)
33
+ 6. Focus on information a developer would want to recall later
34
+ ${existingList}
35
+
36
+ ## Output Format
37
+ Return ONLY valid JSON in this exact format:
38
+ {
39
+ "entities": [
40
+ {
41
+ "name": "EntityName",
42
+ "type": "entity|concept|event|code_pattern|decision",
43
+ "description": "Concise description of what this entity is and its relevance",
44
+ "metadata": { "optional": "key-value pairs for extra context" }
45
+ }
46
+ ],
47
+ "relations": [
48
+ {
49
+ "source": "SourceEntityName",
50
+ "target": "TargetEntityName",
51
+ "relation": "verb_based_relation",
52
+ "description": "Brief description of this relationship"
53
+ }
54
+ ]
55
+ }
56
+
57
+ Return an empty arrays if no meaningful entities or relationships can be extracted.
58
+ Do NOT include markdown code fences or any text outside the JSON.`;
59
+ }
60
+
61
+ /**
62
+ * Build the user message for extraction.
63
+ */
64
+ export function getExtractionUserPrompt(text: string): string {
65
+ return `Extract entities and relationships from the following text:\n\n${text}`;
66
+ }
@@ -0,0 +1,37 @@
1
+ // =============================================================================
2
+ // LLM Provider — Interface & Factory
3
+ // =============================================================================
4
+ // Optional: only loaded if LLM_API_KEY is configured.
5
+ // Powers the memory_encode_text tool for server-side entity extraction.
6
+ // =============================================================================
7
+
8
+ import type { LLMProvider } from "../types.js";
9
+ import { ENV_KEYS } from "../constants.js";
10
+ import { OpenAIProvider } from "./openai-provider.js";
11
+
12
+ /**
13
+ * Create an LLM provider if configured, or return null.
14
+ * The LLM provider is OPTIONAL — the agent-driven flow works without it.
15
+ */
16
+ export function createLLMProvider(): LLMProvider | null {
17
+ const apiKey = process.env[ENV_KEYS.LLM_API_KEY];
18
+
19
+ if (!apiKey) {
20
+ console.error(
21
+ "[open-memory] No LLM_API_KEY configured. Server-side encoding (memory_encode_text) disabled.",
22
+ );
23
+ return null;
24
+ }
25
+
26
+ const baseUrl =
27
+ process.env[ENV_KEYS.LLM_BASE_URL] ?? "https://api.openai.com/v1";
28
+ const chatModel = process.env[ENV_KEYS.LLM_CHAT_MODEL] ?? "gpt-4o-mini";
29
+ const embeddingModel =
30
+ process.env[ENV_KEYS.LLM_EMBEDDING_MODEL] ?? "text-embedding-3-small";
31
+
32
+ console.error(
33
+ `[open-memory] LLM provider enabled: ${baseUrl} (chat: ${chatModel}, embed: ${embeddingModel})`,
34
+ );
35
+
36
+ return new OpenAIProvider(apiKey, baseUrl, chatModel, embeddingModel);
37
+ }
@@ -0,0 +1,74 @@
1
+ // =============================================================================
2
+ // MCP Resource — Auto-injectable recent memory context
3
+ // =============================================================================
4
+
5
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
6
+ import type { StorageBackend } from "../types.js";
7
+
8
+ /**
9
+ * Register MCP resources for automatic context injection.
10
+ * Agents can read these resources to get relevant context without explicit tool calls.
11
+ */
12
+ export function registerMemoryResources(
13
+ server: McpServer,
14
+ store: StorageBackend,
15
+ ): void {
16
+ // Recent entities resource
17
+ server.registerResource(
18
+ "recent_entities",
19
+ "memory://entities/recent",
20
+ {
21
+ description: "Most recently updated entities in the memory graph",
22
+ mimeType: "application/json",
23
+ },
24
+ async () => {
25
+ const { nodes } = await store.getAllNodes(20, 0);
26
+ const sorted = nodes
27
+ .sort(
28
+ (a, b) =>
29
+ new Date(b.updatedAt).getTime() - new Date(a.updatedAt).getTime(),
30
+ )
31
+ .slice(0, 20);
32
+
33
+ const content = sorted.map((n) => ({
34
+ name: n.name,
35
+ type: n.type,
36
+ description: n.description.substring(0, 200),
37
+ updatedAt: n.updatedAt,
38
+ }));
39
+
40
+ return {
41
+ contents: [
42
+ {
43
+ uri: "memory://entities/recent",
44
+ mimeType: "application/json",
45
+ text: JSON.stringify(content, null, 2),
46
+ },
47
+ ],
48
+ };
49
+ },
50
+ );
51
+
52
+ // Graph stats resource
53
+ server.registerResource(
54
+ "graph_stats",
55
+ "memory://stats",
56
+ {
57
+ description:
58
+ "Current memory graph statistics (node counts, edge counts, backend type)",
59
+ mimeType: "application/json",
60
+ },
61
+ async () => {
62
+ const stats = await store.getStats();
63
+ return {
64
+ contents: [
65
+ {
66
+ uri: "memory://stats",
67
+ mimeType: "application/json",
68
+ text: JSON.stringify(stats, null, 2),
69
+ },
70
+ ],
71
+ };
72
+ },
73
+ );
74
+ }
@@ -0,0 +1,203 @@
1
+ // =============================================================================
2
+ // Hybrid Retrieval Engine — Semantic + Graph + Text Search
3
+ // =============================================================================
4
+
5
+ import type {
6
+ StorageBackend,
7
+ MemoryNode,
8
+ MemoryEdge,
9
+ ScoredNode,
10
+ SearchResult,
11
+ NodeType,
12
+ } from "../types.js";
13
+ import {
14
+ generateLocalEmbedding,
15
+ cosineSimilarity,
16
+ } from "../encoding/embedder.js";
17
+ import { DEFAULT_TOP_K, DEFAULT_TRAVERSAL_DEPTH } from "../constants.js";
18
+
19
/** Options accepted by the hybrid retrieval entry points in this module. */
export interface SearchOptions {
  /** Free-text query matched against names, descriptions, and embeddings. */
  query: string;
  /** Maximum number of results to return; defaults to DEFAULT_TOP_K. */
  topK?: number;
  /** Restrict results to a single node type. */
  type?: NodeType;
  /**
   * Filter on node creation time. Values are compared directly against
   * node.createdAt as strings (ISO-8601 timestamps compare lexicographically).
   */
  timeRange?: {
    after?: string;
    before?: string;
  };
  /** Graph-traversal depth from top anchors; defaults to DEFAULT_TRAVERSAL_DEPTH. */
  traversalDepth?: number;
}
29
+
30
+ /**
31
+ * Hybrid retrieval combining:
32
+ * 1. Text match on node names and descriptions
33
+ * 2. Cosine similarity on embeddings (semantic anchors)
34
+ * 3. Graph traversal from top anchors (2-hop neighborhood)
35
+ * 4. Ranking by: score = α·semantic + β·textMatch + γ·recency + δ·accessFreq
36
+ */
37
+ export async function hybridSearch(
38
+ store: StorageBackend,
39
+ options: SearchOptions,
40
+ ): Promise<SearchResult> {
41
+ const {
42
+ query,
43
+ topK = DEFAULT_TOP_K,
44
+ type,
45
+ timeRange,
46
+ traversalDepth = DEFAULT_TRAVERSAL_DEPTH,
47
+ } = options;
48
+
49
+ const queryLower = query.toLowerCase();
50
+ const queryEmbedding = generateLocalEmbedding(query);
51
+
52
+ // Step 1: Get all candidate nodes
53
+ const { nodes: allNodes } = await store.getAllNodes(10000, 0);
54
+
55
+ // Step 2: Score each node
56
+ const scored: ScoredNode[] = [];
57
+
58
+ for (const node of allNodes) {
59
+ // Apply type filter
60
+ if (type && node.type !== type) continue;
61
+
62
+ // Apply time range filter
63
+ if (timeRange?.after && node.createdAt < timeRange.after) continue;
64
+ if (timeRange?.before && node.createdAt > timeRange.before) continue;
65
+
66
+ let totalScore = 0;
67
+ let matchType: ScoredNode["matchType"] = "text";
68
+
69
+ // Text match scoring
70
+ const nameMatch = node.name.toLowerCase().includes(queryLower) ? 0.6 : 0;
71
+ const descMatch = node.description.toLowerCase().includes(queryLower)
72
+ ? 0.4
73
+ : 0;
74
+
75
+ // Partial word matching
76
+ const queryWords = queryLower.split(/\s+/).filter((w) => w.length > 2);
77
+ let wordMatchScore = 0;
78
+ for (const word of queryWords) {
79
+ if (node.name.toLowerCase().includes(word)) wordMatchScore += 0.15;
80
+ if (node.description.toLowerCase().includes(word)) wordMatchScore += 0.1;
81
+ }
82
+
83
+ const textScore = Math.min(1, nameMatch + descMatch + wordMatchScore);
84
+
85
+ // Semantic similarity scoring
86
+ let semanticScore = 0;
87
+ if (node.embedding && node.embedding.length > 0) {
88
+ semanticScore = cosineSimilarity(queryEmbedding, node.embedding);
89
+ if (semanticScore > textScore) matchType = "semantic";
90
+ }
91
+
92
+ // Recency boost (decays over 30 days)
93
+ const ageMs = Date.now() - new Date(node.updatedAt).getTime();
94
+ const ageDays = ageMs / (1000 * 60 * 60 * 24);
95
+ const recencyBoost = Math.max(0, 1 - ageDays / 30) * 0.1;
96
+
97
+ // Access frequency boost (capped)
98
+ const accessBoost = Math.min(node.accessCount / 100, 0.1);
99
+
100
+ // Weighted combination
101
+ totalScore =
102
+ 0.4 * semanticScore +
103
+ 0.4 * textScore +
104
+ 0.1 * recencyBoost +
105
+ 0.1 * accessBoost;
106
+
107
+ if (totalScore > 0.05) {
108
+ scored.push({ node, score: totalScore, matchType });
109
+ }
110
+ }
111
+
112
+ // Step 3: Sort by score, take top K
113
+ scored.sort((a, b) => b.score - a.score);
114
+ const topResults = scored.slice(0, topK);
115
+
116
+ // Step 4: Graph traversal from top anchors to find related context
117
+ const relatedEdgesMap = new Map<string, MemoryEdge>();
118
+ const graphNodeIds = new Set(topResults.map((r) => r.node.id));
119
+
120
+ // Traverse from top 3 anchors
121
+ const anchors = topResults.slice(0, 3);
122
+ for (const anchor of anchors) {
123
+ const neighborhood = await store.getNeighborhood(
124
+ anchor.node.id,
125
+ traversalDepth,
126
+ );
127
+
128
+ for (const edge of neighborhood.edges) {
129
+ relatedEdgesMap.set(edge.id, edge);
130
+ }
131
+
132
+ // Add graph-discovered nodes that aren't already in results
133
+ for (const neighbor of neighborhood.nodes) {
134
+ if (!graphNodeIds.has(neighbor.id)) {
135
+ graphNodeIds.add(neighbor.id);
136
+ topResults.push({
137
+ node: neighbor,
138
+ score: anchor.score * 0.5, // Discounted score for graph neighbors
139
+ matchType: "graph",
140
+ });
141
+ }
142
+ }
143
+ }
144
+
145
+ // Step 5: Update access counts for returned nodes
146
+ for (const result of topResults) {
147
+ await store.updateNode(result.node.id, {
148
+ accessCount: result.node.accessCount + 1,
149
+ lastAccessedAt: new Date().toISOString(),
150
+ });
151
+ }
152
+
153
+ // Re-sort after adding graph neighbors
154
+ topResults.sort((a, b) => b.score - a.score);
155
+
156
+ return {
157
+ nodes: topResults.slice(0, topK),
158
+ relatedEdges: Array.from(relatedEdgesMap.values()),
159
+ totalNodes: scored.length,
160
+ };
161
+ }
162
+
163
+ /**
164
+ * Get context for a specific topic, formatted for prompt injection.
165
+ */
166
+ export async function getContextForTopic(
167
+ store: StorageBackend,
168
+ topic: string,
169
+ maxTokens: number = 2000,
170
+ ): Promise<string> {
171
+ const results = await hybridSearch(store, {
172
+ query: topic,
173
+ topK: 10,
174
+ traversalDepth: 1,
175
+ });
176
+
177
+ const lines: string[] = [];
178
+ let estimatedTokens = 0;
179
+
180
+ lines.push(`## Relevant Memory Context: "${topic}"\n`);
181
+
182
+ for (const { node, score } of results.nodes) {
183
+ if (estimatedTokens > maxTokens) break;
184
+
185
+ const entry = `### ${node.name} (${node.type})\n${node.description}\n`;
186
+ estimatedTokens += entry.length / 4; // rough token estimate
187
+ lines.push(entry);
188
+ }
189
+
190
+ if (results.relatedEdges.length > 0) {
191
+ lines.push(`### Relationships\n`);
192
+ for (const edge of results.relatedEdges.slice(0, 10)) {
193
+ const fromNode = results.nodes.find((n) => n.node.id === edge.source);
194
+ const toNode = results.nodes.find((n) => n.node.id === edge.target);
195
+ const fromName = fromNode?.node.name ?? edge.source;
196
+ const toName = toNode?.node.name ?? edge.target;
197
+ lines.push(`- ${fromName} **${edge.relation}** ${toName}`);
198
+ if (edge.description) lines.push(` _${edge.description}_`);
199
+ }
200
+ }
201
+
202
+ return lines.join("\n");
203
+ }
@@ -0,0 +1,48 @@
1
+ // =============================================================================
2
+ // Storage Backend Factory
3
+ // =============================================================================
4
+
5
+ import type { StorageBackend } from "../types.js";
6
+ import { JsonStore } from "./json-store.js";
7
+ import { Neo4jStore } from "./neo4j-store.js";
8
+ import { ENV_KEYS, DEFAULT_MEMORY_STORE_PATH } from "../constants.js";
9
+
10
+ export type StorageType = "json" | "neo4j";
11
+
12
+ /**
13
+ * Create a storage backend based on environment configuration.
14
+ *
15
+ * STORAGE_BACKEND=json → Local JSON file (default, zero-config)
16
+ * STORAGE_BACKEND=neo4j → Neo4j graph database
17
+ */
18
+ export function createStorageBackend(): StorageBackend {
19
+ const backendType = (
20
+ process.env[ENV_KEYS.STORAGE_BACKEND] ?? "json"
21
+ ).toLowerCase() as StorageType;
22
+
23
+ switch (backendType) {
24
+ case "neo4j": {
25
+ const uri = process.env[ENV_KEYS.NEO4J_URI];
26
+ const user = process.env[ENV_KEYS.NEO4J_USER];
27
+ const password = process.env[ENV_KEYS.NEO4J_PASSWORD];
28
+
29
+ if (!uri || !user || !password) {
30
+ console.error(
31
+ `[open-memory] ERROR: Neo4j requires NEO4J_URI, NEO4J_USER, and NEO4J_PASSWORD environment variables.`,
32
+ );
33
+ process.exit(1);
34
+ }
35
+
36
+ console.error(`[open-memory] Using Neo4j storage backend at ${uri}`);
37
+ return new Neo4jStore(uri, user, password);
38
+ }
39
+
40
+ case "json":
41
+ default: {
42
+ const filePath =
43
+ process.env[ENV_KEYS.MEMORY_STORE_PATH] ?? DEFAULT_MEMORY_STORE_PATH;
44
+ console.error(`[open-memory] Using JSON storage backend at ${filePath}`);
45
+ return new JsonStore(filePath);
46
+ }
47
+ }
48
+ }