@prmichaelsen/remember-mcp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. package/.env.example +65 -0
  2. package/AGENT.md +840 -0
  3. package/README.md +72 -0
  4. package/agent/design/.gitkeep +0 -0
  5. package/agent/design/access-control-result-pattern.md +458 -0
  6. package/agent/design/action-audit-memory-types.md +637 -0
  7. package/agent/design/common-template-fields.md +282 -0
  8. package/agent/design/complete-tool-set.md +407 -0
  9. package/agent/design/content-types-expansion.md +521 -0
  10. package/agent/design/cross-database-id-strategy.md +358 -0
  11. package/agent/design/default-template-library.md +423 -0
  12. package/agent/design/firestore-wrapper-analysis.md +606 -0
  13. package/agent/design/llm-provider-abstraction.md +691 -0
  14. package/agent/design/location-handling-architecture.md +523 -0
  15. package/agent/design/memory-templates-design.md +364 -0
  16. package/agent/design/permissions-storage-architecture.md +680 -0
  17. package/agent/design/relationship-storage-strategy.md +361 -0
  18. package/agent/design/remember-mcp-implementation-tasks.md +417 -0
  19. package/agent/design/remember-mcp-progress.yaml +141 -0
  20. package/agent/design/requirements-enhancements.md +468 -0
  21. package/agent/design/requirements.md +56 -0
  22. package/agent/design/template-storage-strategy.md +412 -0
  23. package/agent/design/template-suggestion-system.md +853 -0
  24. package/agent/design/trust-escalation-prevention.md +343 -0
  25. package/agent/design/trust-system-implementation.md +592 -0
  26. package/agent/design/user-preferences.md +683 -0
  27. package/agent/design/weaviate-collection-strategy.md +461 -0
  28. package/agent/milestones/.gitkeep +0 -0
  29. package/agent/milestones/milestone-1-project-foundation.md +121 -0
  30. package/agent/milestones/milestone-2-core-memory-system.md +150 -0
  31. package/agent/milestones/milestone-3-relationships-graph.md +116 -0
  32. package/agent/milestones/milestone-4-user-preferences.md +103 -0
  33. package/agent/milestones/milestone-5-template-system.md +126 -0
  34. package/agent/milestones/milestone-6-auth-multi-tenancy.md +124 -0
  35. package/agent/milestones/milestone-7-trust-permissions.md +133 -0
  36. package/agent/milestones/milestone-8-testing-quality.md +137 -0
  37. package/agent/milestones/milestone-9-deployment-documentation.md +147 -0
  38. package/agent/patterns/.gitkeep +0 -0
  39. package/agent/patterns/bootstrap.md +1271 -0
  40. package/agent/patterns/firebase-admin-sdk-v8-usage.md +950 -0
  41. package/agent/patterns/firestore-users-pattern-best-practices.md +347 -0
  42. package/agent/patterns/library-services.md +454 -0
  43. package/agent/patterns/testing-colocated.md +316 -0
  44. package/agent/progress.yaml +395 -0
  45. package/agent/tasks/.gitkeep +0 -0
  46. package/agent/tasks/task-1-initialize-project-structure.md +266 -0
  47. package/agent/tasks/task-2-install-dependencies.md +199 -0
  48. package/agent/tasks/task-3-setup-weaviate-client.md +330 -0
  49. package/agent/tasks/task-4-setup-firestore-client.md +362 -0
  50. package/agent/tasks/task-5-create-basic-mcp-server.md +114 -0
  51. package/agent/tasks/task-6-create-integration-tests.md +195 -0
  52. package/agent/tasks/task-7-finalize-milestone-1.md +363 -0
  53. package/agent/tasks/task-8-setup-utility-scripts.md +382 -0
  54. package/agent/tasks/task-9-create-server-factory.md +404 -0
  55. package/dist/config.d.ts +26 -0
  56. package/dist/constants/content-types.d.ts +60 -0
  57. package/dist/firestore/init.d.ts +14 -0
  58. package/dist/firestore/paths.d.ts +53 -0
  59. package/dist/firestore/paths.spec.d.ts +2 -0
  60. package/dist/server-factory.d.ts +40 -0
  61. package/dist/server-factory.js +1741 -0
  62. package/dist/server-factory.spec.d.ts +2 -0
  63. package/dist/server.d.ts +3 -0
  64. package/dist/server.js +1690 -0
  65. package/dist/tools/create-memory.d.ts +94 -0
  66. package/dist/tools/delete-memory.d.ts +47 -0
  67. package/dist/tools/search-memory.d.ts +88 -0
  68. package/dist/types/memory.d.ts +183 -0
  69. package/dist/utils/logger.d.ts +7 -0
  70. package/dist/weaviate/client.d.ts +39 -0
  71. package/dist/weaviate/client.spec.d.ts +2 -0
  72. package/dist/weaviate/schema.d.ts +29 -0
  73. package/esbuild.build.js +60 -0
  74. package/esbuild.watch.js +25 -0
  75. package/jest.config.js +31 -0
  76. package/jest.e2e.config.js +17 -0
  77. package/package.json +68 -0
  78. package/src/.gitkeep +0 -0
  79. package/src/config.ts +56 -0
  80. package/src/constants/content-types.ts +454 -0
  81. package/src/firestore/init.ts +68 -0
  82. package/src/firestore/paths.spec.ts +75 -0
  83. package/src/firestore/paths.ts +124 -0
  84. package/src/server-factory.spec.ts +60 -0
  85. package/src/server-factory.ts +215 -0
  86. package/src/server.ts +243 -0
  87. package/src/tools/create-memory.ts +198 -0
  88. package/src/tools/delete-memory.ts +126 -0
  89. package/src/tools/search-memory.ts +216 -0
  90. package/src/types/memory.ts +276 -0
  91. package/src/utils/logger.ts +42 -0
  92. package/src/weaviate/client.spec.ts +58 -0
  93. package/src/weaviate/client.ts +114 -0
  94. package/src/weaviate/schema.ts +288 -0
  95. package/tsconfig.json +26 -0
@@ -0,0 +1,691 @@
1
+ # LLM Provider Abstraction Strategy
2
+
3
+ **Concept**: Provider-agnostic LLM configuration supporting multiple providers
4
+ **Created**: 2026-02-11
5
+ **Status**: Design Specification
6
+
7
+ ---
8
+
9
+ ## Overview
10
+
11
+ The remember-mcp system needs LLM capabilities for:
12
+ 1. **Trust validation** - Validating responses don't leak low-trust memory details
13
+ 2. **Query interpretation** - `remember_query_memory` tool (RAG)
14
+ 3. **Template suggestions** - Suggesting appropriate templates
15
+ 4. **Relationship discovery** - Identifying connections between memories
16
+
17
+ **Requirement**: Support multiple LLM providers (OpenAI, Anthropic/Bedrock, Cohere, etc.) with easy swapping.
18
+
19
+ ---
20
+
21
+ ## Current State
22
+
23
+ **Weaviate Embeddings**: Currently assumes OpenAI
24
+ ```typescript
25
+ // Weaviate uses OpenAI for embeddings
26
+ headers: { 'X-OpenAI-Api-Key': config.openai.apiKey }
27
+ ```
28
+
29
+ **LLM Calls**: Not yet implemented (planned for Phase 3, M3-M5)
30
+
31
+ ---
32
+
33
+ ## Proposed Environment Variables
34
+
35
+ ### Provider-Agnostic Configuration
36
+
37
+ ```env
38
+ # LLM Provider Configuration
39
+ LLM_PROVIDER=bedrock # bedrock | openai | anthropic | cohere | custom
40
+ LLM_MODEL=anthropic.claude-3-5-sonnet-20241022-v2:0
41
+
42
+ # AWS Bedrock Configuration (when LLM_PROVIDER=bedrock)
43
+ AWS_REGION=us-east-1
44
+ AWS_ACCESS_KEY_ID=
45
+ AWS_SECRET_ACCESS_KEY=
46
+ AWS_SESSION_TOKEN= # Optional, for temporary credentials
47
+
48
+ # OpenAI Configuration (when LLM_PROVIDER=openai)
49
+ OPENAI_API_KEY=sk-...
50
+ OPENAI_ORG_ID= # Optional
51
+
52
+ # Anthropic Direct Configuration (when LLM_PROVIDER=anthropic)
53
+ ANTHROPIC_API_KEY=sk-ant-...
54
+
55
+ # Cohere Configuration (when LLM_PROVIDER=cohere)
56
+ COHERE_API_KEY=
57
+
58
+ # Embeddings Provider (separate from LLM)
59
+ EMBEDDINGS_PROVIDER=openai # openai | cohere | huggingface | custom
60
+ EMBEDDINGS_MODEL=text-embedding-3-small
61
+
62
+ # OpenAI Embeddings (when EMBEDDINGS_PROVIDER=openai)
63
+ OPENAI_EMBEDDINGS_API_KEY=sk-... # Can be different from LLM key
64
+
65
+ # Cohere Embeddings (when EMBEDDINGS_PROVIDER=cohere)
66
+ COHERE_EMBEDDINGS_API_KEY=
67
+ ```
68
+
69
+ ---
70
+
71
+ ## Configuration Structure
72
+
73
+ ### config.ts Enhancement
74
+
75
+ ```typescript
76
// src/config.ts — provider-agnostic LLM + embeddings configuration.
// All values are read once from the environment at module load; empty-string
// defaults mean "not configured" and are validated later by each provider's
// validateConfig().
export const config = {
  // ... existing config ...

  // LLM Configuration — which chat-completion provider/model to use.
  llm: {
    // NOTE(review): this LLMProvider is the string-union alias exported below,
    // not the LLMProvider *interface* in src/llm/types.ts — the two share a
    // name; consider renaming one of them to avoid import confusion.
    provider: (process.env.LLM_PROVIDER || 'openai') as LLMProvider,
    model: process.env.LLM_MODEL || 'gpt-4o-mini',

    // Provider-specific configs
    openai: {
      apiKey: process.env.OPENAI_API_KEY || '',
      orgId: process.env.OPENAI_ORG_ID || '', // optional
    },

    anthropic: {
      apiKey: process.env.ANTHROPIC_API_KEY || '',
    },

    bedrock: {
      region: process.env.AWS_REGION || 'us-east-1',
      accessKeyId: process.env.AWS_ACCESS_KEY_ID || '',
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY || '',
      sessionToken: process.env.AWS_SESSION_TOKEN || '', // optional (temporary credentials)
    },

    cohere: {
      apiKey: process.env.COHERE_API_KEY || '',
    },
  },

  // Embeddings Configuration (separate from LLM) — consumed by the Weaviate
  // vectorizer setup in src/weaviate/schema.ts.
  embeddings: {
    provider: (process.env.EMBEDDINGS_PROVIDER || 'openai') as EmbeddingsProvider,
    model: process.env.EMBEDDINGS_MODEL || 'text-embedding-3-small',

    // Embeddings keys fall back to the corresponding LLM key when a dedicated
    // embeddings key is not provided.
    openai: {
      apiKey: process.env.OPENAI_EMBEDDINGS_API_KEY || process.env.OPENAI_API_KEY || '',
    },

    cohere: {
      apiKey: process.env.COHERE_EMBEDDINGS_API_KEY || process.env.COHERE_API_KEY || '',
    },
  },
} as const;

// Supported provider identifiers (compile-time string unions, no runtime values).
export type LLMProvider = 'openai' | 'anthropic' | 'bedrock' | 'cohere' | 'custom';
export type EmbeddingsProvider = 'openai' | 'cohere' | 'huggingface' | 'custom';
123
+ ```
124
+
125
+ ---
126
+
127
+ ## LLM Abstraction Layer
128
+
129
+ ### Interface Definition
130
+
131
+ ```typescript
132
// src/llm/types.ts

/** A single chat message in the provider-agnostic conversation format. */
export interface LLMMessage {
  role: 'system' | 'user' | 'assistant';
  content: string;
}

/** Optional per-call tuning knobs; unset fields fall back to provider defaults. */
export interface LLMCompletionOptions {
  model?: string; // overrides config.llm.model for this call
  temperature?: number;
  maxTokens?: number;
  stopSequences?: string[];
  topP?: number;
}

/** Normalized completion result returned by every provider implementation. */
export interface LLMCompletionResult {
  content: string;
  model: string;
  usage: {
    inputTokens: number;
    outputTokens: number;
    totalTokens: number;
  };
  // Providers must map their native stop reasons onto this union.
  finishReason: 'stop' | 'length' | 'content_filter' | 'error';
}

/**
 * Contract implemented by each concrete provider (Bedrock, OpenAI, Anthropic, ...).
 * NOTE(review): this interface shares the name LLMProvider with the string-union
 * type exported from config.ts — consider renaming one of them.
 */
export interface LLMProvider {
  complete(messages: LLMMessage[], options?: LLMCompletionOptions): Promise<LLMCompletionResult>;
  /** Throws when required environment configuration for this provider is missing. */
  validateConfig(): void;
}
162
+ ```
163
+
164
+ ### Provider Implementations
165
+
166
+ **src/llm/providers/bedrock.provider.ts**:
167
+ ```typescript
168
+ import { BedrockRuntimeClient, InvokeModelCommand } from '@aws-sdk/client-bedrock-runtime';
169
+ import type { LLMProvider, LLMMessage, LLMCompletionOptions, LLMCompletionResult } from '../types.js';
170
+ import { config } from '../../config.js';
171
+
172
+ export class BedrockLLMProvider implements LLMProvider {
173
+ private client: BedrockRuntimeClient;
174
+
175
+ constructor() {
176
+ this.client = new BedrockRuntimeClient({
177
+ region: config.llm.bedrock.region,
178
+ credentials: {
179
+ accessKeyId: config.llm.bedrock.accessKeyId,
180
+ secretAccessKey: config.llm.bedrock.secretAccessKey,
181
+ sessionToken: config.llm.bedrock.sessionToken || undefined,
182
+ },
183
+ });
184
+ }
185
+
186
+ async complete(messages: LLMMessage[], options?: LLMCompletionOptions): Promise<LLMCompletionResult> {
187
+ const model = options?.model || config.llm.model;
188
+
189
+ // Anthropic format for Bedrock
190
+ const body = {
191
+ anthropic_version: 'bedrock-2023-05-31',
192
+ max_tokens: options?.maxTokens || 4096,
193
+ temperature: options?.temperature || 0.7,
194
+ messages: messages.filter(m => m.role !== 'system'),
195
+ system: messages.find(m => m.role === 'system')?.content,
196
+ };
197
+
198
+ const command = new InvokeModelCommand({
199
+ modelId: model,
200
+ contentType: 'application/json',
201
+ accept: 'application/json',
202
+ body: JSON.stringify(body),
203
+ });
204
+
205
+ const response = await this.client.send(command);
206
+ const result = JSON.parse(new TextDecoder().decode(response.body));
207
+
208
+ return {
209
+ content: result.content[0].text,
210
+ model: model,
211
+ usage: {
212
+ inputTokens: result.usage.input_tokens,
213
+ outputTokens: result.usage.output_tokens,
214
+ totalTokens: result.usage.input_tokens + result.usage.output_tokens,
215
+ },
216
+ finishReason: result.stop_reason === 'end_turn' ? 'stop' : result.stop_reason,
217
+ };
218
+ }
219
+
220
+ validateConfig(): void {
221
+ if (!config.llm.bedrock.region) throw new Error('AWS_REGION required');
222
+ if (!config.llm.bedrock.accessKeyId) throw new Error('AWS_ACCESS_KEY_ID required');
223
+ if (!config.llm.bedrock.secretAccessKey) throw new Error('AWS_SECRET_ACCESS_KEY required');
224
+ }
225
+ }
226
+ ```
227
+
228
+ **src/llm/providers/openai.provider.ts**:
229
+ ```typescript
230
+ import type { LLMProvider, LLMMessage, LLMCompletionOptions, LLMCompletionResult } from '../types.js';
231
+ import { config } from '../../config.js';
232
+
233
+ export class OpenAILLMProvider implements LLMProvider {
234
+ async complete(messages: LLMMessage[], options?: LLMCompletionOptions): Promise<LLMCompletionResult> {
235
+ const model = options?.model || config.llm.model;
236
+
237
+ const response = await fetch('https://api.openai.com/v1/chat/completions', {
238
+ method: 'POST',
239
+ headers: {
240
+ 'Authorization': `Bearer ${config.llm.openai.apiKey}`,
241
+ 'Content-Type': 'application/json',
242
+ },
243
+ body: JSON.stringify({
244
+ model,
245
+ messages,
246
+ temperature: options?.temperature || 0.7,
247
+ max_tokens: options?.maxTokens || 4096,
248
+ stop: options?.stopSequences,
249
+ top_p: options?.topP,
250
+ }),
251
+ });
252
+
253
+ if (!response.ok) {
254
+ throw new Error(`OpenAI API error: ${response.statusText}`);
255
+ }
256
+
257
+ const result = await response.json();
258
+ const choice = result.choices[0];
259
+
260
+ return {
261
+ content: choice.message.content,
262
+ model: result.model,
263
+ usage: {
264
+ inputTokens: result.usage.prompt_tokens,
265
+ outputTokens: result.usage.completion_tokens,
266
+ totalTokens: result.usage.total_tokens,
267
+ },
268
+ finishReason: choice.finish_reason,
269
+ };
270
+ }
271
+
272
+ validateConfig(): void {
273
+ if (!config.llm.openai.apiKey) throw new Error('OPENAI_API_KEY required');
274
+ }
275
+ }
276
+ ```
277
+
278
+ **src/llm/providers/anthropic.provider.ts**:
279
+ ```typescript
280
+ import Anthropic from '@anthropic-ai/sdk';
281
+ import type { LLMProvider, LLMMessage, LLMCompletionOptions, LLMCompletionResult } from '../types.js';
282
+ import { config } from '../../config.js';
283
+
284
+ export class AnthropicLLMProvider implements LLMProvider {
285
+ private client: Anthropic;
286
+
287
+ constructor() {
288
+ this.client = new Anthropic({
289
+ apiKey: config.llm.anthropic.apiKey,
290
+ });
291
+ }
292
+
293
+ async complete(messages: LLMMessage[], options?: LLMCompletionOptions): Promise<LLMCompletionResult> {
294
+ const model = options?.model || config.llm.model;
295
+
296
+ const systemMessage = messages.find(m => m.role === 'system');
297
+ const conversationMessages = messages.filter(m => m.role !== 'system');
298
+
299
+ const response = await this.client.messages.create({
300
+ model,
301
+ max_tokens: options?.maxTokens || 4096,
302
+ temperature: options?.temperature || 0.7,
303
+ system: systemMessage?.content,
304
+ messages: conversationMessages.map(m => ({
305
+ role: m.role as 'user' | 'assistant',
306
+ content: m.content,
307
+ })),
308
+ });
309
+
310
+ return {
311
+ content: response.content[0].type === 'text' ? response.content[0].text : '',
312
+ model: response.model,
313
+ usage: {
314
+ inputTokens: response.usage.input_tokens,
315
+ outputTokens: response.usage.output_tokens,
316
+ totalTokens: response.usage.input_tokens + response.usage.output_tokens,
317
+ },
318
+ finishReason: response.stop_reason === 'end_turn' ? 'stop' : response.stop_reason,
319
+ };
320
+ }
321
+
322
+ validateConfig(): void {
323
+ if (!config.llm.anthropic.apiKey) throw new Error('ANTHROPIC_API_KEY required');
324
+ }
325
+ }
326
+ ```
327
+
328
+ ### Provider Factory
329
+
330
+ **src/llm/factory.ts**:
331
+ ```typescript
332
+ import type { LLMProvider } from './types.js';
333
+ import { BedrockLLMProvider } from './providers/bedrock.provider.js';
334
+ import { OpenAILLMProvider } from './providers/openai.provider.js';
335
+ import { AnthropicLLMProvider } from './providers/anthropic.provider.js';
336
+ import { config } from '../config.js';
337
+
338
+ let providerInstance: LLMProvider | null = null;
339
+
340
+ export function getLLMProvider(): LLMProvider {
341
+ if (providerInstance) {
342
+ return providerInstance;
343
+ }
344
+
345
+ switch (config.llm.provider) {
346
+ case 'bedrock':
347
+ providerInstance = new BedrockLLMProvider();
348
+ break;
349
+
350
+ case 'openai':
351
+ providerInstance = new OpenAILLMProvider();
352
+ break;
353
+
354
+ case 'anthropic':
355
+ providerInstance = new AnthropicLLMProvider();
356
+ break;
357
+
358
+ default:
359
+ throw new Error(`Unsupported LLM provider: ${config.llm.provider}`);
360
+ }
361
+
362
+ providerInstance.validateConfig();
363
+ console.log(`[LLM] Using provider: ${config.llm.provider}`);
364
+
365
+ return providerInstance;
366
+ }
367
+
368
+ // Convenience function
369
+ export async function completeLLM(
370
+ messages: LLMMessage[],
371
+ options?: LLMCompletionOptions
372
+ ): Promise<LLMCompletionResult> {
373
+ const provider = getLLMProvider();
374
+ return await provider.complete(messages, options);
375
+ }
376
+ ```
377
+
378
+ ---
379
+
380
+ ## Usage Examples
381
+
382
+ ### Trust Validation (Phase 3)
383
+
384
+ ```typescript
385
import { completeLLM } from '@/llm/factory.js';

// Illustrative usage: validate that a generated response does not leak details
// from low-trust memories.
// NOTE(review): Memory, ValidationResult and parseValidationResult are assumed
// to be defined elsewhere — this is a design example, not shipped code.
async function validateTrustCompliance(
  response: string,
  lowTrustMemories: Memory[]
): Promise<ValidationResult> {
  const validationPrompt = `
You are a trust compliance validator...
Response to validate: "${response}"
`;

  const result = await completeLLM([
    { role: 'system', content: 'You are a trust compliance validator.' },
    { role: 'user', content: validationPrompt }
  ], {
    temperature: 0.3, // Low temperature for consistent validation
    maxTokens: 1000
  });

  // Parse the model's free-text verdict into a structured result.
  return parseValidationResult(result.content);
}
406
+ ```
407
+
408
+ ### Query Interpretation (Phase 3)
409
+
410
+ ```typescript
411
import { completeLLM } from '@/llm/factory.js';

// Illustrative usage: answer a question using retrieved memories as RAG context.
// NOTE(review): the Memory type is assumed to be defined elsewhere; memory
// contents are concatenated verbatim, so callers presumably pre-filter by
// trust level — confirm against the trust-validation design.
async function interpretQuery(
  question: string,
  memories: Memory[]
): Promise<string> {
  // Join all memory contents into one context block for the prompt.
  const context = memories.map(m => m.content).join('\n\n');

  const result = await completeLLM([
    { role: 'system', content: 'You are a helpful assistant with access to user memories.' },
    { role: 'user', content: `Context:\n${context}\n\nQuestion: ${question}` }
  ], {
    temperature: 0.7,
    maxTokens: 2000
  });

  return result.content;
}
429
+ ```
430
+
431
+ ---
432
+
433
+ ## Embeddings Abstraction
434
+
435
+ ### Weaviate Vectorizer Configuration
436
+
437
+ Weaviate supports multiple vectorizers. We should configure based on EMBEDDINGS_PROVIDER:
438
+
439
+ ```typescript
440
+ // src/weaviate/schema.ts
441
+
442
+ import weaviate from 'weaviate-client';
443
+ import { config } from '../config.js';
444
+
445
+ export function getVectorizerConfig() {
446
+ switch (config.embeddings.provider) {
447
+ case 'openai':
448
+ return weaviate.configure.vectorizer.text2VecOpenAI({
449
+ model: config.embeddings.model,
450
+ // Weaviate will use X-OpenAI-Api-Key header
451
+ });
452
+
453
+ case 'cohere':
454
+ return weaviate.configure.vectorizer.text2VecCohere({
455
+ model: config.embeddings.model,
456
+ // Weaviate will use X-Cohere-Api-Key header
457
+ });
458
+
459
+ case 'huggingface':
460
+ return weaviate.configure.vectorizer.text2VecHuggingFace({
461
+ model: config.embeddings.model,
462
+ });
463
+
464
+ default:
465
+ throw new Error(`Unsupported embeddings provider: ${config.embeddings.provider}`);
466
+ }
467
+ }
468
+
469
+ export function getEmbeddingsHeaders(): Record<string, string> {
470
+ switch (config.embeddings.provider) {
471
+ case 'openai':
472
+ return { 'X-OpenAI-Api-Key': config.embeddings.openai.apiKey };
473
+
474
+ case 'cohere':
475
+ return { 'X-Cohere-Api-Key': config.embeddings.cohere.apiKey };
476
+
477
+ default:
478
+ return {};
479
+ }
480
+ }
481
+ ```
482
+
483
+ ### Update Weaviate Client
484
+
485
+ ```typescript
486
+ // src/weaviate/client.ts
487
+
488
import { getEmbeddingsHeaders } from './schema.js';

/**
 * Returns the shared Weaviate client, connecting to Weaviate Cloud on first call.
 * NOTE(review): `client`, `weaviate`, `config` and `WeaviateClient` are assumed
 * to be declared earlier in this module — the snippet shows only the changed
 * function.
 */
export async function initWeaviateClient(): Promise<WeaviateClient> {
  if (client) {
    // Already connected — reuse the module-level singleton.
    return client;
  }

  client = await weaviate.connectToWeaviateCloud(config.weaviate.url, {
    // API key auth is optional (e.g. local instances may run without it).
    authCredentials: config.weaviate.apiKey
      ? new weaviate.ApiKey(config.weaviate.apiKey)
      : undefined,
    headers: getEmbeddingsHeaders(), // ✅ Provider-agnostic headers
  });

  console.log('[Weaviate] Client initialized');
  return client;
}
505
+ ```
506
+
507
+ ---
508
+
509
+ ## Recommended .env.example
510
+
511
+ ```env
512
+ # Weaviate Configuration
513
+ WEAVIATE_URL=http://localhost:8080
514
+ WEAVIATE_API_KEY=
515
+
516
+ # LLM Provider Configuration
517
+ # Options: bedrock | openai | anthropic | cohere
518
+ LLM_PROVIDER=bedrock
519
+ LLM_MODEL=anthropic.claude-3-5-sonnet-20241022-v2:0
520
+
521
+ # AWS Bedrock Configuration (when LLM_PROVIDER=bedrock)
522
+ AWS_REGION=us-east-1
523
+ AWS_ACCESS_KEY_ID=
524
+ AWS_SECRET_ACCESS_KEY=
525
+ AWS_SESSION_TOKEN=
526
+
527
+ # OpenAI Configuration (when LLM_PROVIDER=openai)
528
+ # OPENAI_API_KEY=sk-...
529
+ # OPENAI_ORG_ID=
530
+
531
+ # Anthropic Direct Configuration (when LLM_PROVIDER=anthropic)
532
+ # ANTHROPIC_API_KEY=sk-ant-...
533
+
534
+ # Cohere Configuration (when LLM_PROVIDER=cohere)
535
+ # COHERE_API_KEY=
536
+
537
+ # Embeddings Provider Configuration (for Weaviate)
538
+ # Options: openai | cohere | huggingface
539
+ EMBEDDINGS_PROVIDER=openai
540
+ EMBEDDINGS_MODEL=text-embedding-3-small
541
+
542
+ # OpenAI Embeddings (when EMBEDDINGS_PROVIDER=openai)
543
+ OPENAI_EMBEDDINGS_API_KEY=sk-...
544
+
545
+ # Cohere Embeddings (when EMBEDDINGS_PROVIDER=cohere)
546
+ # COHERE_EMBEDDINGS_API_KEY=
547
+
548
+ # Firebase Admin Configuration (using firebase-admin-sdk-v8)
549
+ FIREBASE_ADMIN_SERVICE_ACCOUNT_KEY='{"type":"service_account",...}'
550
+ FIREBASE_PROJECT_ID=remember-mcp-dev
551
+
552
+ # Firebase Client Configuration (for utility scripts)
553
+ FIREBASE_CLIENT_API_KEY=
554
+ FIREBASE_CLIENT_AUTH_DOMAIN=
555
+ FIREBASE_CLIENT_PROJECT_ID=
556
+ FIREBASE_CLIENT_STORAGE_BUCKET=
557
+ FIREBASE_CLIENT_MESSAGING_SENDER_ID=
558
+ FIREBASE_CLIENT_APP_ID=
559
+ FIREBASE_CLIENT_MEASUREMENT_ID=
560
+
561
+ # Server Configuration
562
+ PORT=3000
563
+ NODE_ENV=development
564
+ LOG_LEVEL=info
565
+
566
+ # MCP Configuration
567
+ MCP_TRANSPORT=sse
568
+ ```
569
+
570
+ ---
571
+
572
+ ## Benefits
573
+
574
+ ### 1. **Provider Flexibility**
575
+ - Switch providers by changing one env var
576
+ - Support multiple providers simultaneously
577
+ - Easy to add new providers
578
+
579
+ ### 2. **Cost Optimization**
580
+ - Use cheaper providers for embeddings
581
+ - Use powerful providers for complex tasks
582
+ - Mix and match based on needs
583
+
584
+ ### 3. **Vendor Independence**
585
+ - Not locked into one provider
586
+ - Can migrate if pricing changes
587
+ - Can use different providers per environment
588
+
589
+ ### 4. **Development Flexibility**
590
+ - Use OpenAI in development (simple)
591
+ - Use Bedrock in production (cost-effective)
592
+ - Test with multiple providers
593
+
594
+ ---
595
+
596
+ ## Implementation Phases
597
+
598
+ ### Phase 1: Configuration (M1 - Now)
599
+ - ✅ Add LLM and embeddings config to .env.example
600
+ - ✅ Update config.ts with provider configs
601
+ - ✅ Document in design doc
602
+
603
+ ### Phase 2: Embeddings (M2 - Core Memory System)
604
+ - Implement embeddings provider abstraction
605
+ - Update Weaviate client to use provider-agnostic headers
606
+ - Support OpenAI, Cohere, HuggingFace
607
+
608
+ ### Phase 3: LLM Providers (M3-M5 - Advanced Features)
609
+ - Implement LLM provider interface
610
+ - Create provider implementations (Bedrock, OpenAI, Anthropic)
611
+ - Use for trust validation, query interpretation, template suggestions
612
+
613
+ ---
614
+
615
+ ## Dependencies
616
+
617
+ ### Required Packages
618
+
619
+ ```json
620
+ {
621
+ "dependencies": {
622
+ // For Bedrock
623
+ "@aws-sdk/client-bedrock-runtime": "^3.x.x",
624
+
625
+ // For Anthropic Direct
626
+ "@anthropic-ai/sdk": "^0.x.x",
627
+
628
+ // For Cohere
629
+ "cohere-ai": "^7.x.x"
630
+ }
631
+ }
632
+ ```
633
+
634
+ **Note**: Install only the providers you plan to use. Not all are required.
635
+
636
+ ---
637
+
638
+ ## Testing
639
+
640
+ ### Mock Provider for Tests
641
+
642
+ ```typescript
643
+ // tests/mocks/llm.mock.ts
644
+
645
+ export class MockLLMProvider implements LLMProvider {
646
+ async complete(messages: LLMMessage[]): Promise<LLMCompletionResult> {
647
+ return {
648
+ content: 'Mock response',
649
+ model: 'mock-model',
650
+ usage: { inputTokens: 10, outputTokens: 20, totalTokens: 30 },
651
+ finishReason: 'stop',
652
+ };
653
+ }
654
+
655
+ validateConfig(): void {
656
+ // No-op for tests
657
+ }
658
+ }
659
+ ```
660
+
661
+ ---
662
+
663
+ ## Recommendation
664
+
665
+ ### For remember-mcp
666
+
667
+ **Phase 1 (M1 - Now)**:
668
+ - ✅ Add provider-agnostic config to .env.example
669
+ - ✅ Update config.ts structure
670
+ - ✅ Document strategy in this design doc
671
+ - ⏳ Don't implement providers yet (not needed until M2-M3)
672
+
673
+ **Phase 2 (M2 - When needed)**:
674
+ - Implement embeddings abstraction for Weaviate
675
+ - Support OpenAI embeddings (primary)
676
+ - Optional: Add Cohere support
677
+
678
+ **Phase 3 (M3-M5 - When needed)**:
679
+ - Implement LLM provider interface
680
+ - Add Bedrock provider (your preference)
681
+ - Optional: Add OpenAI, Anthropic providers
682
+
683
+ ### Immediate Action
684
+
685
+ Update .env.example to include LLM provider configuration, but don't implement providers until they're actually needed.
686
+
687
+ ---
688
+
689
+ **Status**: Design Specification
690
+ **Implementation**: Phase 1 (config only) for M1, full implementation in M2-M5
691
+ **Recommendation**: Configure now, implement later when LLM features are needed