@cortexmemory/cli 0.27.3 → 0.28.0
This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/commands/db.d.ts.map +1 -1
- package/dist/commands/db.js +18 -6
- package/dist/commands/db.js.map +1 -1
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +191 -80
- package/dist/commands/deploy.js.map +1 -1
- package/dist/commands/dev.js +3 -2
- package/dist/commands/dev.js.map +1 -1
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +12 -0
- package/dist/commands/init.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +35 -13
- package/dist/utils/app-template-sync.js.map +1 -1
- package/dist/utils/init/quickstart-setup.d.ts.map +1 -1
- package/dist/utils/init/quickstart-setup.js.map +1 -1
- package/package.json +4 -4
- package/templates/basic/.env.local.example +23 -0
- package/templates/basic/README.md +181 -56
- package/templates/basic/package-lock.json +2180 -406
- package/templates/basic/package.json +23 -5
- package/templates/basic/src/__tests__/chat.test.ts +340 -0
- package/templates/basic/src/__tests__/cortex.test.ts +260 -0
- package/templates/basic/src/__tests__/display.test.ts +455 -0
- package/templates/basic/src/__tests__/e2e/fact-extraction.test.ts +498 -0
- package/templates/basic/src/__tests__/e2e/memory-flow.test.ts +355 -0
- package/templates/basic/src/__tests__/e2e/server-e2e.test.ts +414 -0
- package/templates/basic/src/__tests__/helpers/test-utils.ts +345 -0
- package/templates/basic/src/__tests__/integration/chat-flow.test.ts +422 -0
- package/templates/basic/src/__tests__/integration/server.test.ts +441 -0
- package/templates/basic/src/__tests__/llm.test.ts +344 -0
- package/templates/basic/src/chat.ts +300 -0
- package/templates/basic/src/cortex.ts +203 -0
- package/templates/basic/src/display.ts +425 -0
- package/templates/basic/src/index.ts +194 -64
- package/templates/basic/src/llm.ts +214 -0
- package/templates/basic/src/server.ts +280 -0
- package/templates/basic/vitest.config.ts +33 -0
- package/templates/basic/vitest.e2e.config.ts +28 -0
- package/templates/basic/vitest.integration.config.ts +25 -0
- package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +61 -19
- package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +14 -18
- package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +4 -7
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +95 -23
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +339 -0
- package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +16 -16
- package/templates/vercel-ai-quickstart/app/globals.css +24 -9
- package/templates/vercel-ai-quickstart/app/page.tsx +41 -15
- package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +3 -1
- package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +6 -6
- package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +19 -8
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +46 -16
- package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +10 -5
- package/templates/vercel-ai-quickstart/jest.config.js +8 -1
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
- package/templates/vercel-ai-quickstart/lib/password.ts +5 -5
- package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
- package/templates/vercel-ai-quickstart/next.config.js +10 -2
- package/templates/vercel-ai-quickstart/package.json +23 -12
- package/templates/vercel-ai-quickstart/test-api.mjs +303 -0
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +483 -0
- package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +40 -40
- package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +8 -8
- package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +12 -8
- package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +4 -1
--- a/package/templates/basic/src/index.ts
+++ b/package/templates/basic/src/index.ts
@@ -1,85 +1,215 @@
 /**
- * Cortex Memory
+ * Cortex Memory - CLI Demo
  *
- *
- *
+ * Interactive command-line interface for demonstrating Cortex Memory SDK.
+ * Shows the "thinking" behind each memory layer as messages are processed.
  *
- *
- *
+ * Usage:
+ *   npm start          # Start CLI mode
+ *   npm run server     # Start HTTP server mode
+ *
+ * Commands:
+ *   /recall <query>    # Search memories without storing
+ *   /facts             # List all stored facts
+ *   /history           # Show conversation history
+ *   /new               # Start a new conversation
+ *   /config            # Show current configuration
+ *   /clear             # Clear the screen
+ *   /exit              # Exit the demo
  */
 
-import
+import * as readline from "readline";
+import { closeCortex } from "./cortex.js";
+import {
+  chat,
+  recallMemories,
+  listFacts,
+  getHistory,
+  newConversation,
+  printConfig,
+  getConversationId,
+} from "./chat.js";
+import { printWelcome, printError, printInfo, printSuccess } from "./display.js";
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// CLI Interface
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
 
-
-
-
+const rl = readline.createInterface({
+  input: process.stdin,
+  output: process.stdout,
 });
 
-
-
-
+/**
+ * Process a line of input
+ */
+async function processInput(input: string): Promise<boolean> {
+  const trimmed = input.trim();
+
+  if (!trimmed) {
+    return true; // Continue
+  }
 
-//
-
-
+  // Handle commands
+  if (trimmed.startsWith("/")) {
+    return handleCommand(trimmed);
+  }
 
+  // Regular chat message
   try {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    console.log("");
+    const result = await chat(trimmed);
+
+    // Print assistant response
+    console.log("\x1b[36mAssistant:\x1b[0m");
+    console.log(result.response);
+    console.log("");
+
+    return true;
+  } catch (error) {
+    printError("Failed to process message", error instanceof Error ? error : undefined);
+    return true;
+  }
+}
+
+/**
+ * Handle slash commands
+ */
+async function handleCommand(input: string): Promise<boolean> {
+  const parts = input.slice(1).split(" ");
+  const command = parts[0].toLowerCase();
+  const args = parts.slice(1).join(" ");
+
+  switch (command) {
+    case "exit":
+    case "quit":
+    case "q":
+      printInfo("Goodbye!");
+      return false;
+
+    case "clear":
+    case "cls":
+      console.clear();
+      printWelcome("cli");
+      return true;
+
+    case "recall":
+    case "search":
+      if (!args) {
+        printInfo("Usage: /recall <query>");
+      } else {
+        await recallMemories(args);
+      }
+      return true;
+
+    case "facts":
+      await listFacts();
+      return true;
+
+    case "history":
+    case "h":
+      await getHistory();
+      return true;
+
+    case "new":
+      newConversation();
+      return true;
+
+    case "config":
+    case "status":
+      printConfig();
+      return true;
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    case "help":
+    case "?":
+      printHelp();
+      return true;
+
+    default:
+      printInfo(`Unknown command: /${command}. Type /help for available commands.`);
+      return true;
+  }
+}
+
+/**
+ * Print help message
+ */
+function printHelp(): void {
+  console.log("");
+  console.log("📖 Available Commands:");
+  console.log("");
+  console.log("  /recall <query>   Search memories without storing");
+  console.log("  /facts            List all stored facts");
+  console.log("  /history          Show conversation history");
+  console.log("  /new              Start a new conversation");
+  console.log("  /config           Show current configuration");
+  console.log("  /clear            Clear the screen");
+  console.log("  /help             Show this help message");
+  console.log("  /exit             Exit the demo");
+  console.log("");
+}
+
+/**
+ * Prompt for input
+ */
+function prompt(): void {
+  rl.question("\x1b[33mYou:\x1b[0m ", async (input) => {
+    const shouldContinue = await processInput(input);
+
+    if (shouldContinue) {
+      prompt();
+    } else {
+      cleanup();
     }
+  });
+}
 
-
-
-
-
+/**
+ * Cleanup on exit
+ */
+function cleanup(): void {
+  closeCortex();
+  rl.close();
+  process.exit(0);
+}
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Main Entry Point
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+async function main(): Promise<void> {
+  // Check for required environment
+  if (!process.env.CONVEX_URL) {
+    printError(
+      "CONVEX_URL is required. Set it in .env.local or run: cortex init",
     );
-
-    console.log(" - Set up graph database for advanced queries");
-    console.log(" - Build multi-agent systems with context chains\n");
-  } catch (error) {
-    console.error("❌ Error:", error);
-    throw error;
-  } finally {
-    // Cleanup
-    cortex.close();
+    process.exit(1);
   }
+
+  // Print welcome
+  printWelcome("cli");
+
+  // Initialize conversation
+  const convId = getConversationId();
+  printSuccess(`Conversation: ${convId}`);
+  console.log("");
+
+  // Handle signals
+  process.on("SIGINT", () => {
+    console.log("");
+    cleanup();
+  });
+
+  process.on("SIGTERM", () => {
+    cleanup();
+  });
+
+  // Start prompt loop
+  prompt();
 }
 
-// Run
+// Run
 main().catch((error) => {
-
+  printError("Fatal error", error instanceof Error ? error : undefined);
   process.exit(1);
 });
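The rewritten index.ts drives its REPL with a recursive prompt(): rl.question only re-arms after the async processInput settles, so a slow chat() call can never interleave with the next prompt. For comparison, a minimal sketch of the same loop using Node's promise-based readline (not part of this package; it assumes only the processInput contract shown above):

// Sketch: equivalent prompt loop with node:readline/promises (Node 17+).
import * as readline from "node:readline/promises";

// Assumed: same contract as processInput in the diff above.
declare function processInput(input: string): Promise<boolean>;

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});

async function run(): Promise<void> {
  // Await the full round-trip before prompting again, mirroring
  // the callback recursion in the template.
  for (;;) {
    const input = await rl.question("\x1b[33mYou:\x1b[0m ");
    if (!(await processInput(input))) break; // e.g. /exit returned false
  }
  rl.close();
}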
--- /dev/null
+++ b/package/templates/basic/src/llm.ts
@@ -0,0 +1,214 @@
+/**
+ * LLM Integration (Optional)
+ *
+ * Provides AI-powered responses when OPENAI_API_KEY is set.
+ * Falls back to echo mode with memory context when no API key is available.
+ */
+
+import type { Memory, Fact } from "./chat.js";
+import { CONFIG } from "./cortex.js";
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Configuration
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.
+
+Your capabilities:
+- You remember everything users tell you across conversations
+- You can recall facts, preferences, and context from past interactions
+- You naturally reference what you've learned about the user
+
+Behavior guidelines:
+- When you remember something from a previous conversation, mention it naturally
+- If asked about something you learned, reference it specifically
+- Be conversational and friendly
+- Help demonstrate the memory system by showing what you remember
+
+Example interactions:
+- User: "My name is Alex" → Remember and use their name
+- User: "I work at Acme Corp" → Remember their employer
+- User: "My favorite color is blue" → Remember their preference
+- User: "What do you know about me?" → List everything you remember`;
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// LLM State
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+let openaiClient: unknown = null;
+let llmAvailable = false;
+
+/**
+ * Check if LLM is available
+ */
+export function isLLMAvailable(): boolean {
+  return llmAvailable || !!process.env.OPENAI_API_KEY;
+}
+
+/**
+ * Initialize OpenAI client if API key is available
+ */
+async function getOpenAIClient(): Promise<unknown> {
+  if (openaiClient) return openaiClient;
+
+  if (!process.env.OPENAI_API_KEY) {
+    return null;
+  }
+
+  try {
+    const { default: OpenAI } = await import("openai");
+    openaiClient = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+    llmAvailable = true;
+    return openaiClient;
+  } catch {
+    // OpenAI not installed
+    return null;
+  }
+}
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Response Generation
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+/**
+ * Generate a response using LLM or echo mode
+ */
+export async function generateResponse(
+  userMessage: string,
+  memories: Memory[],
+  facts: Fact[],
+): Promise<string> {
+  const client = await getOpenAIClient();
+
+  if (client) {
+    return generateLLMResponse(client, userMessage, memories, facts);
+  } else {
+    return generateEchoResponse(userMessage, memories, facts);
+  }
+}
+
+/**
+ * Generate response using OpenAI
+ */
+async function generateLLMResponse(
+  client: unknown,
+  userMessage: string,
+  memories: Memory[],
+  facts: Fact[],
+): Promise<string> {
+  // Build context from memories and facts
+  const context = buildContext(memories, facts);
+
+  // Build messages array
+  const messages = [
+    { role: "system" as const, content: SYSTEM_PROMPT },
+  ];
+
+  // Add memory context if available
+  if (context) {
+    messages.push({
+      role: "system" as const,
+      content: `Here is what you remember about this user:\n\n${context}`,
+    });
+  }
+
+  // Add user message
+  messages.push({ role: "user" as const, content: userMessage });
+
+  try {
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const openai = client as any;
+    const completion = await openai.chat.completions.create({
+      model: "gpt-4o-mini",
+      messages,
+      max_tokens: 500,
+      temperature: 0.7,
+    });
+
+    return completion.choices[0]?.message?.content || "I couldn't generate a response.";
+  } catch (error) {
+    if (CONFIG.debug) {
+      console.error("[Debug] OpenAI error:", error);
+    }
+    // Fall back to echo mode on error
+    return generateEchoResponse(userMessage, memories, facts);
+  }
+}
+
+/**
+ * Generate echo response (no LLM)
+ */
+function generateEchoResponse(
+  userMessage: string,
+  memories: Memory[],
+  facts: Fact[],
+): string {
+  const lines: string[] = [];
+
+  lines.push(`I heard you say: "${userMessage}"`);
+  lines.push("");
+
+  if (memories.length > 0 || facts.length > 0) {
+    lines.push("📚 Here's what I remember about you:");
+    lines.push("");
+
+    if (facts.length > 0) {
+      lines.push("Facts:");
+      for (const fact of facts.slice(0, 5)) {
+        const type = fact.factType ? ` [${fact.factType}]` : "";
+        lines.push(`  • ${fact.content}${type}`);
+      }
+      if (facts.length > 5) {
+        lines.push(`  ... and ${facts.length - 5} more facts`);
+      }
+      lines.push("");
+    }
+
+    if (memories.length > 0) {
+      lines.push("Recent conversations:");
+      for (const mem of memories.slice(0, 3)) {
+        const content = mem.content?.slice(0, 80) || "";
+        lines.push(`  • ${content}${content.length >= 80 ? "..." : ""}`);
+      }
+      if (memories.length > 3) {
+        lines.push(`  ... and ${memories.length - 3} more memories`);
+      }
+    }
+  } else {
+    lines.push("💭 I don't have any memories of you yet.");
+    lines.push("   Tell me something about yourself!");
+  }
+
+  lines.push("");
+  lines.push("ℹ️ Running in echo mode (no OPENAI_API_KEY)");
+  lines.push("   Set OPENAI_API_KEY in .env.local for AI responses.");
+
+  return lines.join("\n");
+}
+
+/**
+ * Build context string from memories and facts
+ */
+function buildContext(memories: Memory[], facts: Fact[]): string {
+  const parts: string[] = [];
+
+  // Add facts
+  if (facts.length > 0) {
+    parts.push("Known facts about the user:");
+    for (const fact of facts) {
+      const type = fact.factType ? ` (${fact.factType})` : "";
+      parts.push(`- ${fact.content}${type}`);
+    }
+    parts.push("");
+  }
+
+  // Add relevant memories
+  if (memories.length > 0) {
+    parts.push("Relevant past conversations:");
+    for (const mem of memories.slice(0, 5)) {
+      parts.push(`- ${mem.content}`);
+    }
+  }
+
+  return parts.join("\n");
+}
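llm.ts exports generateResponse and isLLMAvailable, so the LLM/echo fallback can be exercised without running the CLI. A minimal usage sketch (not part of this package; the Memory/Fact literals assume only the content and factType fields the module actually reads):

// Sketch: calling the template's llm.ts helpers directly. Run with or
// without OPENAI_API_KEY to compare LLM mode and echo mode.
import { generateResponse, isLLMAvailable } from "./llm.js";
import type { Memory, Fact } from "./chat.js";

// Assumed shapes: llm.ts reads only memory.content, fact.content, fact.factType.
const memories = [{ content: "User mentioned they are learning Rust" }] as Memory[];
const facts = [{ content: "User's name is Alex", factType: "identity" }] as Fact[];

console.log(isLLMAvailable() ? "LLM mode (gpt-4o-mini)" : "Echo mode");

// With a key set, facts and memories are injected as a second system
// message; without one, they come back in the formatted echo summary.
const reply = await generateResponse("What do you know about me?", memories, facts);
console.log(reply);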