@cortexmemory/cli 0.27.4 → 0.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/db.d.ts.map +1 -1
- package/dist/commands/db.js +18 -6
- package/dist/commands/db.js.map +1 -1
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +74 -34
- package/dist/commands/deploy.js.map +1 -1
- package/dist/commands/dev.js +3 -2
- package/dist/commands/dev.js.map +1 -1
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +12 -0
- package/dist/commands/init.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +3 -1
- package/dist/utils/app-template-sync.js.map +1 -1
- package/dist/utils/init/quickstart-setup.d.ts.map +1 -1
- package/dist/utils/init/quickstart-setup.js.map +1 -1
- package/package.json +4 -4
- package/templates/basic/.env.local.example +23 -0
- package/templates/basic/README.md +181 -56
- package/templates/basic/package-lock.json +2180 -406
- package/templates/basic/package.json +23 -5
- package/templates/basic/src/__tests__/chat.test.ts +340 -0
- package/templates/basic/src/__tests__/cortex.test.ts +260 -0
- package/templates/basic/src/__tests__/display.test.ts +455 -0
- package/templates/basic/src/__tests__/e2e/fact-extraction.test.ts +498 -0
- package/templates/basic/src/__tests__/e2e/memory-flow.test.ts +355 -0
- package/templates/basic/src/__tests__/e2e/server-e2e.test.ts +414 -0
- package/templates/basic/src/__tests__/helpers/test-utils.ts +345 -0
- package/templates/basic/src/__tests__/integration/chat-flow.test.ts +422 -0
- package/templates/basic/src/__tests__/integration/server.test.ts +441 -0
- package/templates/basic/src/__tests__/llm.test.ts +344 -0
- package/templates/basic/src/chat.ts +300 -0
- package/templates/basic/src/cortex.ts +203 -0
- package/templates/basic/src/display.ts +425 -0
- package/templates/basic/src/index.ts +194 -64
- package/templates/basic/src/llm.ts +214 -0
- package/templates/basic/src/server.ts +280 -0
- package/templates/basic/vitest.config.ts +33 -0
- package/templates/basic/vitest.e2e.config.ts +28 -0
- package/templates/basic/vitest.integration.config.ts +25 -0
- package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +6 -9
- package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +14 -18
- package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +4 -7
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +28 -11
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +19 -13
- package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +16 -16
- package/templates/vercel-ai-quickstart/app/globals.css +24 -9
- package/templates/vercel-ai-quickstart/app/page.tsx +25 -13
- package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +3 -1
- package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +6 -6
- package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +19 -8
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +41 -14
- package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +10 -5
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +3 -3
- package/templates/vercel-ai-quickstart/lib/password.ts +5 -5
- package/templates/vercel-ai-quickstart/next.config.js +10 -2
- package/templates/vercel-ai-quickstart/package.json +18 -11
- package/templates/vercel-ai-quickstart/test-api.mjs +131 -100
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +73 -44
- package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +40 -40
- package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +8 -8
- package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +12 -8
- package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +4 -1
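The basic template now ships a full Vitest setup: unit suites under src/__tests__/ plus separate integration and e2e configs (vitest.config.ts, vitest.integration.config.ts, vitest.e2e.config.ts). A minimal sketch of how such a three-way split is typically expressed — an illustrative assumption, not the template's actual config contents:

// vitest.config.ts (illustrative sketch only, not the shipped file)
import { defineConfig } from "vitest/config";

export default defineConfig({
  test: {
    environment: "node",
    include: ["src/__tests__/**/*.test.ts"],
    // the integration and e2e configs would instead include only their own folders
    exclude: ["src/__tests__/integration/**", "src/__tests__/e2e/**"],
  },
});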
package/templates/basic/src/__tests__/llm.test.ts
@@ -0,0 +1,344 @@
/**
 * Unit tests for llm.ts
 */

import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";

// Mock cortex.js CONFIG
vi.mock("../cortex.js", () => ({
  CONFIG: {
    debug: false,
  },
}));

describe("llm", () => {
  const originalEnv = process.env;

  beforeEach(() => {
    vi.resetModules();
    process.env = { ...originalEnv };
  });

  afterEach(() => {
    process.env = originalEnv;
    vi.clearAllMocks();
  });

  describe("isLLMAvailable", () => {
    it("returns false when OPENAI_API_KEY is not set", async () => {
      delete process.env.OPENAI_API_KEY;

      const { isLLMAvailable } = await import("../llm.js");

      expect(isLLMAvailable()).toBe(false);
    });

    it("returns true when OPENAI_API_KEY is set", async () => {
      process.env.OPENAI_API_KEY = "sk-test-key";

      const { isLLMAvailable } = await import("../llm.js");

      expect(isLLMAvailable()).toBe(true);
    });
  });

  describe("generateResponse", () => {
    describe("echo mode (no LLM)", () => {
      beforeEach(() => {
        delete process.env.OPENAI_API_KEY;
      });

      it("echoes the user message", async () => {
        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello world", [], []);

        expect(response).toContain('I heard you say: "Hello world"');
      });

      it("shows empty memory message when no memories", async () => {
        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello", [], []);

        expect(response).toContain("I don't have any memories of you yet");
        expect(response).toContain("Tell me something about yourself");
      });

      it("shows facts when available", async () => {
        const { generateResponse } = await import("../llm.js");

        const facts = [
          { content: "User's name is Alex", factType: "identity" },
          { content: "User likes coffee", factType: "preference" },
        ];

        const response = await generateResponse("Hello", [], facts);

        expect(response).toContain("Here's what I remember about you");
        expect(response).toContain("Facts:");
        expect(response).toContain("User's name is Alex");
        expect(response).toContain("[identity]");
        expect(response).toContain("User likes coffee");
      });

      it("shows memories when available", async () => {
        const { generateResponse } = await import("../llm.js");

        const memories = [
          { content: "User said hello yesterday" },
          { content: "User asked about the weather" },
        ];

        const response = await generateResponse("Hello", memories, []);

        expect(response).toContain("Here's what I remember about you");
        expect(response).toContain("Recent conversations:");
        expect(response).toContain("User said hello yesterday");
        expect(response).toContain("User asked about the weather");
      });

      it("truncates long memory content", async () => {
        const { generateResponse } = await import("../llm.js");

        const memories = [
          {
            content: "A".repeat(100), // 100 characters
          },
        ];

        const response = await generateResponse("Hello", memories, []);

        // Should truncate at 80 chars with "..."
        expect(response).toContain("A".repeat(80) + "...");
      });

      it("truncates long fact lists", async () => {
        const { generateResponse } = await import("../llm.js");

        const facts = Array(10)
          .fill(null)
          .map((_, i) => ({ content: `Fact ${i}` }));

        const response = await generateResponse("Hello", [], facts);

        expect(response).toContain("... and 5 more facts");
      });

      it("truncates long memory lists", async () => {
        const { generateResponse } = await import("../llm.js");

        const memories = Array(10)
          .fill(null)
          .map((_, i) => ({ content: `Memory ${i}` }));

        const response = await generateResponse("Hello", memories, []);

        expect(response).toContain("... and 7 more memories");
      });

      it("shows info about echo mode", async () => {
        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello", [], []);

        expect(response).toContain("Running in echo mode");
        expect(response).toContain("OPENAI_API_KEY");
      });
    });

    describe("LLM mode", () => {
      it("calls OpenAI API when API key is set", async () => {
        process.env.OPENAI_API_KEY = "sk-test-key";

        const mockCreate = vi.fn().mockResolvedValue({
          choices: [{ message: { content: "Hello from AI!" } }],
        });

        vi.doMock("openai", () => ({
          default: vi.fn().mockImplementation(() => ({
            chat: {
              completions: {
                create: mockCreate,
              },
            },
          })),
        }));

        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello", [], []);

        expect(response).toBe("Hello from AI!");
        expect(mockCreate).toHaveBeenCalledWith(
          expect.objectContaining({
            model: "gpt-4o-mini",
            messages: expect.arrayContaining([
              expect.objectContaining({ role: "system" }),
              expect.objectContaining({ role: "user", content: "Hello" }),
            ]),
          }),
        );
      });

      it("includes memory context in system message", async () => {
        process.env.OPENAI_API_KEY = "sk-test-key";

        const mockCreate = vi.fn().mockResolvedValue({
          choices: [{ message: { content: "AI response" } }],
        });

        vi.doMock("openai", () => ({
          default: vi.fn().mockImplementation(() => ({
            chat: {
              completions: {
                create: mockCreate,
              },
            },
          })),
        }));

        const { generateResponse } = await import("../llm.js");

        const facts = [{ content: "User's name is Alex", factType: "identity" }];
        const memories = [{ content: "User said hello" }];

        await generateResponse("Hello", memories, facts);

        expect(mockCreate).toHaveBeenCalledWith(
          expect.objectContaining({
            messages: expect.arrayContaining([
              expect.objectContaining({
                role: "system",
                content: expect.stringContaining("Alex"),
              }),
            ]),
          }),
        );
      });

      it("falls back to echo mode on API error", async () => {
        process.env.OPENAI_API_KEY = "sk-test-key";

        vi.doMock("openai", () => ({
          default: vi.fn().mockImplementation(() => ({
            chat: {
              completions: {
                create: vi.fn().mockRejectedValue(new Error("API Error")),
              },
            },
          })),
        }));

        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello", [], []);

        // Should fall back to echo mode
        expect(response).toContain('I heard you say: "Hello"');
        expect(response).toContain("echo mode");
      });

      it("handles empty API response", async () => {
        process.env.OPENAI_API_KEY = "sk-test-key";

        vi.doMock("openai", () => ({
          default: vi.fn().mockImplementation(() => ({
            chat: {
              completions: {
                create: vi.fn().mockResolvedValue({
                  choices: [{ message: { content: null } }],
                }),
              },
            },
          })),
        }));

        const { generateResponse } = await import("../llm.js");

        const response = await generateResponse("Hello", [], []);

        expect(response).toBe("I couldn't generate a response.");
      });
    });
  });

  describe("context building", () => {
    it("builds context from facts and memories", async () => {
      delete process.env.OPENAI_API_KEY;

      const { generateResponse } = await import("../llm.js");

      const facts = [
        { content: "User's name is Alex", factType: "identity" },
      ];
      const memories = [
        { content: "User mentioned they like TypeScript" },
      ];

      const response = await generateResponse("Hello", memories, facts);

      expect(response).toContain("Alex");
      expect(response).toContain("TypeScript");
    });

    it("handles facts without factType", async () => {
      delete process.env.OPENAI_API_KEY;

      const { generateResponse } = await import("../llm.js");

      const facts = [{ content: "Some fact without type" }];

      const response = await generateResponse("Hello", [], facts);

      expect(response).toContain("Some fact without type");
      // Should not have brackets for type
      expect(response).not.toMatch(/\[\]/);
    });

    it("handles memories without content", async () => {
      delete process.env.OPENAI_API_KEY;

      const { generateResponse } = await import("../llm.js");

      const memories = [{ content: undefined } as { content?: string }];

      // Should not throw
      const response = await generateResponse("Hello", memories, []);

      expect(response).toBeDefined();
    });
  });

  describe("system prompt", () => {
    it("includes capabilities and guidelines", async () => {
      process.env.OPENAI_API_KEY = "sk-test-key";

      let capturedMessages: unknown[] = [];
      vi.doMock("openai", () => ({
        default: vi.fn().mockImplementation(() => ({
          chat: {
            completions: {
              create: vi.fn().mockImplementation((opts: { messages: unknown[] }) => {
                capturedMessages = opts.messages;
                return { choices: [{ message: { content: "response" } }] };
              }),
            },
          },
        })),
      }));

      const { generateResponse } = await import("../llm.js");

      await generateResponse("Hello", [], []);

      const systemMessage = capturedMessages.find(
        (m: unknown) => (m as { role: string }).role === "system",
      ) as { content: string } | undefined;

      expect(systemMessage?.content).toContain("Cortex");
      expect(systemMessage?.content).toContain("remember");
      expect(systemMessage?.content).toContain("recall");
    });
  });
});
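The suite above documents the public surface of the template's new llm.ts: isLLMAvailable() keys off OPENAI_API_KEY, and generateResponse(userMessage, memories, facts) either calls OpenAI's gpt-4o-mini chat completions or falls back to an echo-mode summary of the supplied memories and facts. A short usage sketch based only on the behavior these tests assert (file name and sample data are illustrative):

// usage-sketch.ts — assumes llm.ts behaves as the tests above describe
import { generateResponse, isLLMAvailable } from "./llm.js";

const memories = [{ content: "User mentioned they like TypeScript" }];
const facts = [{ content: "User's name is Alex", factType: "identity" }];

console.log(isLLMAvailable() ? "LLM mode (OpenAI)" : "Echo mode (no OPENAI_API_KEY)");

// Echo mode repeats the message and lists the recalled context;
// LLM mode sends a system prompt plus the user message to gpt-4o-mini.
const reply = await generateResponse("What do you remember about me?", memories, facts);
console.log(reply);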
package/templates/basic/src/chat.ts
@@ -0,0 +1,300 @@
/**
 * Core Chat Logic
 *
 * Handles the chat flow: recall → generate response → remember
 * This mirrors the Vercel AI quickstart's chat route logic.
 */

import {
  getCortex,
  CONFIG,
  buildRememberParams,
  createLayerObserver,
} from "./cortex.js";
import {
  printRecallResults,
  printOrchestrationComplete,
  printInfo,
  startSpinner,
  stopSpinner,
} from "./display.js";
import { generateResponse, isLLMAvailable } from "./llm.js";

// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
// Types
// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

export interface ChatResult {
  response: string;
  conversationId: string;
  memoriesRecalled: number;
  factsRecalled: number;
}

export interface Memory {
  content?: string;
  importance?: number;
  source?: string;
  conversationId?: string;
}

export interface Fact {
  content?: string;
  factType?: string;
  confidence?: number;
  subject?: string;
}

// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
// Conversation State
// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

let currentConversationId: string | null = null;

/**
 * Generate a new conversation ID
 */
export function generateConversationId(): string {
  return `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
}

/**
 * Get or create current conversation ID
 */
export function getConversationId(): string {
  if (!currentConversationId) {
    currentConversationId = generateConversationId();
  }
  return currentConversationId;
}

/**
 * Start a new conversation
 */
export function newConversation(): string {
  currentConversationId = generateConversationId();
  printInfo(`Started new conversation: ${currentConversationId}`);
  return currentConversationId;
}

// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
// Main Chat Function
// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

/**
 * Process a chat message through the full memory pipeline
 *
 * 1. Recall relevant memories and facts
 * 2. Generate response (LLM or echo)
 * 3. Remember the exchange (triggers layer orchestration)
 */
export async function chat(
  userMessage: string,
  conversationId?: string,
): Promise<ChatResult> {
  const cortex = getCortex();
  const convId = conversationId || getConversationId();

  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
  // Step 1: Recall relevant memories
  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

  let memories: Memory[] = [];
  let facts: Fact[] = [];

  startSpinner("Searching memories...");

  try {
    // Use the unified recall API (v0.23.0+)
    const recallResult = await cortex.memory.recall({
      memorySpaceId: CONFIG.memorySpaceId,
      query: userMessage,
      limit: 10,
      sources: {
        vector: true,
        facts: true,
        graph: CONFIG.enableGraphMemory,
      },
    });

    // Extract memories and facts from the correct result structure
    // SDK returns: result.sources.vector.items and result.sources.facts.items
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const result = recallResult as any;
    memories = (result.sources?.vector?.items || result.memories || []) as Memory[];
    facts = (result.sources?.facts?.items || result.facts || []) as Fact[];

    stopSpinner(true, `Found ${memories.length} memories, ${facts.length} facts`);

    // Display recall results
    printRecallResults(memories, facts);
  } catch (error) {
    stopSpinner(false, "No memories found (starting fresh)");
    // Recall might fail if no memories exist yet - that's ok
    if (CONFIG.debug) {
      console.log("[Debug] Recall error (may be empty):", error);
    }
  }

  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
  // Step 2: Generate response
  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

  startSpinner("Thinking...");
  const response = await generateResponse(userMessage, memories, facts);
  stopSpinner(true, "Response generated");

  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
  // Step 3: Remember the exchange (triggers orchestration)
  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

  const startTime = Date.now();

  try {
    const params = await buildRememberParams({
      userMessage,
      agentResponse: response,
      conversationId: convId,
    });

    // Add layer observer for console output - uses 'observer' not 'layerObserver'
    await cortex.memory.remember({
      ...params,
      observer: createLayerObserver(),
    });

    // Print orchestration summary
    const totalMs = Date.now() - startTime;
    printOrchestrationComplete(totalMs);
  } catch (error) {
    console.error("Failed to store memory:", error);
    // Still return the response even if storage fails
  }

  return {
    response,
    conversationId: convId,
    memoriesRecalled: memories.length,
    factsRecalled: facts.length,
  };
}

// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
// Query Functions
// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

/**
 * Search memories without storing anything
 */
export async function recallMemories(query: string): Promise<void> {
  const cortex = getCortex();

  startSpinner("Searching memories...");

  try {
    const recallResult = await cortex.memory.recall({
      memorySpaceId: CONFIG.memorySpaceId,
      query,
      limit: 10,
      sources: {
        vector: true,
        facts: true,
        graph: CONFIG.enableGraphMemory,
      },
    });

    // Extract from correct result structure
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const result = recallResult as any;
    const memories = (result.sources?.vector?.items || result.memories || []) as Memory[];
    const facts = (result.sources?.facts?.items || result.facts || []) as Fact[];

    stopSpinner(true, `Found ${memories.length} memories, ${facts.length} facts`);
    printRecallResults(memories, facts);
  } catch (error) {
    stopSpinner(false, "Recall failed");
    console.error("Recall failed:", error);
  }
}

/**
 * List all facts in the memory space
 */
export async function listFacts(): Promise<void> {
  const cortex = getCortex();

  startSpinner("Loading facts...");

  try {
    const result = await cortex.facts.list({
      memorySpaceId: CONFIG.memorySpaceId,
      limit: 20,
    });

    const facts = (result.facts || result || []) as Fact[];

    stopSpinner(true, `Found ${facts.length} facts`);
    printRecallResults([], facts);
  } catch (error) {
    stopSpinner(false, "Failed to load facts");
    console.error("List facts failed:", error);
  }
}

/**
 * Get conversation history
 */
export async function getHistory(): Promise<void> {
  const cortex = getCortex();
  const convId = currentConversationId;

  if (!convId) {
    printInfo("No active conversation");
    return;
  }

  startSpinner("Loading history...");

  try {
    const conversation = await cortex.conversations.get(convId);

    if (conversation && conversation.messages) {
      stopSpinner(true, `Found ${conversation.messages.length} messages`);
      console.log("");
      console.log(`📜 Conversation: ${convId}`);
      console.log(`   Messages: ${conversation.messages.length}`);
      console.log("");

      for (const msg of conversation.messages.slice(-10)) {
        const role = msg.role === "user" ? "You" : "Assistant";
        const content =
          typeof msg.content === "string"
            ? msg.content
            : JSON.stringify(msg.content);
        console.log(`   ${role}: ${content.slice(0, 60)}${content.length > 60 ? "..." : ""}`);
      }
      console.log("");
    } else {
      stopSpinner(false, "Conversation not found or empty");
    }
  } catch (error) {
    stopSpinner(false, "Failed to load history");
    console.error("Get history failed:", error);
  }
}

/**
 * Print current configuration
 */
export function printConfig(): void {
  console.log("");
  console.log("⚙️  Configuration:");
  console.log(`   Memory Space: ${CONFIG.memorySpaceId}`);
  console.log(`   User: ${CONFIG.userId} (${CONFIG.userName})`);
  console.log(`   Agent: ${CONFIG.agentId} (${CONFIG.agentName})`);
  console.log(`   Fact Extraction: ${CONFIG.enableFactExtraction ? "enabled" : "disabled"}`);
  console.log(`   Graph Sync: ${CONFIG.enableGraphMemory ? "enabled" : "disabled"}`);
  console.log(`   LLM: ${isLLMAvailable() ? "OpenAI (enabled)" : "Echo mode (no API key)"}`);
  console.log(`   Conversation: ${currentConversationId || "none"}`);
  console.log("");
}
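chat.ts gives the basic template the same recall → generate → remember loop the Vercel AI quickstart uses in its chat route, returning a ChatResult with counts of what was recalled. A brief driver sketch, assuming the module is imported from the template's src directory (the actual REPL wiring lives in src/index.ts and may differ):

// driver-sketch.ts — hypothetical caller, for illustration only
import { chat, newConversation, printConfig } from "./chat.js";

printConfig();

const conversationId = newConversation();
const result = await chat("Hi, my name is Alex and I like coffee.", conversationId);

console.log(result.response);
console.log(`Recalled ${result.memoriesRecalled} memories and ${result.factsRecalled} facts`);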