@cortexmemory/cli 0.27.4 → 0.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/db.d.ts.map +1 -1
- package/dist/commands/db.js +18 -6
- package/dist/commands/db.js.map +1 -1
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +74 -34
- package/dist/commands/deploy.js.map +1 -1
- package/dist/commands/dev.js +3 -2
- package/dist/commands/dev.js.map +1 -1
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +12 -0
- package/dist/commands/init.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +3 -1
- package/dist/utils/app-template-sync.js.map +1 -1
- package/dist/utils/init/quickstart-setup.d.ts.map +1 -1
- package/dist/utils/init/quickstart-setup.js.map +1 -1
- package/package.json +4 -4
- package/templates/basic/.env.local.example +23 -0
- package/templates/basic/README.md +181 -56
- package/templates/basic/package-lock.json +2180 -406
- package/templates/basic/package.json +23 -5
- package/templates/basic/src/__tests__/chat.test.ts +340 -0
- package/templates/basic/src/__tests__/cortex.test.ts +260 -0
- package/templates/basic/src/__tests__/display.test.ts +455 -0
- package/templates/basic/src/__tests__/e2e/fact-extraction.test.ts +498 -0
- package/templates/basic/src/__tests__/e2e/memory-flow.test.ts +355 -0
- package/templates/basic/src/__tests__/e2e/server-e2e.test.ts +414 -0
- package/templates/basic/src/__tests__/helpers/test-utils.ts +345 -0
- package/templates/basic/src/__tests__/integration/chat-flow.test.ts +422 -0
- package/templates/basic/src/__tests__/integration/server.test.ts +441 -0
- package/templates/basic/src/__tests__/llm.test.ts +344 -0
- package/templates/basic/src/chat.ts +300 -0
- package/templates/basic/src/cortex.ts +203 -0
- package/templates/basic/src/display.ts +425 -0
- package/templates/basic/src/index.ts +194 -64
- package/templates/basic/src/llm.ts +214 -0
- package/templates/basic/src/server.ts +280 -0
- package/templates/basic/vitest.config.ts +33 -0
- package/templates/basic/vitest.e2e.config.ts +28 -0
- package/templates/basic/vitest.integration.config.ts +25 -0
- package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +6 -9
- package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +14 -18
- package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +4 -7
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +28 -11
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +19 -13
- package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +16 -16
- package/templates/vercel-ai-quickstart/app/globals.css +24 -9
- package/templates/vercel-ai-quickstart/app/page.tsx +25 -13
- package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +3 -1
- package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +6 -6
- package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +19 -8
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +41 -14
- package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +10 -5
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +3 -3
- package/templates/vercel-ai-quickstart/lib/password.ts +5 -5
- package/templates/vercel-ai-quickstart/next.config.js +10 -2
- package/templates/vercel-ai-quickstart/package.json +18 -11
- package/templates/vercel-ai-quickstart/test-api.mjs +131 -100
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +73 -44
- package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +40 -40
- package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +8 -8
- package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +12 -8
- package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +4 -1
package/templates/basic/src/__tests__/e2e/server-e2e.test.ts
@@ -0,0 +1,414 @@
+/**
+ * E2E Tests: HTTP Server
+ *
+ * Tests HTTP API endpoints against real Convex backend.
+ * Requires: CONVEX_URL environment variable
+ * Optional: OPENAI_API_KEY for LLM responses
+ *
+ * NOTE: These tests require the server to be running:
+ *   npm run server
+ *
+ * Run with: CONVEX_URL=<url> npm run test:e2e
+ */
+
+import { describe, it, expect, beforeAll, beforeEach, afterAll } from "vitest";
+import {
+  shouldSkipE2E,
+  generateTestId,
+  createTestConversationId,
+  wait,
+  makeServerRequest,
+} from "../helpers/test-utils.js";
+
+// Skip all tests if CONVEX_URL not set
+const SKIP_E2E = shouldSkipE2E();
+
+// Server URL - assumes server is running on default port
+const SERVER_URL = process.env.SERVER_URL || "http://localhost:3001";
+
+// Check if server is running (must return JSON with status: "ok")
+async function isServerRunning(): Promise<boolean> {
+  try {
+    const response = await fetch(`${SERVER_URL}/health`, { method: "GET" });
+    if (!response.ok) return false;
+
+    const contentType = response.headers.get("content-type");
+    if (!contentType?.includes("application/json")) return false;
+
+    const data = await response.json();
+    return data.status === "ok";
+  } catch {
+    return false;
+  }
+}
+
+describe("HTTP Server E2E", () => {
+  let serverRunning: boolean = false;
+
+  beforeAll(async () => {
+    if (SKIP_E2E) {
+      console.log("Skipping E2E tests - CONVEX_URL not configured");
+      return;
+    }
+
+    // Check if server is running
+    serverRunning = await isServerRunning();
+    if (!serverRunning) {
+      console.log(`
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+Server not running at ${SERVER_URL}
+
+To run these tests, start the server first:
+  CONVEX_URL=<your-url> npm run server
+
+Then run tests in another terminal:
+  npm run test:e2e
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+`);
+    }
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Health Check
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("GET /health", () => {
+    it("should return health status", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/health", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.status).toBe("ok");
+      expect(data.memorySpaceId).toBeDefined();
+      expect(data.agentId).toBeDefined();
+      expect(data.features).toBeDefined();
+    });
+
+    it("should report feature flags", async () => {
+      if (!serverRunning) return;
+
+      const { data } = await makeServerRequest("/health", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(data.features.factExtraction).toBeDefined();
+      expect(data.features.graphSync).toBeDefined();
+      expect(data.features.llm).toBeDefined();
+    });
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Chat Endpoint
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("POST /chat", () => {
+    it("should process chat message", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "Hello, this is a test message",
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.response).toBeDefined();
+      expect(data.conversationId).toBeDefined();
+      expect(typeof data.memoriesRecalled).toBe("number");
+      expect(typeof data.factsRecalled).toBe("number");
+    }, 30000);
+
+    it("should use provided conversation ID", async () => {
+      if (!serverRunning) return;
+
+      const customConvId = createTestConversationId();
+
+      const { status, data } = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "Testing with custom conversation ID",
+          conversationId: customConvId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.conversationId).toBe(customConvId);
+    }, 30000);
+
+    it("should return 400 for missing message", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {},
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(400);
+      expect(data.error).toBe("message is required");
+    });
+
+    it("should persist messages across calls in same conversation", async () => {
+      if (!serverRunning) return;
+
+      const conversationId = createTestConversationId();
+
+      // First message
+      await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "My name is TestUser",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      await wait(2000);
+
+      // Second message in same conversation
+      const { status, data } = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "What is my name?",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.conversationId).toBe(conversationId);
+      // May or may not recall the name depending on LLM and memory
+    }, 60000);
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Recall Endpoint
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("GET /recall", () => {
+    it("should search memories", async () => {
+      if (!serverRunning) return;
+
+      // First store some memories
+      await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "I love programming in TypeScript",
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      await wait(2000);
+
+      // Then recall
+      const { status, data } = await makeServerRequest("/recall?query=TypeScript", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.memories).toBeDefined();
+      expect(data.facts).toBeDefined();
+      expect(data.query).toBe("TypeScript");
+    }, 30000);
+
+    it("should return 400 for missing query", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/recall", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(400);
+      expect(data.error).toBe("query parameter is required");
+    });
+
+    it("should return empty arrays for no matches", async () => {
+      if (!serverRunning) return;
+
+      const randomQuery = generateTestId("nonexistent");
+
+      const { status, data } = await makeServerRequest(`/recall?query=${randomQuery}`, {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.memories).toBeDefined();
+      expect(data.facts).toBeDefined();
+    });
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Facts Endpoint
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("GET /facts", () => {
+    it("should list facts", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/facts", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.facts).toBeDefined();
+      expect(typeof data.count).toBe("number");
+    });
+
+    it("should return count of facts", async () => {
+      if (!serverRunning) return;
+
+      const { data } = await makeServerRequest("/facts", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(data.count).toBe(data.facts.length);
+    });
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // History Endpoint
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("GET /history/:conversationId", () => {
+    it("should return conversation history", async () => {
+      if (!serverRunning) return;
+
+      const conversationId = createTestConversationId();
+
+      // Create conversation with messages
+      await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "First message in history test",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "Second message in history test",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      await wait(2000);
+
+      // Get history
+      const { status, data } = await makeServerRequest(`/history/${conversationId}`, {
+        baseUrl: SERVER_URL,
+      });
+
+      // May be 200 or 404 depending on whether conversation was persisted
+      expect([200, 404]).toContain(status);
+
+      if (status === 200) {
+        expect(data.conversationId).toBe(conversationId);
+        expect(data.messages).toBeDefined();
+      }
+    }, 60000);
+
+    it("should return 404 for non-existent conversation", async () => {
+      if (!serverRunning) return;
+
+      const nonExistentId = createTestConversationId();
+
+      const { status, data } = await makeServerRequest(`/history/${nonExistentId}`, {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(404);
+      expect(data.error).toBe("Conversation not found");
+    });
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Full Chat Flow
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("full chat flow", () => {
+    it("should complete a full conversation flow", async () => {
+      if (!serverRunning) return;
+
+      const conversationId = createTestConversationId();
+
+      // Step 1: Introduce with facts
+      console.log("Step 1: Introducing user...");
+      const intro = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "Hi! My name is Alice and I work as a designer in New York.",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      expect(intro.status).toBe(200);
+      console.log(`  Response: ${intro.data.response?.slice(0, 50)}...`);
+
+      await wait(3000);
+
+      // Step 2: Ask a follow-up
+      console.log("Step 2: Asking follow-up...");
+      const followUp = await makeServerRequest("/chat", {
+        method: "POST",
+        body: {
+          message: "What do you know about me?",
+          conversationId,
+        },
+        baseUrl: SERVER_URL,
+      });
+
+      expect(followUp.status).toBe(200);
+      console.log(`  Response: ${followUp.data.response?.slice(0, 100)}...`);
+      console.log(`  Memories recalled: ${followUp.data.memoriesRecalled}`);
+      console.log(`  Facts recalled: ${followUp.data.factsRecalled}`);
+
+      // Step 3: Check recall
+      console.log("Step 3: Testing recall...");
+      const recall = await makeServerRequest("/recall?query=Alice", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(recall.status).toBe(200);
+      console.log(`  Found ${recall.data.memories?.length || 0} memories, ${recall.data.facts?.length || 0} facts`);
+
+      // Step 4: List facts
+      console.log("Step 4: Listing facts...");
+      const facts = await makeServerRequest("/facts", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(facts.status).toBe(200);
+      console.log(`  Total facts in system: ${facts.data.count}`);
+
+      console.log("✓ Full flow completed successfully");
+    }, 120000);
+  });
+
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+  // Root Endpoint
+  // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+  (SKIP_E2E ? describe.skip : describe)("GET /", () => {
+    it("should return API documentation", async () => {
+      if (!serverRunning) return;
+
+      const { status, data } = await makeServerRequest("/", {
+        baseUrl: SERVER_URL,
+      });
+
+      expect(status).toBe(200);
+      expect(data.name).toContain("Cortex Memory");
+      expect(data.endpoints).toBeDefined();
+    });
+  });
+});