@cortexmemory/cli 0.27.3 → 0.27.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +127 -56
- package/dist/commands/deploy.js.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +32 -12
- package/dist/utils/app-template-sync.js.map +1 -1
- package/package.json +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +56 -11
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +70 -15
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +333 -0
- package/templates/vercel-ai-quickstart/app/page.tsx +18 -4
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +5 -2
- package/templates/vercel-ai-quickstart/jest.config.js +8 -1
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
- package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
- package/templates/vercel-ai-quickstart/package.json +5 -1
- package/templates/vercel-ai-quickstart/test-api.mjs +272 -0
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +454 -0

package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts
@@ -0,0 +1,165 @@
+/**
+ * Memory-Enabled Agent (AI SDK v6)
+ *
+ * This file demonstrates how to create a reusable agent with
+ * Cortex Memory integration using AI SDK v6's ToolLoopAgent.
+ *
+ * The agent:
+ * - Automatically injects relevant memories into context
+ * - Can be used with both generate() and stream()
+ * - Supports type-safe call options for userId, memorySpaceId, etc.
+ *
+ * @example
+ * ```typescript
+ * const result = await memoryAgent.generate({
+ *   prompt: 'What do you remember about me?',
+ *   options: {
+ *     userId: 'user_123',
+ *     memorySpaceId: 'my-app',
+ *   },
+ * });
+ * ```
+ */
+
+import { ToolLoopAgent, tool, stepCountIs } from "ai";
+import { openai } from "@ai-sdk/openai";
+import { z } from "zod";
+import {
+  createCortexCallOptionsSchema,
+  createMemoryPrepareCall,
+  type CortexCallOptions,
+} from "@cortexmemory/vercel-ai-provider";
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Agent Configuration
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.
+
+Your capabilities:
+- You remember everything users tell you across conversations
+- You can recall facts, preferences, and context from past interactions
+- You naturally reference what you've learned about the user
+
+Behavior guidelines:
+- When you remember something from a previous conversation, mention it naturally
+- If asked about something you learned, reference it specifically
+- Be conversational and friendly
+- Help demonstrate the memory system by showing what you remember`;
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Memory Agent Definition
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+/**
+ * A memory-enabled agent using AI SDK v6's ToolLoopAgent.
+ *
+ * This agent demonstrates:
+ * - callOptionsSchema for type-safe runtime config (userId, memorySpaceId, etc.)
+ * - prepareCall for automatic memory context injection via Cortex's recall() API
+ * - Built-in tools for memory operations (optional)
+ *
+ * The callOptionsSchema ensures TypeScript type safety when calling the agent:
+ * - userId: required for memory isolation per user
+ * - memorySpaceId: required for data partitioning
+ * - conversationId: optional for session continuity
+ * - agentId: optional agent identifier
+ */
+export const memoryAgent = new ToolLoopAgent({
+  id: "cortex-memory-agent",
+  model: openai("gpt-4o-mini"),
+  instructions: SYSTEM_PROMPT,
+
+  // ┌─────────────────────────────────────────────────────────────────┐
+  // │ callOptionsSchema: Type-Safe Runtime Configuration               │
+  // │                                                                  │
+  // │ This Zod schema defines what options must/can be passed when     │
+  // │ calling the agent. AI SDK v6 validates these at runtime.         │
+  // │                                                                  │
+  // │ Example usage:                                                   │
+  // │   await memoryAgent.generate({                                   │
+  // │     prompt: 'Hello!',                                            │
+  // │     options: { userId: 'u1', memorySpaceId: 'app1' }, // typed!  │
+  // │   });                                                            │
+  // └─────────────────────────────────────────────────────────────────┘
+  callOptionsSchema: createCortexCallOptionsSchema(),
+
+  // ┌─────────────────────────────────────────────────────────────────┐
+  // │ prepareCall: Memory Context Injection                            │
+  // │                                                                  │
+  // │ Called before each agent invocation. This hook:                  │
+  // │ 1. Extracts the user's query from messages                       │
+  // │ 2. Calls Cortex memory.recall() with userId + memorySpaceId      │
+  // │ 3. Injects the returned context into instructions                │
+  // │                                                                  │
+  // │ The recall() API orchestrates all memory layers:                 │
+  // │ - Vector memories (semantic search)                              │
+  // │ - Facts (extracted knowledge)                                    │
+  // │ - Graph relationships (if configured)                            │
+  // └─────────────────────────────────────────────────────────────────┘
+  prepareCall: createMemoryPrepareCall({
+    convexUrl: process.env.CONVEX_URL!,
+    maxMemories: 20,     // Max items to inject from recall
+    includeFacts: true,  // Include Layer 3 facts
+    includeVector: true, // Include Layer 2 vector memories
+    includeGraph: true,  // Expand through graph relationships
+  }),
+
+  // Default to 5 steps (sufficient for most chat interactions)
+  stopWhen: stepCountIs(5),
+
+  // Optional: Add memory-specific tools for explicit memory operations
+  // Uncomment to let the agent actively search/store memories
+  /*
+  tools: {
+    searchMemory: tool({
+      description: 'Search for specific memories about the user',
+      inputSchema: z.object({
+        query: z.string().describe('What to search for in memory'),
+      }),
+      execute: async ({ query }, { options }) => {
+        const { Cortex } = await import('@cortexmemory/sdk');
+        const cortex = new Cortex({ convexUrl: process.env.CONVEX_URL! });
+        const result = await cortex.memory.recall({
+          memorySpaceId: options.memorySpaceId,
+          query,
+          userId: options.userId,
+          limit: 5,
+        });
+        return result.context || 'No memories found.';
+      },
+    }),
+  },
+  */
+});
+
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+// Type Exports for Client Components
+// ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+/**
+ * Inferred UIMessage type for this agent.
+ *
+ * Use this in your client components for full type safety:
+ *
+ * ```typescript
+ * import { useChat } from '@ai-sdk/react';
+ * import type { MemoryAgentUIMessage } from '@/lib/agents/memory-agent';
+ *
+ * const { messages } = useChat<MemoryAgentUIMessage>();
+ * ```
+ */
+export type MemoryAgentUIMessage = {
+  id: string;
+  role: "user" | "assistant" | "system";
+  createdAt?: Date;
+  parts?: Array<
+    | { type: "text"; text: string }
+    | { type: "tool-invocation"; toolCallId: string; state: string }
+  >;
+};
+
+/**
+ * Re-export call options type for convenience.
+ */
+export type { CortexCallOptions };
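
For orientation, a minimal sketch of how a server route could call this agent, following the @example in the file header above. The import path, the request body shape, and the use of result.text are assumptions for illustration, not necessarily what the template's app/api/chat-v6/route.ts does.

    // Sketch only (assumed route, not part of the diff).
    import { memoryAgent } from "@/lib/agents/memory-agent";

    export async function POST(req: Request) {
      const { prompt, userId, memorySpaceId } = await req.json();
      // Call options are validated against createCortexCallOptionsSchema() at runtime.
      const result = await memoryAgent.generate({
        prompt,
        options: { userId, memorySpaceId },
      });
      return Response.json({ text: result.text }); // result.text assumed, per typical generate() results
    }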

package/templates/vercel-ai-quickstart/lib/versions.ts
@@ -0,0 +1,60 @@
+/**
+ * Version detection utilities for displaying SDK versions in the UI.
+ */
+
+export interface VersionInfo {
+  cortexSdk: string;
+  aiSdk: string;
+  aiSdkMajor: number;
+}
+
+/**
+ * Detect installed SDK versions at runtime.
+ * Returns version strings for display in the UI.
+ */
+export async function detectVersions(): Promise<VersionInfo> {
+  let cortexSdk = "unknown";
+  let aiSdk = "unknown";
+  let aiSdkMajor = 5;
+
+  // Detect Cortex SDK version
+  try {
+    const cortexModule = await import("@cortexmemory/sdk");
+    // Check for version export or use package version
+    if ("VERSION" in cortexModule) {
+      cortexSdk = cortexModule.VERSION as string;
+    } else {
+      // Fallback: try to detect from package
+      cortexSdk = "0.24.0";
+    }
+  } catch {
+    cortexSdk = "0.24.0";
+  }
+
+  // Detect AI SDK version by checking for v6-specific exports
+  try {
+    const aiModule = await import("ai");
+
+    // v6 has these specific exports
+    const hasV6Features =
+      "ToolLoopAgent" in aiModule ||
+      "createAgentUIStreamResponse" in aiModule ||
+      "Output" in aiModule;
+
+    if (hasV6Features) {
+      aiSdkMajor = 6;
+      aiSdk = "v6";
+    } else {
+      aiSdkMajor = 5;
+      aiSdk = "v5";
+    }
+  } catch {
+    aiSdk = "v5";
+  }
+
+  return {
+    cortexSdk,
+    aiSdk,
+    aiSdkMajor,
+  };
+}
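
A possible consumer of detectVersions(), sketched against the VersionInfo interface above; the component name, import path, and markup are illustrative only.

    // Sketch: an async server component surfacing the detected versions (names assumed).
    import { detectVersions } from "@/lib/versions";

    export default async function VersionBadge() {
      const { cortexSdk, aiSdk, aiSdkMajor } = await detectVersions();
      return (
        <span>
          Cortex SDK {cortexSdk} · AI SDK {aiSdk} (major {aiSdkMajor})
        </span>
      );
    }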

package/templates/vercel-ai-quickstart/package.json
@@ -9,7 +9,11 @@
     "build": "next build --webpack",
     "start": "next start",
     "lint": "next lint",
-    "test": "jest",
+    "test": "jest --selectProjects unit integration",
+    "test:unit": "jest --selectProjects unit",
+    "test:integration": "jest --selectProjects integration",
+    "test:e2e": "jest --selectProjects e2e",
+    "test:all": "jest",
     "test:watch": "jest --watch",
     "convex:dev": "convex dev",
     "convex:deploy": "convex deploy"
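
The new --selectProjects flags imply that jest.config.js (also changed in this release, +8 -1) defines named Jest projects; a rough sketch of that shape, with displayName values taken from the scripts above and test paths assumed:

    // jest.config.js (illustrative shape only; the template's actual config may differ)
    module.exports = {
      projects: [
        { displayName: 'unit', testMatch: ['<rootDir>/tests/unit/**/*.test.ts'] },
        { displayName: 'integration', testMatch: ['<rootDir>/tests/integration/**/*.test.ts'] },
        { displayName: 'e2e', testMatch: ['<rootDir>/tests/e2e/**/*.test.ts'] },
      ],
    };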

package/templates/vercel-ai-quickstart/test-api.mjs
@@ -0,0 +1,272 @@
+#!/usr/bin/env node
+/**
+ * Programmatic API Route Tests
+ * Tests both v5 and v6 routes for memory functionality
+ */
+
+const BASE_URL = process.env.QUICKSTART_URL || 'http://localhost:3000';
+
+// Test configuration
+const TEST_USER_ID = `test-user-${Date.now()}`;
+const TEST_MEMORY_SPACE_ID = 'quickstart-demo';
+
+async function sleep(ms) {
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+async function testRoute(routePath, routeName) {
+  console.log(`\n${'='.repeat(60)}`);
+  console.log(`Testing ${routeName} (${routePath})`);
+  console.log('='.repeat(60));
+
+  const conversationId = `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+
+  // Test 1: Send a message with a fact
+  console.log('\n📤 Test 1: Sending message with a fact...');
+  const message1 = {
+    id: `msg-${Date.now()}-1`,
+    role: 'user',
+    content: `My favorite programming language is TypeScript and I work at a company called TechCorp.`,
+    createdAt: new Date().toISOString(),
+  };
+
+  try {
+    const response1 = await fetch(`${BASE_URL}${routePath}`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        messages: [message1],
+        userId: TEST_USER_ID,
+        memorySpaceId: TEST_MEMORY_SPACE_ID,
+        conversationId,
+      }),
+    });
+
+    if (!response1.ok) {
+      const errorText = await response1.text();
+      console.log(`❌ Request failed with status ${response1.status}`);
+      console.log(`   Error: ${errorText.slice(0, 500)}`);
+      return { success: false, route: routeName, error: `HTTP ${response1.status}` };
+    }
+
+    // Read streaming response
+    const reader = response1.body.getReader();
+    const decoder = new TextDecoder();
+    let fullResponse = '';
+    let chunks = 0;
+
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      const chunk = decoder.decode(value, { stream: true });
+      fullResponse += chunk;
+      chunks++;
+    }
+
+    console.log(`✅ Response received (${chunks} chunks, ${fullResponse.length} bytes)`);
+
+    // Check for layer observer data
+    const hasLayerData = fullResponse.includes('FACTS_LAYER') ||
+      fullResponse.includes('MEMORY_LAYER') ||
+      fullResponse.includes('layerId');
+    console.log(`   Layer observer data: ${hasLayerData ? '✅ Present' : '❌ Missing'}`);
+
+    // Check for text content
+    const hasTextContent = fullResponse.includes('"type":"text"') ||
+      fullResponse.includes('text-delta');
+    console.log(`   Text content: ${hasTextContent ? '✅ Present' : '❌ Missing'}`);
+
+  } catch (error) {
+    console.log(`❌ Request error: ${error.message}`);
+    return { success: false, route: routeName, error: error.message };
+  }
+
+  // Wait for async processing (fact extraction, etc.)
+  console.log('\n⏳ Waiting for memory processing...');
+  await sleep(3000);
+
+  // Test 2: Check if facts were stored
+  console.log('\n🔍 Test 2: Checking stored facts...');
+  try {
+    const factsResponse = await fetch(
+      `${BASE_URL}/api/facts?userId=${TEST_USER_ID}&memorySpaceId=${TEST_MEMORY_SPACE_ID}`,
+      { method: 'GET' }
+    );
+
+    if (factsResponse.ok) {
+      const factsData = await factsResponse.json();
+      const facts = factsData.facts || factsData || [];
+      console.log(`✅ Facts API returned: ${Array.isArray(facts) ? facts.length : 'unknown'} facts`);
+
+      if (Array.isArray(facts) && facts.length > 0) {
+        console.log('   Sample facts:');
+        facts.slice(0, 3).forEach((fact, i) => {
+          const content = fact.content || fact.text || JSON.stringify(fact).slice(0, 100);
+          console.log(`   ${i + 1}. ${content.slice(0, 80)}...`);
+        });
+      }
+    } else {
+      console.log(`⚠️ Facts API returned ${factsResponse.status}`);
+    }
+  } catch (error) {
+    console.log(`⚠️ Could not check facts: ${error.message}`);
+  }
+
+  // Test 3: Send follow-up message and verify recall
+  console.log('\n📤 Test 3: Sending follow-up to test recall...');
+  const message2 = {
+    id: `msg-${Date.now()}-2`,
+    role: 'user',
+    content: 'What programming language do I prefer?',
+    createdAt: new Date().toISOString(),
+  };
+
+  // Include previous assistant response for context
+  const message1Response = {
+    id: `msg-${Date.now()}-1r`,
+    role: 'assistant',
+    content: 'Great! TypeScript is a fantastic choice for type-safe development.',
+    createdAt: new Date().toISOString(),
+  };
+
+  try {
+    const response2 = await fetch(`${BASE_URL}${routePath}`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify({
+        messages: [message1, message1Response, message2],
+        userId: TEST_USER_ID,
+        memorySpaceId: TEST_MEMORY_SPACE_ID,
+        conversationId,
+      }),
+    });
+
+    if (!response2.ok) {
+      console.log(`❌ Follow-up request failed with status ${response2.status}`);
+      return { success: false, route: routeName, error: `Follow-up HTTP ${response2.status}` };
+    }
+
+    const reader = response2.body.getReader();
+    const decoder = new TextDecoder();
+    let fullResponse = '';
+
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      fullResponse += decoder.decode(value, { stream: true });
+    }
+
+    // Check if TypeScript was mentioned in the response (recall working)
+    const mentionsTypeScript = fullResponse.toLowerCase().includes('typescript');
+    console.log(`✅ Response received`);
+    console.log(`   Recalls TypeScript preference: ${mentionsTypeScript ? '✅ Yes' : '❌ No'}`);
+
+    return {
+      success: true,
+      route: routeName,
+      recallWorks: mentionsTypeScript
+    };
+
+  } catch (error) {
+    console.log(`❌ Follow-up request error: ${error.message}`);
+    return { success: false, route: routeName, error: error.message };
+  }
+}
+
+async function testConversationAPI() {
+  console.log(`\n${'='.repeat(60)}`);
+  console.log('Testing Conversation API');
+  console.log('='.repeat(60));
+
+  try {
+    const response = await fetch(
+      `${BASE_URL}/api/conversations?userId=${TEST_USER_ID}&memorySpaceId=${TEST_MEMORY_SPACE_ID}`,
+      { method: 'GET' }
+    );
+
+    if (response.ok) {
+      const data = await response.json();
+      const conversations = data.conversations || data || [];
+      console.log(`✅ Conversations API works: ${Array.isArray(conversations) ? conversations.length : 'unknown'} conversations`);
+      return true;
+    } else {
+      console.log(`❌ Conversations API returned ${response.status}`);
+      return false;
+    }
+  } catch (error) {
+    console.log(`❌ Conversations API error: ${error.message}`);
+    return false;
+  }
+}
+
+async function checkServerHealth() {
+  console.log('🏥 Checking server health...');
+  try {
+    const response = await fetch(`${BASE_URL}/api/health`, {
+      method: 'GET',
+      signal: AbortSignal.timeout(5000)
+    });
+    if (response.ok) {
+      console.log('✅ Server is healthy');
+      return true;
+    } else {
+      console.log(`❌ Health check failed: ${response.status}`);
+      return false;
+    }
+  } catch (error) {
+    console.log(`❌ Server not reachable: ${error.message}`);
+    return false;
+  }
+}
+
+async function main() {
+  console.log('🧪 Cortex Memory API Route Tests');
+  console.log(`📍 Testing server at: ${BASE_URL}`);
+  console.log(`👤 Test user: ${TEST_USER_ID}`);
+  console.log(`📦 Memory space: ${TEST_MEMORY_SPACE_ID}`);
+
+  // Check server health first
+  const healthy = await checkServerHealth();
+  if (!healthy) {
+    console.log('\n❌ Server is not running. Please start the quickstart first:');
+    console.log('   cd packages/vercel-ai-provider/quickstart && npm run dev');
+    process.exit(1);
+  }
+
+  const results = [];
+
+  // Test conversation API
+  await testConversationAPI();
+
+  // Test v5 route
+  const v5Result = await testRoute('/api/chat', 'V5 Route');
+  results.push(v5Result);
+
+  // Test v6 route
+  const v6Result = await testRoute('/api/chat-v6', 'V6 Route');
+  results.push(v6Result);
+
+  // Summary
+  console.log(`\n${'='.repeat(60)}`);
+  console.log('📊 Test Summary');
+  console.log('='.repeat(60));
+
+  let allPassed = true;
+  for (const result of results) {
+    const status = result.success ? '✅ PASS' : '❌ FAIL';
+    const recall = result.recallWorks !== undefined
+      ? (result.recallWorks ? ' (recall ✅)' : ' (recall ❌)')
+      : '';
+    const error = result.error ? ` - ${result.error}` : '';
+    console.log(`${status} ${result.route}${recall}${error}`);
+    if (!result.success) allPassed = false;
+  }
+
+  console.log('');
+  process.exit(allPassed ? 0 : 1);
+}
+
+main().catch(error => {
+  console.error('Test runner error:', error);
+  process.exit(1);
+});
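
test-api.mjs probes GET /api/health before running, but no health route appears in the changed-file list; if the template does not already ship one, a minimal Next.js app-router handler along these lines would satisfy the check (path and response shape assumed):

    // app/api/health/route.ts (hypothetical minimal handler for the script's health check)
    export async function GET() {
      return Response.json({ status: "ok" });
    }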