@cortexmemory/cli 0.27.3 → 0.27.4
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +127 -56
- package/dist/commands/deploy.js.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +32 -12
- package/dist/utils/app-template-sync.js.map +1 -1
- package/package.json +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +56 -11
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +70 -15
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +333 -0
- package/templates/vercel-ai-quickstart/app/page.tsx +18 -4
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +5 -2
- package/templates/vercel-ai-quickstart/jest.config.js +8 -1
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
- package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
- package/templates/vercel-ai-quickstart/package.json +5 -1
- package/templates/vercel-ai-quickstart/test-api.mjs +272 -0
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +454 -0
package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts

@@ -7,25 +7,69 @@
 import { getCortex } from "@/lib/cortex";
 import { verifyPassword, generateSessionToken } from "@/lib/password";
 
+/**
+ * Validates login request body structure.
+ * Returns validated credentials or null if invalid.
+ */
+function validateLoginBody(
+  body: unknown
+): { username: string; password: string } | null {
+  if (typeof body !== "object" || body === null) {
+    return null;
+  }
+
+  const record = body as Record<string, unknown>;
+
+  // Validate username field exists and is a non-empty string
+  const hasValidUsername =
+    "username" in record &&
+    typeof record.username === "string" &&
+    record.username.length > 0 &&
+    record.username.length <= 256;
+
+  // Validate password field exists and is a non-empty string
+  const hasValidPassword =
+    "password" in record &&
+    typeof record.password === "string" &&
+    record.password.length > 0 &&
+    record.password.length <= 1024;
+
+  if (!hasValidUsername || !hasValidPassword) {
+    return null;
+  }
+
+  return {
+    username: record.username as string,
+    password: record.password as string,
+  };
+}
+
+/**
+ * Safely extracts an error message for logging without exposing user data.
+ */
+function getSafeErrorMessage(error: unknown): string {
+  if (error instanceof Error) {
+    // Only include error name and a sanitized message
+    // Avoid logging full stack traces which may contain user data
+    return `${error.name}: ${error.message.slice(0, 200)}`;
+  }
+  return "Unknown error";
+}
+
 export async function POST(req: Request) {
   try {
     const body = await req.json();
-    const { username, password } = body;
 
-    // Validate input
-
+    // Validate input structure before extracting values
+    const credentials = validateLoginBody(body);
+    if (!credentials) {
       return Response.json(
-        { error: "Username
+        { error: "Username and password are required" },
         { status: 400 }
       );
     }
 
-
-      return Response.json(
-        { error: "Password is required" },
-        { status: 400 }
-      );
-    }
+    const { username, password } = credentials;
 
     const cortex = getCortex();
     const sanitizedUsername = username.toLowerCase();

@@ -73,7 +117,8 @@ export async function POST(req: Request) {
       sessionToken,
     });
   } catch (error) {
-
+    // Log sanitized error to prevent log injection
+    console.error("[Login Error]", getSafeErrorMessage(error));
 
     return Response.json(
       { error: "Failed to authenticate" },
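
A quick way to exercise the hardened endpoint is a minimal sketch like the following, assuming the quickstart dev server runs on localhost:3000; the credentials here are placeholders, not part of the package:

// Smoke test for the hardened login route (TypeScript, Node 18+ fetch).
async function tryLogin(username: unknown, password: unknown) {
  const res = await fetch("http://localhost:3000/api/auth/login", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ username, password }),
  });
  console.log(res.status, await res.json());
}

await tryLogin("admin", "correct-horse");  // 200 with sessionToken on success
await tryLogin("", 12345);                 // 400: structural validation rejects this
await tryLogin("x".repeat(300), "pw");     // 400: username capped at 256 chars

Note the design change: the two separate "Username is required" / "Password is required" branches collapse into one structural validator returning a single generic 400, and errors are logged via getSafeErrorMessage rather than raw, which avoids both log injection and leaking user data into logs.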
package/templates/vercel-ai-quickstart/app/api/chat/route.ts

@@ -122,45 +122,100 @@ function generateTitle(message: string): string {
   return title;
 }
 
+/**
+ * Normalize messages to ensure they have the `parts` array format
+ * expected by AI SDK v6's convertToModelMessages.
+ *
+ * Handles:
+ * - Messages with `content` string (legacy format) -> converts to `parts` array
+ * - Messages with `role: "agent"` -> converts to `role: "assistant"`
+ * - Messages already in v6 format -> passes through unchanged
+ */
+function normalizeMessages(messages: unknown[]): unknown[] {
+  return messages.map((msg: unknown) => {
+    const m = msg as Record<string, unknown>;
+
+    // Normalize role: "agent" -> "assistant"
+    let role = m.role as string;
+    if (role === "agent") {
+      role = "assistant";
+    }
+
+    // Ensure parts array exists
+    let parts = m.parts as Array<{ type: string; text?: string }> | undefined;
+    if (!parts) {
+      // Convert content string to parts array
+      const content = m.content as string | undefined;
+      if (content) {
+        parts = [{ type: "text", text: content }];
+      } else {
+        parts = [];
+      }
+    }
+
+    return {
+      ...m,
+      role,
+      parts,
+    };
+  });
+}
+
+/**
+ * Extract text from a message (handles both content string and parts array)
+ */
+function getMessageText(message: { content?: string; parts?: Array<{ type: string; text?: string }> }): string {
+  if (typeof message.content === "string") {
+    return message.content;
+  }
+  if (message.parts && Array.isArray(message.parts)) {
+    return message.parts
+      .filter((part) => part.type === "text" && part.text)
+      .map((part) => part.text)
+      .join("");
+  }
+  return "";
+}
+
 export async function POST(req: Request) {
   try {
     const body = await req.json();
     const { messages, memorySpaceId, userId, conversationId: providedConversationId } = body;
 
+    // Validate messages array exists
+    if (!messages || !Array.isArray(messages)) {
+      return new Response(
+        JSON.stringify({ error: "messages array is required" }),
+        { status: 400, headers: { "Content-Type": "application/json" } },
+      );
+    }
+
     // Generate conversation ID if not provided (new chat)
     const conversationId = providedConversationId ||
       `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
     const isNewConversation = !providedConversationId;
 
+    // Normalize messages to ensure they have the `parts` array format
+    // expected by AI SDK v6's convertToModelMessages
+    const normalizedMessages = normalizeMessages(messages);
+
     // Convert UIMessage[] from useChat to ModelMessage[] for streamText
     // Note: In AI SDK v6+, convertToModelMessages may return a Promise
-
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelMessagesResult = convertToModelMessages(normalizedMessages as any);
     const modelMessages =
       modelMessagesResult instanceof Promise
         ? await modelMessagesResult
         : modelMessagesResult;
 
     // Get the first user message for title generation
-    // AI SDK v5+ uses `parts` array instead of `content` string
     const firstUserMessage = messages.find((m: { role: string }) => m.role === "user") as {
       role: string;
       content?: string;
       parts?: Array<{ type: string; text?: string }>;
     } | undefined;
 
-
-    if (firstUserMessage) {
-      if (typeof firstUserMessage.content === "string") {
-        // Legacy format: content is a string
-        messageText = firstUserMessage.content;
-      } else if (firstUserMessage.parts && Array.isArray(firstUserMessage.parts)) {
-        // AI SDK v5+ format: extract text from parts array
-        messageText = firstUserMessage.parts
-          .filter((part) => part.type === "text" && part.text)
-          .map((part) => part.text)
-          .join("");
-      }
-    }
+    const messageText = firstUserMessage ? getMessageText(firstUserMessage) : "";
 
     // Use createUIMessageStream to send both LLM text and layer events
     return createUIMessageStreamResponse({
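
The normalization added here is easy to reason about in isolation. A standalone sketch of the same transformation (an illustrative copy for this summary, not the template's export):

// Illustration of the message normalization added above.
type Part = { type: string; text?: string };
type LooseMessage = { role: string; content?: string; parts?: Part[] };

function normalizeOne(m: LooseMessage): LooseMessage {
  const role = m.role === "agent" ? "assistant" : m.role; // legacy role name
  const parts =
    m.parts ?? (m.content ? [{ type: "text", text: m.content }] : []); // legacy content string
  return { ...m, role, parts };
}

console.log(normalizeOne({ role: "agent", content: "Hi!" }));
// -> { role: "assistant", content: "Hi!", parts: [{ type: "text", text: "Hi!" }] }
console.log(normalizeOne({ role: "user", parts: [{ type: "text", text: "Hello" }] }));
// -> unchanged: already in v6 `parts` shape

This is what lets old stored conversations (plain `content` strings, `role: "agent"`) flow through convertToModelMessages without the route rejecting them.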
package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts (new file)

@@ -0,0 +1,333 @@
+/**
+ * Chat API Route (AI SDK v6 Style)
+ *
+ * This route uses AI SDK v6's patterns while maintaining full Cortex Memory
+ * capabilities including:
+ * - Memory recall (reading past memories)
+ * - Memory storage (saving new conversations)
+ * - Fact extraction (extracting knowledge from conversations)
+ * - Belief revision (superseding outdated facts)
+ * - Layer observer (real-time UI updates)
+ *
+ * The key difference from v5 is using v6's cleaner APIs, but the memory
+ * infrastructure is identical to ensure feature parity.
+ */
+
+import { createCortexMemoryAsync } from "@cortexmemory/vercel-ai-provider";
+import type {
+  LayerObserver,
+  CortexMemoryConfig,
+} from "@cortexmemory/vercel-ai-provider";
+import { openai, createOpenAI } from "@ai-sdk/openai";
+import {
+  streamText,
+  embed,
+  convertToModelMessages,
+  createUIMessageStream,
+  createUIMessageStreamResponse,
+} from "ai";
+import { getCortex } from "@/lib/cortex";
+
+// Create OpenAI client for embeddings
+const openaiClient = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
+
+// System prompt for the assistant
+const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.
+
+Your capabilities:
+- You remember everything users tell you across conversations
+- You can recall facts, preferences, and context from past interactions
+- You naturally reference what you've learned about the user
+
+Behavior guidelines:
+- When you remember something from a previous conversation, mention it naturally
+- If asked about something you learned, reference it specifically
+- Be conversational and friendly
+- Help demonstrate the memory system by showing what you remember`;
+
+/**
+ * Create Cortex Memory config - IDENTICAL to v5 route for feature parity
+ */
+function getCortexMemoryConfig(
+  memorySpaceId: string,
+  userId: string,
+  conversationId: string,
+  layerObserver?: LayerObserver
+): CortexMemoryConfig {
+  return {
+    convexUrl: process.env.CONVEX_URL!,
+    memorySpaceId,
+
+    // User identification
+    userId,
+    userName: "Demo User",
+
+    // Agent identification
+    agentId: "cortex-memory-agent",
+    agentName: "Cortex v6 Assistant",
+
+    // Conversation ID for chat history isolation
+    conversationId,
+
+    // Enable graph memory sync
+    enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
+
+    // Enable fact extraction - CRITICAL for memory to work!
+    enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION === "true",
+
+    // Belief Revision - handles fact updates and supersessions
+    beliefRevision: {
+      enabled: true,
+      slotMatching: true,
+      llmResolution: true,
+    },
+
+    // Embedding provider for semantic matching
+    embeddingProvider: {
+      generate: async (text: string) => {
+        const result = await embed({
+          model: openaiClient.embedding("text-embedding-3-small"),
+          value: text,
+        });
+        return result.embedding;
+      },
+    },
+
+    // Streaming enhancements
+    streamingOptions: {
+      storePartialResponse: true,
+      progressiveFactExtraction: true,
+      enableAdaptiveProcessing: true,
+    },
+
+    // Memory recall configuration
+    memorySearchLimit: 20,
+
+    // Real-time layer tracking
+    layerObserver,
+
+    // Debug in development
+    debug: process.env.NODE_ENV === "development",
+  };
+}
+
+/**
+ * Normalize messages to AI SDK v6 UIMessage format
+ */
+function normalizeMessages(messages: unknown[]): unknown[] {
+  return messages.map((msg: unknown) => {
+    const m = msg as Record<string, unknown>;
+
+    // Normalize role: "agent" -> "assistant"
+    let role = m.role as string;
+    if (role === "agent") {
+      role = "assistant";
+    }
+
+    // Ensure parts array exists
+    let parts = m.parts as Array<{ type: string; text?: string }> | undefined;
+    if (!parts) {
+      const content = m.content as string | undefined;
+      if (content) {
+        parts = [{ type: "text", text: content }];
+      } else {
+        parts = [];
+      }
+    }
+
+    return {
+      ...m,
+      role,
+      parts,
+    };
+  });
+}
+
+/**
+ * Extract text from a message
+ */
+function getMessageText(message: {
+  content?: string;
+  parts?: Array<{ type: string; text?: string }>;
+}): string {
+  if (typeof message.content === "string") {
+    return message.content;
+  }
+  if (message.parts && Array.isArray(message.parts)) {
+    return message.parts
+      .filter((part) => part.type === "text" && part.text)
+      .map((part) => part.text)
+      .join("");
+  }
+  return "";
+}
+
+/**
+ * Generate a title from the first user message
+ */
+function generateTitle(message: string): string {
+  let title = message.slice(0, 50);
+  if (message.length > 50) {
+    const lastSpace = title.lastIndexOf(" ");
+    if (lastSpace > 20) {
+      title = title.slice(0, lastSpace);
+    }
+    title += "...";
+  }
+  return title;
+}
+
+export async function POST(req: Request) {
+  try {
+    const body = await req.json();
+    const {
+      messages,
+      memorySpaceId = "quickstart-demo",
+      userId = "demo-user",
+      conversationId: providedConversationId,
+    } = body;
+
+    // Validate messages array exists
+    if (!messages || !Array.isArray(messages)) {
+      return new Response(
+        JSON.stringify({ error: "messages array is required" }),
+        { status: 400, headers: { "Content-Type": "application/json" } }
+      );
+    }
+
+    // Generate conversation ID if not provided
+    const conversationId =
+      providedConversationId ||
+      `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+    const isNewConversation = !providedConversationId;
+
+    // Normalize messages for convertToModelMessages
+    const normalizedMessages = normalizeMessages(messages);
+
+    // Convert to model messages
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelMessagesResult = convertToModelMessages(normalizedMessages as any);
+    const modelMessages =
+      modelMessagesResult instanceof Promise
+        ? await modelMessagesResult
+        : modelMessagesResult;
+
+    // Get first user message for title
+    const firstUserMessage = messages.find(
+      (m: { role: string }) => m.role === "user"
+    ) as {
+      role: string;
+      content?: string;
+      parts?: Array<{ type: string; text?: string }>;
+    } | undefined;
+
+    const messageText = firstUserMessage ? getMessageText(firstUserMessage) : "";
+
+    // Use createUIMessageStreamResponse - same as v5 for full memory support
+    return createUIMessageStreamResponse({
+      stream: createUIMessageStream({
+        execute: async ({ writer }) => {
+          // Create layer observer for real-time UI updates
+          const layerObserver: LayerObserver = {
+            onOrchestrationStart: (orchestrationId) => {
+              writer.write({
+                type: "data-orchestration-start",
+                data: { orchestrationId },
+                transient: true,
+              });
+            },
+            onLayerUpdate: (event) => {
+              writer.write({
+                type: "data-layer-update",
+                data: {
+                  layer: event.layer,
+                  status: event.status,
+                  timestamp: event.timestamp,
+                  latencyMs: event.latencyMs,
+                  data: event.data,
+                  error: event.error,
+                  revisionAction: event.revisionAction,
+                  supersededFacts: event.supersededFacts,
+                },
+                transient: true,
+              });
+            },
+            onOrchestrationComplete: (summary) => {
+              writer.write({
+                type: "data-orchestration-complete",
+                data: {
+                  orchestrationId: summary.orchestrationId,
+                  totalLatencyMs: summary.totalLatencyMs,
+                  createdIds: summary.createdIds,
+                },
+                transient: true,
+              });
+            },
+          };
+
+          // Build config with observer
+          const config = getCortexMemoryConfig(
+            memorySpaceId,
+            userId,
+            conversationId,
+            layerObserver
+          );
+
+          // Create memory-augmented model - THIS handles both recall AND storage!
+          const cortexMemory = await createCortexMemoryAsync(config);
+
+          // Stream response with automatic memory integration
+          const result = streamText({
+            model: cortexMemory(openai("gpt-4o-mini")),
+            messages: modelMessages,
+            system: SYSTEM_PROMPT,
+          });
+
+          // Merge LLM stream into UI message stream
+          writer.merge(result.toUIMessageStream());
+
+          // Create conversation if new
+          if (isNewConversation && messageText) {
+            try {
+              const cortex = getCortex();
+              await cortex.conversations.create({
+                memorySpaceId,
+                conversationId,
+                type: "user-agent",
+                participants: {
+                  userId,
+                  agentId: "cortex-memory-agent",
+                },
+                metadata: { title: generateTitle(messageText) },
+              });
+
+              // Send conversation update to client
+              writer.write({
+                type: "data-conversation-update",
+                data: {
+                  conversationId,
+                  title: generateTitle(messageText),
+                },
+                transient: true,
+              });
+            } catch (error) {
+              console.error("Failed to create conversation:", error);
+            }
+          }
+        },
+      }),
+    });
+  } catch (error) {
+    console.error("[Chat v6 API Error]", error);
+
+    return new Response(
+      JSON.stringify({
+        error: error instanceof Error ? error.message : "Unknown error",
+      }),
+      {
+        status: 500,
+        headers: { "Content-Type": "application/json" },
+      }
+    );
+  }
+}
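
A request sketch against the new route, assuming it is called from within the quickstart app so a relative URL works; memorySpaceId and userId may be omitted since the route destructures the defaults shown above:

// Sketch: invoking the v6 route. Omitted fields fall back to the route's
// defaults ("quickstart-demo" / "demo-user"); conversationId is optional.
const res = await fetch("/api/chat-v6", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [
      { role: "user", parts: [{ type: "text", text: "Remember that I prefer TypeScript." }] },
    ],
  }),
});
// The response body is a UI message stream: model text chunks interleaved
// with transient data-layer-update / data-orchestration-* parts that drive
// the memory-layer UI.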
package/templates/vercel-ai-quickstart/app/page.tsx

@@ -1,8 +1,9 @@
 "use client";
 
 import dynamic from "next/dynamic";
-import { useState, useCallback } from "react";
+import { useState, useCallback, useEffect } from "react";
 import { useLayerTracking } from "@/lib/layer-tracking";
+import { detectVersions, type VersionInfo } from "@/lib/versions";
 import { AuthProvider, useAuth } from "@/components/AuthProvider";
 import { AdminSetup } from "@/components/AdminSetup";
 import { LoginScreen } from "@/components/LoginScreen";

@@ -52,6 +53,7 @@ function MainContent() {
   const { isLoading, isAdminSetup, isAuthenticated, user } = useAuth();
   const [memorySpaceId, setMemorySpaceId] = useState("quickstart-demo");
   const [currentConversationId, setCurrentConversationId] = useState<string | null>(null);
+  const [versions, setVersions] = useState<VersionInfo | null>(null);
   const {
     layers,
     isOrchestrating,

@@ -60,6 +62,11 @@ function MainContent() {
     resetLayers,
   } = useLayerTracking();
 
+  // Detect SDK versions on mount
+  useEffect(() => {
+    detectVersions().then(setVersions);
+  }, []);
+
   // Handle new chat
   const handleNewChat = useCallback(() => {
     setCurrentConversationId(null);

@@ -109,7 +116,7 @@ function MainContent() {
 
   // Main authenticated interface
   return (
-    <main className="
+    <main className="h-screen flex flex-col overflow-hidden">
       {/* Header */}
       <header className="border-b border-white/10 px-6 py-4">
         <div className="flex items-center justify-between">

@@ -151,6 +158,7 @@ function MainContent() {
             memorySpaceId={memorySpaceId}
             userId={userId}
             conversationId={currentConversationId}
+            apiEndpoint={versions?.aiSdkMajor === 6 ? "/api/chat-v6" : "/api/chat"}
             onOrchestrationStart={startOrchestration}
             onLayerUpdate={updateLayer}
             onReset={resetLayers}

@@ -185,9 +193,15 @@ function MainContent() {
       <footer className="border-t border-white/10 px-6 py-3">
         <div className="flex items-center justify-between text-sm text-gray-500">
           <div className="flex items-center gap-4">
-            <span>Cortex SDK
+            <span>Cortex SDK {versions ? `v${versions.cortexSdk}` : "..."}</span>
             <span>•</span>
-            <span>Vercel AI SDK
+            <span>Vercel AI SDK {versions?.aiSdk ?? "..."}</span>
+            {versions?.aiSdkMajor === 6 && (
+              <>
+                <span>•</span>
+                <span className="text-cortex-400">Using ToolLoopAgent</span>
+              </>
+            )}
           </div>
           <a
             href="https://cortexmemory.dev/docs"
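
The new lib/versions.ts (+60 lines) is not shown in this excerpt. From the usage above, its exports plausibly look like the following; treat the field names and types as inferred assumptions, not the file's actual contents:

// Inferred from how page.tsx consumes detectVersions(); lib/versions.ts
// itself is not part of this excerpt, so this shape is an assumption.
export interface VersionInfo {
  cortexSdk: string;   // rendered as `v${versions.cortexSdk}` in the footer
  aiSdk: string;       // full Vercel AI SDK version string shown in the footer
  aiSdkMajor: number;  // 6 routes chat traffic to /api/chat-v6
}

// Resolves asynchronously, hence the useEffect + nullable state in page.tsx.
export declare function detectVersions(): Promise<VersionInfo>;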
package/templates/vercel-ai-quickstart/components/ChatInterface.tsx

@@ -26,6 +26,8 @@ interface ChatInterfaceProps {
   memorySpaceId: string;
   userId: string;
   conversationId: string | null;
+  /** API endpoint for chat - defaults to /api/chat, use /api/chat-v6 for AI SDK v6 */
+  apiEndpoint?: string;
   onOrchestrationStart?: () => void;
   onLayerUpdate?: (
     layer: MemoryLayer,

@@ -44,6 +46,7 @@ export function ChatInterface({
   memorySpaceId,
   userId,
   conversationId,
+  apiEndpoint = "/api/chat",
   onOrchestrationStart,
   onLayerUpdate,
   onReset,

@@ -71,7 +74,7 @@ export function ChatInterface({
   const transport = useMemo(
     () =>
       new DefaultChatTransport({
-        api:
+        api: apiEndpoint,
         // Use a function to get body so it reads latest conversationId from ref
         body: () => ({
           memorySpaceId,

@@ -79,7 +82,7 @@
           conversationId: conversationIdRef.current,
         }),
       }),
-    [memorySpaceId, userId], // Note: conversationId removed - ref handles updates
+    [apiEndpoint, memorySpaceId, userId], // Note: conversationId removed - ref handles updates
   );
 
   // Handle layer data parts from the stream
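
A usage sketch for the new prop, mirroring the page.tsx wiring above (version-detection state and handlers elided). Note that apiEndpoint is also added to the useMemo dependency list: the transport is rebuilt only when its inputs change, so omitting it would keep sending requests to the old endpoint after version detection resolves.

// Parent picks the endpoint once SDK versions are known; the "/api/chat"
// default keeps existing v5 installs working unchanged.
<ChatInterface
  memorySpaceId={memorySpaceId}
  userId={userId}
  conversationId={currentConversationId}
  apiEndpoint={versions?.aiSdkMajor === 6 ? "/api/chat-v6" : "/api/chat"}
  onOrchestrationStart={startOrchestration}
  onLayerUpdate={updateLayer}
  onReset={resetLayers}
/>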
package/templates/vercel-ai-quickstart/jest.config.js

@@ -1,9 +1,10 @@
 /**
  * Jest Configuration for Vercel AI Quickstart
  *
- *
+ * Three test projects:
  * - unit: Fast unit tests with mocked dependencies
  * - integration: Integration tests for API routes with mocked SDK
+ * - e2e: End-to-end tests with real Cortex backend (requires CONVEX_URL, OPENAI_API_KEY)
  */
 
 const baseConfig = {

@@ -41,5 +42,11 @@ module.exports = {
       testMatch: ["<rootDir>/tests/integration/**/*.test.ts"],
       testTimeout: 30000,
     },
+    {
+      ...baseConfig,
+      displayName: "e2e",
+      testMatch: ["<rootDir>/tests/e2e/**/*.test.ts"],
+      testTimeout: 120000, // 2 minutes for real network calls
+    },
+  ],
 };
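
A skeleton of what a test under the new e2e project could look like. This is a sketch only; the shipped suite is tests/e2e/chat-memory-flow.test.ts (+454 lines, not shown here), and running it needs CONVEX_URL and OPENAI_API_KEY plus a running dev server:

// Run with: npx jest --selectProjects e2e
// Assumes the quickstart dev server is up on localhost:3000.
describe("chat memory flow (e2e sketch)", () => {
  it("streams a response from the real backend", async () => {
    const res = await fetch("http://localhost:3000/api/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        messages: [{ role: "user", parts: [{ type: "text", text: "Hello" }] }],
      }),
    });
    expect(res.status).toBe(200);
  }, 120_000); // matches the project's 2-minute testTimeout
});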