@cortexmemory/cli 0.27.3 → 0.28.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/db.d.ts.map +1 -1
- package/dist/commands/db.js +18 -6
- package/dist/commands/db.js.map +1 -1
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +191 -80
- package/dist/commands/deploy.js.map +1 -1
- package/dist/commands/dev.js +3 -2
- package/dist/commands/dev.js.map +1 -1
- package/dist/commands/init.d.ts.map +1 -1
- package/dist/commands/init.js +12 -0
- package/dist/commands/init.js.map +1 -1
- package/dist/types.d.ts +1 -1
- package/dist/types.d.ts.map +1 -1
- package/dist/utils/app-template-sync.d.ts.map +1 -1
- package/dist/utils/app-template-sync.js +35 -13
- package/dist/utils/app-template-sync.js.map +1 -1
- package/dist/utils/init/quickstart-setup.d.ts.map +1 -1
- package/dist/utils/init/quickstart-setup.js.map +1 -1
- package/package.json +4 -4
- package/templates/basic/.env.local.example +23 -0
- package/templates/basic/README.md +181 -56
- package/templates/basic/package-lock.json +2180 -406
- package/templates/basic/package.json +23 -5
- package/templates/basic/src/__tests__/chat.test.ts +340 -0
- package/templates/basic/src/__tests__/cortex.test.ts +260 -0
- package/templates/basic/src/__tests__/display.test.ts +455 -0
- package/templates/basic/src/__tests__/e2e/fact-extraction.test.ts +498 -0
- package/templates/basic/src/__tests__/e2e/memory-flow.test.ts +355 -0
- package/templates/basic/src/__tests__/e2e/server-e2e.test.ts +414 -0
- package/templates/basic/src/__tests__/helpers/test-utils.ts +345 -0
- package/templates/basic/src/__tests__/integration/chat-flow.test.ts +422 -0
- package/templates/basic/src/__tests__/integration/server.test.ts +441 -0
- package/templates/basic/src/__tests__/llm.test.ts +344 -0
- package/templates/basic/src/chat.ts +300 -0
- package/templates/basic/src/cortex.ts +203 -0
- package/templates/basic/src/display.ts +425 -0
- package/templates/basic/src/index.ts +194 -64
- package/templates/basic/src/llm.ts +214 -0
- package/templates/basic/src/server.ts +280 -0
- package/templates/basic/vitest.config.ts +33 -0
- package/templates/basic/vitest.e2e.config.ts +28 -0
- package/templates/basic/vitest.integration.config.ts +25 -0
- package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +61 -19
- package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +14 -18
- package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +4 -7
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +95 -23
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +339 -0
- package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +16 -16
- package/templates/vercel-ai-quickstart/app/globals.css +24 -9
- package/templates/vercel-ai-quickstart/app/page.tsx +41 -15
- package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +3 -1
- package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +6 -6
- package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +19 -8
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +46 -16
- package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +10 -5
- package/templates/vercel-ai-quickstart/jest.config.js +8 -1
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
- package/templates/vercel-ai-quickstart/lib/password.ts +5 -5
- package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
- package/templates/vercel-ai-quickstart/next.config.js +10 -2
- package/templates/vercel-ai-quickstart/package.json +23 -12
- package/templates/vercel-ai-quickstart/test-api.mjs +303 -0
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +483 -0
- package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +40 -40
- package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +8 -8
- package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +12 -8
- package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +4 -1

package/templates/vercel-ai-quickstart/app/api/chat/route.ts

@@ -122,45 +122,117 @@ function generateTitle(message: string): string {
   return title;
 }
 
+/**
+ * Normalize messages to ensure they have the `parts` array format
+ * expected by AI SDK v6's convertToModelMessages.
+ *
+ * Handles:
+ * - Messages with `content` string (legacy format) -> converts to `parts` array
+ * - Messages with `role: "agent"` -> converts to `role: "assistant"`
+ * - Messages already in v6 format -> passes through unchanged
+ */
+function normalizeMessages(messages: unknown[]): unknown[] {
+  return messages.map((msg: unknown) => {
+    const m = msg as Record<string, unknown>;
+
+    // Normalize role: "agent" -> "assistant"
+    let role = m.role as string;
+    if (role === "agent") {
+      role = "assistant";
+    }
+
+    // Ensure parts array exists
+    let parts = m.parts as Array<{ type: string; text?: string }> | undefined;
+    if (!parts) {
+      // Convert content string to parts array
+      const content = m.content as string | undefined;
+      if (content) {
+        parts = [{ type: "text", text: content }];
+      } else {
+        parts = [];
+      }
+    }
+
+    return {
+      ...m,
+      role,
+      parts,
+    };
+  });
+}
+
+/**
+ * Extract text from a message (handles both content string and parts array)
+ */
+function getMessageText(message: {
+  content?: string;
+  parts?: Array<{ type: string; text?: string }>;
+}): string {
+  if (typeof message.content === "string") {
+    return message.content;
+  }
+  if (message.parts && Array.isArray(message.parts)) {
+    return message.parts
+      .filter((part) => part.type === "text" && part.text)
+      .map((part) => part.text)
+      .join("");
+  }
+  return "";
+}
+
 export async function POST(req: Request) {
   try {
     const body = await req.json();
-    const {
+    const {
+      messages,
+      memorySpaceId,
+      userId,
+      conversationId: providedConversationId,
+    } = body;
+
+    // Validate messages array exists
+    if (!messages || !Array.isArray(messages)) {
+      return new Response(
+        JSON.stringify({ error: "messages array is required" }),
+        { status: 400, headers: { "Content-Type": "application/json" } },
+      );
+    }
 
     // Generate conversation ID if not provided (new chat)
-    const conversationId =
+    const conversationId =
+      providedConversationId ||
       `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
     const isNewConversation = !providedConversationId;
 
+    // Normalize messages to ensure they have the `parts` array format
+    // expected by AI SDK v6's convertToModelMessages
+    const normalizedMessages = normalizeMessages(messages);
+
     // Convert UIMessage[] from useChat to ModelMessage[] for streamText
     // Note: In AI SDK v6+, convertToModelMessages may return a Promise
-
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelMessagesResult = convertToModelMessages(
+      normalizedMessages as any,
+    );
     const modelMessages =
       modelMessagesResult instanceof Promise
        ? await modelMessagesResult
        : modelMessagesResult;
 
     // Get the first user message for title generation
-
-
-
-
-
-
-
-
-
-
-
-
-
-        // AI SDK v5+ format: extract text from parts array
-        messageText = firstUserMessage.parts
-          .filter((part) => part.type === "text" && part.text)
-          .map((part) => part.text)
-          .join("");
-      }
-    }
+    const firstUserMessage = messages.find(
+      (m: { role: string }) => m.role === "user",
+    ) as
+      | {
+          role: string;
+          content?: string;
+          parts?: Array<{ type: string; text?: string }>;
+        }
+      | undefined;
+
+    const messageText = firstUserMessage
+      ? getMessageText(firstUserMessage)
+      : "";
 
     // Use createUIMessageStream to send both LLM text and layer events
     return createUIMessageStreamResponse({
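
A note for readers of the hunk above: the two new helpers do all of the v5-to-v6 message shimming. The sketch below restates the normalization logic standalone so its effect is easy to see; the sample input and the comments on output are illustrative, not part of the package.

```typescript
// Minimal standalone sketch of the normalizeMessages helper added above.
// The function body mirrors the diff; the demo input is hypothetical.
type Part = { type: string; text?: string };

function normalizeMessages(messages: unknown[]): unknown[] {
  return messages.map((msg) => {
    const m = msg as Record<string, unknown>;

    // Legacy role "agent" becomes the AI SDK's "assistant"
    let role = m.role as string;
    if (role === "agent") role = "assistant";

    // Legacy `content` strings become a one-element `parts` array
    let parts = m.parts as Part[] | undefined;
    if (!parts) {
      const content = m.content as string | undefined;
      parts = content ? [{ type: "text", text: content }] : [];
    }

    return { ...m, role, parts };
  });
}

// One legacy-format message, one message already carrying parts
const input = [
  { role: "user", content: "Hi, I drink tea, not coffee." },
  { role: "agent", parts: [{ type: "text", text: "Noted!" }] },
];
console.log(JSON.stringify(normalizeMessages(input), null, 2));
// The user message gains parts: [{ type: "text", text: "Hi, I drink tea, ..." }]
// and "agent" is rewritten to "assistant"; v6-shaped input passes through as-is.
```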

package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts

@@ -0,0 +1,339 @@
+/**
+ * Chat API Route (AI SDK v6 Style)
+ *
+ * This route uses AI SDK v6's patterns while maintaining full Cortex Memory
+ * capabilities including:
+ * - Memory recall (reading past memories)
+ * - Memory storage (saving new conversations)
+ * - Fact extraction (extracting knowledge from conversations)
+ * - Belief revision (superseding outdated facts)
+ * - Layer observer (real-time UI updates)
+ *
+ * The key difference from v5 is using v6's cleaner APIs, but the memory
+ * infrastructure is identical to ensure feature parity.
+ */
+
+import { createCortexMemoryAsync } from "@cortexmemory/vercel-ai-provider";
+import type {
+  LayerObserver,
+  CortexMemoryConfig,
+} from "@cortexmemory/vercel-ai-provider";
+import { openai, createOpenAI } from "@ai-sdk/openai";
+import {
+  streamText,
+  embed,
+  convertToModelMessages,
+  createUIMessageStream,
+  createUIMessageStreamResponse,
+} from "ai";
+import { getCortex } from "@/lib/cortex";
+
+// Create OpenAI client for embeddings
+const openaiClient = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
+
+// System prompt for the assistant
+const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.
+
+Your capabilities:
+- You remember everything users tell you across conversations
+- You can recall facts, preferences, and context from past interactions
+- You naturally reference what you've learned about the user
+
+Behavior guidelines:
+- When you remember something from a previous conversation, mention it naturally
+- If asked about something you learned, reference it specifically
+- Be conversational and friendly
+- Help demonstrate the memory system by showing what you remember`;
+
+/**
+ * Create Cortex Memory config - IDENTICAL to v5 route for feature parity
+ */
+function getCortexMemoryConfig(
+  memorySpaceId: string,
+  userId: string,
+  conversationId: string,
+  layerObserver?: LayerObserver,
+): CortexMemoryConfig {
+  return {
+    convexUrl: process.env.CONVEX_URL!,
+    memorySpaceId,
+
+    // User identification
+    userId,
+    userName: "Demo User",
+
+    // Agent identification
+    agentId: "cortex-memory-agent",
+    agentName: "Cortex v6 Assistant",
+
+    // Conversation ID for chat history isolation
+    conversationId,
+
+    // Enable graph memory sync
+    enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
+
+    // Enable fact extraction - CRITICAL for memory to work!
+    enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION === "true",
+
+    // Belief Revision - handles fact updates and supersessions
+    beliefRevision: {
+      enabled: true,
+      slotMatching: true,
+      llmResolution: true,
+    },
+
+    // Embedding provider for semantic matching
+    embeddingProvider: {
+      generate: async (text: string) => {
+        const result = await embed({
+          model: openaiClient.embedding("text-embedding-3-small"),
+          value: text,
+        });
+        return result.embedding;
+      },
+    },
+
+    // Streaming enhancements
+    streamingOptions: {
+      storePartialResponse: true,
+      progressiveFactExtraction: true,
+      enableAdaptiveProcessing: true,
+    },
+
+    // Memory recall configuration
+    memorySearchLimit: 20,
+
+    // Real-time layer tracking
+    layerObserver,
+
+    // Debug in development
+    debug: process.env.NODE_ENV === "development",
+  };
+}
+
+/**
+ * Normalize messages to AI SDK v6 UIMessage format
+ */
+function normalizeMessages(messages: unknown[]): unknown[] {
+  return messages.map((msg: unknown) => {
+    const m = msg as Record<string, unknown>;
+
+    // Normalize role: "agent" -> "assistant"
+    let role = m.role as string;
+    if (role === "agent") {
+      role = "assistant";
+    }
+
+    // Ensure parts array exists
+    let parts = m.parts as Array<{ type: string; text?: string }> | undefined;
+    if (!parts) {
+      const content = m.content as string | undefined;
+      if (content) {
+        parts = [{ type: "text", text: content }];
+      } else {
+        parts = [];
+      }
+    }
+
+    return {
+      ...m,
+      role,
+      parts,
+    };
+  });
+}
+
+/**
+ * Extract text from a message
+ */
+function getMessageText(message: {
+  content?: string;
+  parts?: Array<{ type: string; text?: string }>;
+}): string {
+  if (typeof message.content === "string") {
+    return message.content;
+  }
+  if (message.parts && Array.isArray(message.parts)) {
+    return message.parts
+      .filter((part) => part.type === "text" && part.text)
+      .map((part) => part.text)
+      .join("");
+  }
+  return "";
+}
+
+/**
+ * Generate a title from the first user message
+ */
+function generateTitle(message: string): string {
+  let title = message.slice(0, 50);
+  if (message.length > 50) {
+    const lastSpace = title.lastIndexOf(" ");
+    if (lastSpace > 20) {
+      title = title.slice(0, lastSpace);
+    }
+    title += "...";
+  }
+  return title;
+}
+
+export async function POST(req: Request) {
+  try {
+    const body = await req.json();
+    const {
+      messages,
+      memorySpaceId = "quickstart-demo",
+      userId = "demo-user",
+      conversationId: providedConversationId,
+    } = body;
+
+    // Validate messages array exists
+    if (!messages || !Array.isArray(messages)) {
+      return new Response(
+        JSON.stringify({ error: "messages array is required" }),
+        { status: 400, headers: { "Content-Type": "application/json" } },
+      );
+    }
+
+    // Generate conversation ID if not provided
+    const conversationId =
+      providedConversationId ||
+      `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
+    const isNewConversation = !providedConversationId;
+
+    // Normalize messages for convertToModelMessages
+    const normalizedMessages = normalizeMessages(messages);
+
+    // Convert to model messages
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const modelMessagesResult = convertToModelMessages(
+      normalizedMessages as any,
+    );
+    const modelMessages =
+      modelMessagesResult instanceof Promise
+        ? await modelMessagesResult
+        : modelMessagesResult;
+
+    // Get first user message for title
+    const firstUserMessage = messages.find(
+      (m: { role: string }) => m.role === "user",
+    ) as
+      | {
+          role: string;
+          content?: string;
+          parts?: Array<{ type: string; text?: string }>;
+        }
+      | undefined;
+
+    const messageText = firstUserMessage
+      ? getMessageText(firstUserMessage)
+      : "";
+
+    // Use createUIMessageStreamResponse - same as v5 for full memory support
+    return createUIMessageStreamResponse({
+      stream: createUIMessageStream({
+        execute: async ({ writer }) => {
+          // Create layer observer for real-time UI updates
+          const layerObserver: LayerObserver = {
+            onOrchestrationStart: (orchestrationId) => {
+              writer.write({
+                type: "data-orchestration-start",
+                data: { orchestrationId },
+                transient: true,
+              });
+            },
+            onLayerUpdate: (event) => {
+              writer.write({
+                type: "data-layer-update",
+                data: {
+                  layer: event.layer,
+                  status: event.status,
+                  timestamp: event.timestamp,
+                  latencyMs: event.latencyMs,
+                  data: event.data,
+                  error: event.error,
+                  revisionAction: event.revisionAction,
+                  supersededFacts: event.supersededFacts,
+                },
+                transient: true,
+              });
+            },
+            onOrchestrationComplete: (summary) => {
+              writer.write({
+                type: "data-orchestration-complete",
+                data: {
+                  orchestrationId: summary.orchestrationId,
+                  totalLatencyMs: summary.totalLatencyMs,
+                  createdIds: summary.createdIds,
+                },
+                transient: true,
+              });
+            },
+          };
+
+          // Build config with observer
+          const config = getCortexMemoryConfig(
+            memorySpaceId,
+            userId,
+            conversationId,
+            layerObserver,
+          );
+
+          // Create memory-augmented model - THIS handles both recall AND storage!
+          const cortexMemory = await createCortexMemoryAsync(config);
+
+          // Stream response with automatic memory integration
+          const result = streamText({
+            model: cortexMemory(openai("gpt-4o-mini")),
+            messages: modelMessages,
+            system: SYSTEM_PROMPT,
+          });
+
+          // Merge LLM stream into UI message stream
+          writer.merge(result.toUIMessageStream());
+
+          // Create conversation if new
+          if (isNewConversation && messageText) {
+            try {
+              const cortex = getCortex();
+              await cortex.conversations.create({
+                memorySpaceId,
+                conversationId,
+                type: "user-agent",
+                participants: {
+                  userId,
+                  agentId: "cortex-memory-agent",
+                },
+                metadata: { title: generateTitle(messageText) },
+              });
+
+              // Send conversation update to client
+              writer.write({
+                type: "data-conversation-update",
+                data: {
+                  conversationId,
+                  title: generateTitle(messageText),
+                },
+                transient: true,
+              });
+            } catch (error) {
+              console.error("Failed to create conversation:", error);
+            }
+          }
+        },
+      }),
+    });
+  } catch (error) {
+    console.error("[Chat v6 API Error]", error);
+
+    return new Response(
+      JSON.stringify({
+        error: error instanceof Error ? error.message : "Unknown error",
+      }),
+      {
+        status: 500,
+        headers: { "Content-Type": "application/json" },
+      },
+    );
+  }
+}
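
The new file accepts the same request body as the v5 route. Below is a hedged sketch of a client call against it, assuming a local dev server on port 3000: the path /api/chat-v6 follows Next.js app-router conventions for the file above, and the body fields match the handler's destructuring (the "quickstart-demo" / "demo-user" defaults apply when omitted). The raw stream-reading loop is a generic illustration; a real UI would normally consume this via the AI SDK's useChat hook.

```typescript
// Sketch only: POST to the new v6 route and echo the raw UI message stream.
// "http://localhost:3000" is an assumed dev-server origin.
async function askMemoryAgent(text: string): Promise<void> {
  const res = await fetch("http://localhost:3000/api/chat-v6", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      // v6 UIMessage shape; legacy { role, content } would also work,
      // since the route normalizes it server-side
      messages: [{ role: "user", parts: [{ type: "text", text }] }],
      memorySpaceId: "quickstart-demo",
      userId: "demo-user",
      // conversationId omitted -> the server generates one, creates the
      // conversation, and emits a transient "data-conversation-update" part
    }),
  });
  if (!res.ok || !res.body) throw new Error(`HTTP ${res.status}`);

  // Transient data parts ("data-layer-update", "data-orchestration-*")
  // arrive interleaved with the assistant text in this stream.
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    process.stdout.write(decoder.decode(value));
  }
}

askMemoryAgent("Remember that I prefer tea over coffee.").catch(console.error);
```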

package/templates/vercel-ai-quickstart/app/api/conversations/route.ts

@@ -13,7 +13,8 @@ export async function GET(req: Request) {
   const { searchParams } = new URL(req.url);
   const conversationId = searchParams.get("conversationId");
   const userId = searchParams.get("userId");
-  const memorySpaceId =
+  const memorySpaceId =
+    searchParams.get("memorySpaceId") || "quickstart-demo";
 
   const cortex = getCortex();
 
@@ -27,7 +28,7 @@ export async function GET(req: Request) {
   if (!conversation) {
     return Response.json(
       { error: "Conversation not found" },
-      { status: 404 }
+      { status: 404 },
     );
   }
 
@@ -42,7 +43,9 @@ export async function GET(req: Request) {
   return Response.json({
     conversation: {
       id: conversation.conversationId,
-      title:
+      title:
+        (conversation.metadata?.title as string) ||
+        getDefaultTitle(conversation),
       createdAt: conversation.createdAt,
       updatedAt: conversation.updatedAt,
       messageCount: conversation.messageCount || 0,
@@ -53,10 +56,7 @@ export async function GET(req: Request) {
 
   // List conversations for user (requires userId)
   if (!userId) {
-    return Response.json(
-      { error: "userId is required" },
-      { status: 400 }
-    );
+    return Response.json({ error: "userId is required" }, { status: 400 });
   }
 
   // Get conversations for the user
@@ -84,7 +84,7 @@ export async function GET(req: Request) {
 
     return Response.json(
       { error: "Failed to fetch conversations" },
-      { status: 500 }
+      { status: 500 },
     );
   }
 }
@@ -95,10 +95,7 @@ export async function POST(req: Request) {
   const { userId, memorySpaceId = "quickstart-demo", title } = body;
 
   if (!userId) {
-    return Response.json(
-      { error: "userId is required" },
-      { status: 400 }
-    );
+    return Response.json({ error: "userId is required" }, { status: 400 });
   }
 
   const cortex = getCortex();
@@ -134,7 +131,7 @@ export async function POST(req: Request) {
 
     return Response.json(
      { error: "Failed to create conversation" },
-      { status: 500 }
+      { status: 500 },
    );
  }
 }
@@ -147,7 +144,7 @@ export async function DELETE(req: Request) {
   if (!conversationId) {
     return Response.json(
       { error: "conversationId is required" },
-      { status: 400 }
+      { status: 400 },
     );
   }
 
@@ -161,7 +158,7 @@ export async function DELETE(req: Request) {
 
     return Response.json(
       { error: "Failed to delete conversation" },
-      { status: 500 }
+      { status: 500 },
     );
   }
 }
@@ -169,7 +166,10 @@ export async function DELETE(req: Request) {
 /**
  * Generate a default title from conversation data
  */
-function getDefaultTitle(conv: {
+function getDefaultTitle(conv: {
+  createdAt: number;
+  messageCount?: number;
+}): string {
   const date = new Date(conv.createdAt);
   const timeStr = date.toLocaleTimeString("en-US", {
     hour: "numeric",
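
Most of these hunks are Prettier trailing-comma churn, but the title fallback is a behavior change: GET now prefers metadata.title (written by the chat routes via generateTitle) and only falls back to getDefaultTitle's date-based label. A small sketch of the single-conversation branch, with placeholder IDs and an assumed local origin:

```typescript
// Hypothetical IDs; query parameters and response shape follow the GET
// handler above (memorySpaceId falls back to "quickstart-demo").
async function showConversationTitle(): Promise<void> {
  const params = new URLSearchParams({
    conversationId: "conv-1730000000000-abc123",
    userId: "demo-user",
  });
  const res = await fetch(`http://localhost:3000/api/conversations?${params}`);
  if (res.status === 404) {
    console.error("Conversation not found");
    return;
  }
  const { conversation } = await res.json();
  // metadata.title when present, otherwise the date-based default
  console.log(conversation.title, conversation.messageCount);
}

showConversationTitle().catch(console.error);
```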

package/templates/vercel-ai-quickstart/app/globals.css

@@ -129,7 +129,8 @@
 }
 
 @keyframes logo-pulse {
-  0%,
+  0%,
+  100% {
     transform: scale(1);
     box-shadow: 0 0 0 0 rgba(12, 140, 230, 0.4);
   }
@@ -182,12 +183,24 @@
 }
 
 /* Staggered animation for conversation list */
-.conversation-item:nth-child(1) {
-
-
-.conversation-item:nth-child(
-
-
+.conversation-item:nth-child(1) {
+  animation-delay: 0ms;
+}
+.conversation-item:nth-child(2) {
+  animation-delay: 30ms;
+}
+.conversation-item:nth-child(3) {
+  animation-delay: 60ms;
+}
+.conversation-item:nth-child(4) {
+  animation-delay: 90ms;
+}
+.conversation-item:nth-child(5) {
+  animation-delay: 120ms;
+}
+.conversation-item:nth-child(n + 6) {
+  animation-delay: 150ms;
+}
 
 /* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
    Input Focus Animations
@@ -216,7 +229,9 @@ input:focus {
   background: rgba(255, 255, 255, 0.1);
   border-radius: 50%;
   transform: translate(-50%, -50%);
-  transition:
+  transition:
+    width 0.4s ease,
+    height 0.4s ease;
 }
 
 .btn-primary:hover::after {
@@ -265,7 +280,7 @@ input:focus {
   transform: translateX(-100%);
   transition: transform 0.3s ease;
 }
-
+
 .sidebar-mobile-visible {
   transform: translateX(0);
   position: fixed;