@cortexmemory/cli 0.27.1 → 0.27.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/convex.js +1 -1
- package/dist/commands/convex.js.map +1 -1
- package/dist/commands/deploy.d.ts +1 -1
- package/dist/commands/deploy.d.ts.map +1 -1
- package/dist/commands/deploy.js +839 -141
- package/dist/commands/deploy.js.map +1 -1
- package/dist/commands/dev.d.ts.map +1 -1
- package/dist/commands/dev.js +89 -26
- package/dist/commands/dev.js.map +1 -1
- package/dist/index.js +1 -1
- package/dist/utils/app-template-sync.d.ts +95 -0
- package/dist/utils/app-template-sync.d.ts.map +1 -0
- package/dist/utils/app-template-sync.js +445 -0
- package/dist/utils/app-template-sync.js.map +1 -0
- package/dist/utils/deployment-selector.d.ts +21 -0
- package/dist/utils/deployment-selector.d.ts.map +1 -1
- package/dist/utils/deployment-selector.js +32 -0
- package/dist/utils/deployment-selector.js.map +1 -1
- package/dist/utils/init/graph-setup.d.ts.map +1 -1
- package/dist/utils/init/graph-setup.js +13 -2
- package/dist/utils/init/graph-setup.js.map +1 -1
- package/package.json +1 -1
- package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +30 -0
- package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +128 -0
- package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +94 -0
- package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +59 -0
- package/templates/vercel-ai-quickstart/app/api/chat/route.ts +139 -3
- package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +333 -0
- package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +179 -0
- package/templates/vercel-ai-quickstart/app/globals.css +161 -0
- package/templates/vercel-ai-quickstart/app/page.tsx +110 -11
- package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +139 -0
- package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +283 -0
- package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +323 -0
- package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +117 -17
- package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +202 -0
- package/templates/vercel-ai-quickstart/jest.config.js +52 -0
- package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
- package/templates/vercel-ai-quickstart/lib/cortex.ts +27 -0
- package/templates/vercel-ai-quickstart/lib/password.ts +120 -0
- package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
- package/templates/vercel-ai-quickstart/next.config.js +20 -0
- package/templates/vercel-ai-quickstart/package.json +11 -2
- package/templates/vercel-ai-quickstart/test-api.mjs +272 -0
- package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +454 -0
- package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +263 -0
- package/templates/vercel-ai-quickstart/tests/helpers/setup.ts +48 -0
- package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +455 -0
- package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +461 -0
- package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +228 -0
- package/templates/vercel-ai-quickstart/tsconfig.json +1 -1
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Chat API Route (AI SDK v6 Style)
|
|
3
|
+
*
|
|
4
|
+
* This route uses AI SDK v6's patterns while maintaining full Cortex Memory
|
|
5
|
+
* capabilities including:
|
|
6
|
+
* - Memory recall (reading past memories)
|
|
7
|
+
* - Memory storage (saving new conversations)
|
|
8
|
+
* - Fact extraction (extracting knowledge from conversations)
|
|
9
|
+
* - Belief revision (superseding outdated facts)
|
|
10
|
+
* - Layer observer (real-time UI updates)
|
|
11
|
+
*
|
|
12
|
+
* The key difference from v5 is using v6's cleaner APIs, but the memory
|
|
13
|
+
* infrastructure is identical to ensure feature parity.
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
import { createCortexMemoryAsync } from "@cortexmemory/vercel-ai-provider";
|
|
17
|
+
import type {
|
|
18
|
+
LayerObserver,
|
|
19
|
+
CortexMemoryConfig,
|
|
20
|
+
} from "@cortexmemory/vercel-ai-provider";
|
|
21
|
+
import { openai, createOpenAI } from "@ai-sdk/openai";
|
|
22
|
+
import {
|
|
23
|
+
streamText,
|
|
24
|
+
embed,
|
|
25
|
+
convertToModelMessages,
|
|
26
|
+
createUIMessageStream,
|
|
27
|
+
createUIMessageStreamResponse,
|
|
28
|
+
} from "ai";
|
|
29
|
+
import { getCortex } from "@/lib/cortex";
|
|
30
|
+
|
|
31
|
+
// OpenAI client used solely for embedding generation (semantic fact matching).
// NOTE(review): assumes OPENAI_API_KEY is set; createOpenAI does not validate it here.
const openaiClient = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

// System prompt for the assistant. Instructs the model to surface recalled
// memories naturally so the demo visibly exercises the memory system.
const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.

Your capabilities:
- You remember everything users tell you across conversations
- You can recall facts, preferences, and context from past interactions
- You naturally reference what you've learned about the user

Behavior guidelines:
- When you remember something from a previous conversation, mention it naturally
- If asked about something you learned, reference it specifically
- Be conversational and friendly
- Help demonstrate the memory system by showing what you remember`;
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Create Cortex Memory config - IDENTICAL to v5 route for feature parity
|
|
50
|
+
*/
|
|
51
|
+
function getCortexMemoryConfig(
|
|
52
|
+
memorySpaceId: string,
|
|
53
|
+
userId: string,
|
|
54
|
+
conversationId: string,
|
|
55
|
+
layerObserver?: LayerObserver
|
|
56
|
+
): CortexMemoryConfig {
|
|
57
|
+
return {
|
|
58
|
+
convexUrl: process.env.CONVEX_URL!,
|
|
59
|
+
memorySpaceId,
|
|
60
|
+
|
|
61
|
+
// User identification
|
|
62
|
+
userId,
|
|
63
|
+
userName: "Demo User",
|
|
64
|
+
|
|
65
|
+
// Agent identification
|
|
66
|
+
agentId: "cortex-memory-agent",
|
|
67
|
+
agentName: "Cortex v6 Assistant",
|
|
68
|
+
|
|
69
|
+
// Conversation ID for chat history isolation
|
|
70
|
+
conversationId,
|
|
71
|
+
|
|
72
|
+
// Enable graph memory sync
|
|
73
|
+
enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
|
|
74
|
+
|
|
75
|
+
// Enable fact extraction - CRITICAL for memory to work!
|
|
76
|
+
enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION === "true",
|
|
77
|
+
|
|
78
|
+
// Belief Revision - handles fact updates and supersessions
|
|
79
|
+
beliefRevision: {
|
|
80
|
+
enabled: true,
|
|
81
|
+
slotMatching: true,
|
|
82
|
+
llmResolution: true,
|
|
83
|
+
},
|
|
84
|
+
|
|
85
|
+
// Embedding provider for semantic matching
|
|
86
|
+
embeddingProvider: {
|
|
87
|
+
generate: async (text: string) => {
|
|
88
|
+
const result = await embed({
|
|
89
|
+
model: openaiClient.embedding("text-embedding-3-small"),
|
|
90
|
+
value: text,
|
|
91
|
+
});
|
|
92
|
+
return result.embedding;
|
|
93
|
+
},
|
|
94
|
+
},
|
|
95
|
+
|
|
96
|
+
// Streaming enhancements
|
|
97
|
+
streamingOptions: {
|
|
98
|
+
storePartialResponse: true,
|
|
99
|
+
progressiveFactExtraction: true,
|
|
100
|
+
enableAdaptiveProcessing: true,
|
|
101
|
+
},
|
|
102
|
+
|
|
103
|
+
// Memory recall configuration
|
|
104
|
+
memorySearchLimit: 20,
|
|
105
|
+
|
|
106
|
+
// Real-time layer tracking
|
|
107
|
+
layerObserver,
|
|
108
|
+
|
|
109
|
+
// Debug in development
|
|
110
|
+
debug: process.env.NODE_ENV === "development",
|
|
111
|
+
};
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Normalize messages to AI SDK v6 UIMessage format
|
|
116
|
+
*/
|
|
117
|
+
function normalizeMessages(messages: unknown[]): unknown[] {
|
|
118
|
+
return messages.map((msg: unknown) => {
|
|
119
|
+
const m = msg as Record<string, unknown>;
|
|
120
|
+
|
|
121
|
+
// Normalize role: "agent" -> "assistant"
|
|
122
|
+
let role = m.role as string;
|
|
123
|
+
if (role === "agent") {
|
|
124
|
+
role = "assistant";
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Ensure parts array exists
|
|
128
|
+
let parts = m.parts as Array<{ type: string; text?: string }> | undefined;
|
|
129
|
+
if (!parts) {
|
|
130
|
+
const content = m.content as string | undefined;
|
|
131
|
+
if (content) {
|
|
132
|
+
parts = [{ type: "text", text: content }];
|
|
133
|
+
} else {
|
|
134
|
+
parts = [];
|
|
135
|
+
}
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
return {
|
|
139
|
+
...m,
|
|
140
|
+
role,
|
|
141
|
+
parts,
|
|
142
|
+
};
|
|
143
|
+
});
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
/**
|
|
147
|
+
* Extract text from a message
|
|
148
|
+
*/
|
|
149
|
+
function getMessageText(message: {
|
|
150
|
+
content?: string;
|
|
151
|
+
parts?: Array<{ type: string; text?: string }>;
|
|
152
|
+
}): string {
|
|
153
|
+
if (typeof message.content === "string") {
|
|
154
|
+
return message.content;
|
|
155
|
+
}
|
|
156
|
+
if (message.parts && Array.isArray(message.parts)) {
|
|
157
|
+
return message.parts
|
|
158
|
+
.filter((part) => part.type === "text" && part.text)
|
|
159
|
+
.map((part) => part.text)
|
|
160
|
+
.join("");
|
|
161
|
+
}
|
|
162
|
+
return "";
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
/**
|
|
166
|
+
* Generate a title from the first user message
|
|
167
|
+
*/
|
|
168
|
+
function generateTitle(message: string): string {
|
|
169
|
+
let title = message.slice(0, 50);
|
|
170
|
+
if (message.length > 50) {
|
|
171
|
+
const lastSpace = title.lastIndexOf(" ");
|
|
172
|
+
if (lastSpace > 20) {
|
|
173
|
+
title = title.slice(0, lastSpace);
|
|
174
|
+
}
|
|
175
|
+
title += "...";
|
|
176
|
+
}
|
|
177
|
+
return title;
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
/**
 * POST /api/chat-v6 — stream a memory-augmented chat completion.
 *
 * Body: { messages (required array), memorySpaceId = "quickstart-demo",
 * userId = "demo-user", conversationId? }. When no conversationId is given,
 * a fresh one is generated and a conversation record is created mid-stream.
 *
 * The response is a UI message stream: LLM tokens are merged in from
 * streamText, and transient data events (orchestration start/complete,
 * per-layer updates, conversation updates) are written alongside them for
 * real-time UI rendering. Errors yield a JSON 400/500 response instead.
 */
export async function POST(req: Request) {
  try {
    const body = await req.json();
    const {
      messages,
      memorySpaceId = "quickstart-demo",
      userId = "demo-user",
      conversationId: providedConversationId,
    } = body;

    // Validate messages array exists
    if (!messages || !Array.isArray(messages)) {
      return new Response(
        JSON.stringify({ error: "messages array is required" }),
        { status: 400, headers: { "Content-Type": "application/json" } }
      );
    }

    // Generate conversation ID if not provided
    const conversationId =
      providedConversationId ||
      `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
    const isNewConversation = !providedConversationId;

    // Normalize messages for convertToModelMessages
    const normalizedMessages = normalizeMessages(messages);

    // Convert to model messages
    // (handle both sync and Promise-returning variants of the SDK helper)
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const modelMessagesResult = convertToModelMessages(normalizedMessages as any);
    const modelMessages =
      modelMessagesResult instanceof Promise
        ? await modelMessagesResult
        : modelMessagesResult;

    // Get first user message for title
    const firstUserMessage = messages.find(
      (m: { role: string }) => m.role === "user"
    ) as {
      role: string;
      content?: string;
      parts?: Array<{ type: string; text?: string }>;
    } | undefined;

    const messageText = firstUserMessage ? getMessageText(firstUserMessage) : "";

    // Use createUIMessageStreamResponse - same as v5 for full memory support
    return createUIMessageStreamResponse({
      stream: createUIMessageStream({
        execute: async ({ writer }) => {
          // Create layer observer for real-time UI updates.
          // Each callback forwards a transient data event (not persisted in
          // the final message) so the client can animate layer progress.
          const layerObserver: LayerObserver = {
            onOrchestrationStart: (orchestrationId) => {
              writer.write({
                type: "data-orchestration-start",
                data: { orchestrationId },
                transient: true,
              });
            },
            onLayerUpdate: (event) => {
              writer.write({
                type: "data-layer-update",
                data: {
                  layer: event.layer,
                  status: event.status,
                  timestamp: event.timestamp,
                  latencyMs: event.latencyMs,
                  data: event.data,
                  error: event.error,
                  revisionAction: event.revisionAction,
                  supersededFacts: event.supersededFacts,
                },
                transient: true,
              });
            },
            onOrchestrationComplete: (summary) => {
              writer.write({
                type: "data-orchestration-complete",
                data: {
                  orchestrationId: summary.orchestrationId,
                  totalLatencyMs: summary.totalLatencyMs,
                  createdIds: summary.createdIds,
                },
                transient: true,
              });
            },
          };

          // Build config with observer
          const config = getCortexMemoryConfig(
            memorySpaceId,
            userId,
            conversationId,
            layerObserver
          );

          // Create memory-augmented model - THIS handles both recall AND storage!
          const cortexMemory = await createCortexMemoryAsync(config);

          // Stream response with automatic memory integration
          const result = streamText({
            model: cortexMemory(openai("gpt-4o-mini")),
            messages: modelMessages,
            system: SYSTEM_PROMPT,
          });

          // Merge LLM stream into UI message stream
          writer.merge(result.toUIMessageStream());

          // Create conversation if new. Runs after the merge is initiated so
          // record creation does not delay first-token latency; failures are
          // logged but never abort the in-flight chat stream.
          if (isNewConversation && messageText) {
            try {
              const cortex = getCortex();
              await cortex.conversations.create({
                memorySpaceId,
                conversationId,
                type: "user-agent",
                participants: {
                  userId,
                  agentId: "cortex-memory-agent",
                },
                metadata: { title: generateTitle(messageText) },
              });

              // Send conversation update to client
              writer.write({
                type: "data-conversation-update",
                data: {
                  conversationId,
                  title: generateTitle(messageText),
                },
                transient: true,
              });
            } catch (error) {
              console.error("Failed to create conversation:", error);
            }
          }
        },
      }),
    });
  } catch (error) {
    console.error("[Chat v6 API Error]", error);

    return new Response(
      JSON.stringify({
        error: error instanceof Error ? error.message : "Unknown error",
      }),
      {
        status: 500,
        headers: { "Content-Type": "application/json" },
      }
    );
  }
}
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Conversations API Route
|
|
3
|
+
*
|
|
4
|
+
* GET: List conversations for a user (chat history)
|
|
5
|
+
* POST: Create a new conversation
|
|
6
|
+
* DELETE: Delete a conversation
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { getCortex } from "@/lib/cortex";
|
|
10
|
+
|
|
11
|
+
export async function GET(req: Request) {
|
|
12
|
+
try {
|
|
13
|
+
const { searchParams } = new URL(req.url);
|
|
14
|
+
const conversationId = searchParams.get("conversationId");
|
|
15
|
+
const userId = searchParams.get("userId");
|
|
16
|
+
const memorySpaceId = searchParams.get("memorySpaceId") || "quickstart-demo";
|
|
17
|
+
|
|
18
|
+
const cortex = getCortex();
|
|
19
|
+
|
|
20
|
+
// If conversationId is provided, fetch single conversation with messages
|
|
21
|
+
if (conversationId) {
|
|
22
|
+
const conversation = await cortex.conversations.get(conversationId, {
|
|
23
|
+
includeMessages: true,
|
|
24
|
+
messageLimit: 100,
|
|
25
|
+
});
|
|
26
|
+
|
|
27
|
+
if (!conversation) {
|
|
28
|
+
return Response.json(
|
|
29
|
+
{ error: "Conversation not found" },
|
|
30
|
+
{ status: 404 }
|
|
31
|
+
);
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
// Transform messages to the format expected by AI SDK useChat
|
|
35
|
+
const messages = (conversation.messages || []).map((msg) => ({
|
|
36
|
+
id: msg.id,
|
|
37
|
+
role: msg.role as "user" | "assistant",
|
|
38
|
+
content: msg.content,
|
|
39
|
+
createdAt: new Date(msg.timestamp),
|
|
40
|
+
}));
|
|
41
|
+
|
|
42
|
+
return Response.json({
|
|
43
|
+
conversation: {
|
|
44
|
+
id: conversation.conversationId,
|
|
45
|
+
title: (conversation.metadata?.title as string) || getDefaultTitle(conversation),
|
|
46
|
+
createdAt: conversation.createdAt,
|
|
47
|
+
updatedAt: conversation.updatedAt,
|
|
48
|
+
messageCount: conversation.messageCount || 0,
|
|
49
|
+
},
|
|
50
|
+
messages,
|
|
51
|
+
});
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
// List conversations for user (requires userId)
|
|
55
|
+
if (!userId) {
|
|
56
|
+
return Response.json(
|
|
57
|
+
{ error: "userId is required" },
|
|
58
|
+
{ status: 400 }
|
|
59
|
+
);
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// Get conversations for the user
|
|
63
|
+
const result = await cortex.conversations.list({
|
|
64
|
+
memorySpaceId,
|
|
65
|
+
userId,
|
|
66
|
+
limit: 50,
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
// Map conversations to a simpler format for the UI
|
|
70
|
+
const conversations = result.conversations.map((conv) => ({
|
|
71
|
+
id: conv.conversationId,
|
|
72
|
+
title: (conv.metadata?.title as string) || getDefaultTitle(conv),
|
|
73
|
+
createdAt: conv.createdAt,
|
|
74
|
+
updatedAt: conv.updatedAt,
|
|
75
|
+
messageCount: conv.messageCount || 0,
|
|
76
|
+
}));
|
|
77
|
+
|
|
78
|
+
// Sort by updatedAt descending (most recent first)
|
|
79
|
+
conversations.sort((a, b) => b.updatedAt - a.updatedAt);
|
|
80
|
+
|
|
81
|
+
return Response.json({ conversations });
|
|
82
|
+
} catch (error) {
|
|
83
|
+
console.error("[Conversations Error]", error);
|
|
84
|
+
|
|
85
|
+
return Response.json(
|
|
86
|
+
{ error: "Failed to fetch conversations" },
|
|
87
|
+
{ status: 500 }
|
|
88
|
+
);
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
export async function POST(req: Request) {
|
|
93
|
+
try {
|
|
94
|
+
const body = await req.json();
|
|
95
|
+
const { userId, memorySpaceId = "quickstart-demo", title } = body;
|
|
96
|
+
|
|
97
|
+
if (!userId) {
|
|
98
|
+
return Response.json(
|
|
99
|
+
{ error: "userId is required" },
|
|
100
|
+
{ status: 400 }
|
|
101
|
+
);
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
const cortex = getCortex();
|
|
105
|
+
|
|
106
|
+
// Create a new conversation
|
|
107
|
+
const conversationId = `conv-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
|
|
108
|
+
|
|
109
|
+
const conversation = await cortex.conversations.create({
|
|
110
|
+
memorySpaceId,
|
|
111
|
+
conversationId,
|
|
112
|
+
type: "user-agent",
|
|
113
|
+
participants: {
|
|
114
|
+
userId,
|
|
115
|
+
agentId: "quickstart-assistant",
|
|
116
|
+
},
|
|
117
|
+
metadata: {
|
|
118
|
+
title: title || "New Chat",
|
|
119
|
+
},
|
|
120
|
+
});
|
|
121
|
+
|
|
122
|
+
return Response.json({
|
|
123
|
+
success: true,
|
|
124
|
+
conversation: {
|
|
125
|
+
id: conversation.conversationId,
|
|
126
|
+
title: (conversation.metadata?.title as string) || "New Chat",
|
|
127
|
+
createdAt: conversation.createdAt,
|
|
128
|
+
updatedAt: conversation.updatedAt,
|
|
129
|
+
messageCount: 0,
|
|
130
|
+
},
|
|
131
|
+
});
|
|
132
|
+
} catch (error) {
|
|
133
|
+
console.error("[Conversation Create Error]", error);
|
|
134
|
+
|
|
135
|
+
return Response.json(
|
|
136
|
+
{ error: "Failed to create conversation" },
|
|
137
|
+
{ status: 500 }
|
|
138
|
+
);
|
|
139
|
+
}
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
export async function DELETE(req: Request) {
|
|
143
|
+
try {
|
|
144
|
+
const { searchParams } = new URL(req.url);
|
|
145
|
+
const conversationId = searchParams.get("conversationId");
|
|
146
|
+
|
|
147
|
+
if (!conversationId) {
|
|
148
|
+
return Response.json(
|
|
149
|
+
{ error: "conversationId is required" },
|
|
150
|
+
{ status: 400 }
|
|
151
|
+
);
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
const cortex = getCortex();
|
|
155
|
+
|
|
156
|
+
await cortex.conversations.delete(conversationId);
|
|
157
|
+
|
|
158
|
+
return Response.json({ success: true });
|
|
159
|
+
} catch (error) {
|
|
160
|
+
console.error("[Conversation Delete Error]", error);
|
|
161
|
+
|
|
162
|
+
return Response.json(
|
|
163
|
+
{ error: "Failed to delete conversation" },
|
|
164
|
+
{ status: 500 }
|
|
165
|
+
);
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
/**
|
|
170
|
+
* Generate a default title from conversation data
|
|
171
|
+
*/
|
|
172
|
+
function getDefaultTitle(conv: { createdAt: number; messageCount?: number }): string {
|
|
173
|
+
const date = new Date(conv.createdAt);
|
|
174
|
+
const timeStr = date.toLocaleTimeString("en-US", {
|
|
175
|
+
hour: "numeric",
|
|
176
|
+
minute: "2-digit",
|
|
177
|
+
});
|
|
178
|
+
return `Chat at ${timeStr}`;
|
|
179
|
+
}
|
|
@@ -112,3 +112,164 @@
|
|
|
112
112
|
font-size: 0.875rem;
|
|
113
113
|
line-height: 1.5;
|
|
114
114
|
}
|
|
115
|
+
|
|
116
|
+
/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Auth Screen Animations
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

/* Entrance: fade in while rising 20px. */
@keyframes fade-in-up {
  from {
    opacity: 0;
    transform: translateY(20px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

/* Gentle breathing effect: slight scale plus a soft blue halo at midpoint. */
@keyframes logo-pulse {
  0%, 100% {
    transform: scale(1);
    box-shadow: 0 0 0 0 rgba(12, 140, 230, 0.4);
  }
  50% {
    transform: scale(1.02);
    box-shadow: 0 0 40px 10px rgba(12, 140, 230, 0.2);
  }
}

.auth-screen {
  animation: fade-in-up 0.5s ease-out;
}

.auth-logo {
  animation: logo-pulse 3s ease-in-out infinite;
}

/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Sidebar Animations
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

/* Whole sidebar slides in from the left on mount. */
@keyframes sidebar-slide-in {
  from {
    opacity: 0;
    transform: translateX(-20px);
  }
  to {
    opacity: 1;
    transform: translateX(0);
  }
}

/* Individual history items slide in with a smaller offset. */
@keyframes conversation-item-in {
  from {
    opacity: 0;
    transform: translateX(-10px);
  }
  to {
    opacity: 1;
    transform: translateX(0);
  }
}

.sidebar-animate {
  animation: sidebar-slide-in 0.3s ease-out;
}

.conversation-item {
  animation: conversation-item-in 0.2s ease-out;
}

/* Staggered animation for conversation list: first five items cascade in
   30ms apart; everything after shares the final 150ms delay. */
.conversation-item:nth-child(1) { animation-delay: 0ms; }
.conversation-item:nth-child(2) { animation-delay: 30ms; }
.conversation-item:nth-child(3) { animation-delay: 60ms; }
.conversation-item:nth-child(4) { animation-delay: 90ms; }
.conversation-item:nth-child(5) { animation-delay: 120ms; }
.conversation-item:nth-child(n+6) { animation-delay: 150ms; }

/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Input Focus Animations
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

/* NOTE(review): applies to every <input> on the page, not just auth/chat
   fields — confirm this global focus ring is intentional. */
input:focus {
  box-shadow: 0 0 0 2px rgba(12, 140, 230, 0.2);
}

/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Button Hover Effects
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

.btn-primary {
  position: relative;
  overflow: hidden; /* clips the expanding ::after circle below */
}

/* Ripple-style overlay: a centered circle that grows on hover. */
.btn-primary::after {
  content: "";
  position: absolute;
  top: 50%;
  left: 50%;
  width: 0;
  height: 0;
  background: rgba(255, 255, 255, 0.1);
  border-radius: 50%;
  transform: translate(-50%, -50%);
  transition: width 0.4s ease, height 0.4s ease;
}

.btn-primary:hover::after {
  width: 300px;
  height: 300px;
}

/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Loading States
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

/* Sweep a highlight band across the element by scrolling the gradient. */
@keyframes shimmer {
  0% {
    background-position: -200% 0;
  }
  100% {
    background-position: 200% 0;
  }
}

.skeleton {
  background: linear-gradient(
    90deg,
    rgba(255, 255, 255, 0.05) 25%,
    rgba(255, 255, 255, 0.1) 50%,
    rgba(255, 255, 255, 0.05) 75%
  );
  background-size: 200% 100%;
  animation: shimmer 1.5s infinite;
}

/* ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
   Responsive Sidebar
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ */

@media (max-width: 1024px) {
  /* On smaller screens, the layer flow visualization is hidden */
  .layer-flow-panel {
    display: none;
  }
}

@media (max-width: 768px) {
  /* On mobile, sidebar becomes collapsible: hidden state slides it fully
     off-canvas, visible state overlays it above the chat (z-index 50). */
  .sidebar-mobile-hidden {
    transform: translateX(-100%);
    transition: transform 0.3s ease;
  }

  .sidebar-mobile-visible {
    transform: translateX(0);
    position: fixed;
    z-index: 50;
    height: 100vh;
  }
}
|