@cortexmemory/cli 0.26.2 → 0.27.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. package/dist/commands/dev.d.ts.map +1 -1
  2. package/dist/commands/dev.js +121 -10
  3. package/dist/commands/dev.js.map +1 -1
  4. package/dist/commands/init.d.ts.map +1 -1
  5. package/dist/commands/init.js +273 -43
  6. package/dist/commands/init.js.map +1 -1
  7. package/dist/commands/setup.d.ts.map +1 -1
  8. package/dist/commands/setup.js +102 -46
  9. package/dist/commands/setup.js.map +1 -1
  10. package/dist/commands/status.d.ts.map +1 -1
  11. package/dist/commands/status.js +94 -7
  12. package/dist/commands/status.js.map +1 -1
  13. package/dist/types.d.ts +23 -0
  14. package/dist/types.d.ts.map +1 -1
  15. package/dist/utils/config.d.ts +11 -0
  16. package/dist/utils/config.d.ts.map +1 -1
  17. package/dist/utils/config.js +20 -0
  18. package/dist/utils/config.js.map +1 -1
  19. package/dist/utils/init/graph-setup.d.ts.map +1 -1
  20. package/dist/utils/init/graph-setup.js +12 -0
  21. package/dist/utils/init/graph-setup.js.map +1 -1
  22. package/dist/utils/init/quickstart-setup.d.ts +87 -0
  23. package/dist/utils/init/quickstart-setup.d.ts.map +1 -0
  24. package/dist/utils/init/quickstart-setup.js +462 -0
  25. package/dist/utils/init/quickstart-setup.js.map +1 -0
  26. package/dist/utils/schema-sync.d.ts.map +1 -1
  27. package/dist/utils/schema-sync.js +27 -21
  28. package/dist/utils/schema-sync.js.map +1 -1
  29. package/package.json +3 -2
  30. package/templates/vercel-ai-quickstart/.env.local.example +45 -0
  31. package/templates/vercel-ai-quickstart/README.md +280 -0
  32. package/templates/vercel-ai-quickstart/app/api/chat/route.ts +196 -0
  33. package/templates/vercel-ai-quickstart/app/api/facts/route.ts +39 -0
  34. package/templates/vercel-ai-quickstart/app/api/health/route.ts +99 -0
  35. package/templates/vercel-ai-quickstart/app/api/memories/route.ts +37 -0
  36. package/templates/vercel-ai-quickstart/app/globals.css +114 -0
  37. package/templates/vercel-ai-quickstart/app/layout.tsx +19 -0
  38. package/templates/vercel-ai-quickstart/app/page.tsx +131 -0
  39. package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +237 -0
  40. package/templates/vercel-ai-quickstart/components/ConvexClientProvider.tsx +21 -0
  41. package/templates/vercel-ai-quickstart/components/DataPreview.tsx +57 -0
  42. package/templates/vercel-ai-quickstart/components/HealthStatus.tsx +214 -0
  43. package/templates/vercel-ai-quickstart/components/LayerCard.tsx +263 -0
  44. package/templates/vercel-ai-quickstart/components/LayerFlowDiagram.tsx +195 -0
  45. package/templates/vercel-ai-quickstart/components/MemorySpaceSwitcher.tsx +93 -0
  46. package/templates/vercel-ai-quickstart/convex/conversations.ts +67 -0
  47. package/templates/vercel-ai-quickstart/convex/facts.ts +131 -0
  48. package/templates/vercel-ai-quickstart/convex/health.ts +15 -0
  49. package/templates/vercel-ai-quickstart/convex/memories.ts +104 -0
  50. package/templates/vercel-ai-quickstart/convex/schema.ts +20 -0
  51. package/templates/vercel-ai-quickstart/convex/users.ts +105 -0
  52. package/templates/vercel-ai-quickstart/lib/animations.ts +146 -0
  53. package/templates/vercel-ai-quickstart/lib/layer-tracking.ts +214 -0
  54. package/templates/vercel-ai-quickstart/next.config.js +7 -0
  55. package/templates/vercel-ai-quickstart/package.json +41 -0
  56. package/templates/vercel-ai-quickstart/postcss.config.js +5 -0
  57. package/templates/vercel-ai-quickstart/tailwind.config.js +37 -0
  58. package/templates/vercel-ai-quickstart/tsconfig.json +33 -0
@@ -0,0 +1,280 @@
1
+ # Cortex Memory + Vercel AI SDK Quickstart
2
+
3
+ This is the official quickstart demo for **Cortex Memory** with the **Vercel AI SDK**. It provides an interactive visualization of how data flows through the Cortex memory orchestration system in real-time.
4
+
5
+ > **SDK v0.24.0**: Now with **Belief Revision**! When users change their preferences (e.g., "I now prefer purple" after saying "I like blue"), Cortex intelligently updates or supersedes existing facts instead of creating duplicates.
6
+
7
+ ## Features
8
+
9
+ - 🧠 **Real-time Memory Visualization** - Watch data flow through all Cortex layers (Memory Space → User → Agent → Conversation → Vector → Facts → Graph)
10
+ - 💬 **Interactive Chat** - Send messages and see them processed with automatic memory storage
11
+ - 📊 **Layer Flow Diagram** - Animated visualization showing latency and data at each layer
12
+ - 🔀 **Memory Space Switching** - Demonstrate multi-tenant isolation by switching between memory spaces
13
+ - ⚡ **Streaming Support** - Full streaming with progressive fact extraction
14
+ - 🧹 **Belief Revision** - Intelligent fact updates when information changes (v0.24.0)
15
+ - 🔄 **Smart Fact Deduplication** - Semantic matching prevents duplicate facts across sessions (v0.22.0)
16
+
17
+ ## Prerequisites
18
+
19
+ - Node.js 18+
20
+ - A Convex deployment ([get started](https://www.convex.dev/))
21
+ - An OpenAI API key ([get one](https://platform.openai.com/api-keys))
22
+
23
+ ## Quick Start
24
+
25
+ ### Local Development (within monorepo)
26
+
27
+ 1. **Install dependencies**
28
+
29
+ ```bash
30
+ cd packages/vercel-ai-provider/quickstart
31
+ npm install
32
+ ```
33
+
34
+ > **Note**: The `package.json` uses `file:` references to link to the local SDK and provider packages. This allows you to test changes to the provider immediately.
35
+
36
+ ### Using Published Packages
37
+
38
+ If you want to use the published npm packages instead, update `package.json`:
39
+
40
+ ```json
41
+ {
42
+ "dependencies": {
43
+ "@cortexmemory/sdk": "^0.24.0",
44
+ "@cortexmemory/vercel-ai-provider": "^1.0.0"
45
+ // ... other deps
46
+ }
47
+ }
48
+ ```
49
+
50
+ 2. **Set up environment variables**
51
+
52
+ ```bash
53
+ cp .env.local.example .env.local
54
+ ```
55
+
56
+ Edit `.env.local` and add your credentials:
57
+
58
+ ```env
59
+ CONVEX_URL=https://your-project.convex.cloud
60
+ NEXT_PUBLIC_CONVEX_URL=https://your-project.convex.cloud
61
+ OPENAI_API_KEY=sk-...
62
+ ```
63
+
64
+ 3. **Deploy Convex schema**
65
+
66
+ ```bash
67
+ npm run convex:dev
68
+ ```
69
+
70
+ 4. **Start the development server**
71
+
72
+ ```bash
73
+ npm run dev
74
+ ```
75
+
76
+ 5. **Open the demo**
77
+
78
+ Visit [http://localhost:3000](http://localhost:3000) to see the demo in action.
79
+
80
+ ## What This Demo Shows
81
+
82
+ ### Memory Layer Orchestration
83
+
84
+ When you send a message, you'll see it flow through these layers:
85
+
86
+ | Layer | Description |
87
+ | ---------------- | ----------------------------------------------- |
88
+ | **Memory Space** | Isolated namespace for multi-tenancy |
89
+ | **User** | User profile and identity |
90
+ | **Agent** | AI agent participant (required in SDK v0.17.0+) |
91
+ | **Conversation** | Message storage with threading |
92
+ | **Vector** | Semantic embeddings for similarity search |
93
+ | **Facts** | Extracted structured information |
94
+ | **Graph** | Entity relationships (optional) |
95
+
96
+ ### Key Features Demonstrated
97
+
98
+ 1. **Belief Revision** - SDK v0.24.0 intelligently updates/supersedes facts when information changes
99
+ 2. **Unified Retrieval (recall)** - SDK v0.23.0 retrieves from vector + facts + graph in one call
100
+ 3. **agentId Requirement** - SDK v0.17.0+ requires `agentId` for all user-agent conversations
101
+ 4. **Automatic Fact Extraction** - LLM-powered extraction of preferences, identity, relationships
102
+ 5. **Semantic Fact Deduplication** - SDK v0.22.0 automatically prevents duplicate facts using embedding similarity
103
+ 6. **Multi-tenant Isolation** - Switch memory spaces to see complete isolation
104
+ 7. **Streaming with Memory** - Full streaming support with progressive storage
105
+
106
+ ## Configuration
107
+
108
+ The chat API route at `/app/api/chat/route.ts` shows how to configure the Cortex Memory provider:
109
+
110
+ ```typescript
111
+ import { createCortexMemory } from "@cortexmemory/vercel-ai-provider";
112
+ import { openai, createOpenAI } from "@ai-sdk/openai";
113
+ import { streamText, embed } from "ai";
114
+
115
+ const openaiClient = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
116
+
117
+ const cortexMemory = createCortexMemory({
118
+ convexUrl: process.env.CONVEX_URL!,
119
+ memorySpaceId: "quickstart-demo",
120
+
121
+ // User identification
122
+ userId: "demo-user",
123
+ userName: "Demo User",
124
+
125
+ // Agent identification (REQUIRED in SDK v0.17.0+)
126
+ agentId: "quickstart-assistant",
127
+ agentName: "Cortex Demo Assistant",
128
+
129
+ // Optional features
130
+ enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
131
+ enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION === "true",
132
+
133
+ // Belief Revision (v0.24.0+)
134
+ // Automatically handles fact updates when user changes their mind
135
+ // e.g., "I like blue" → "I prefer purple" will UPDATE/SUPERSEDE the old fact
136
+ beliefRevision: {
137
+ enabled: true,
138
+ slotMatching: true, // Fast slot-based conflict detection
139
+ llmResolution: true, // LLM-based resolution for nuanced conflicts
140
+ },
141
+
142
+ // Embedding provider (required for semantic matching)
143
+ embeddingProvider: {
144
+ generate: async (text) => {
145
+ const result = await embed({
146
+ model: openaiClient.embedding("text-embedding-3-small"),
147
+ value: text,
148
+ });
149
+ return result.embedding;
150
+ },
151
+ },
152
+ });
153
+
154
+ const result = await streamText({
155
+ model: cortexMemory(openai("gpt-4o-mini")),
156
+ messages,
157
+ });
158
+ ```
159
+
160
+ ## Architecture
161
+
162
+ ```
163
+ quickstart/
164
+ ├── app/
165
+ │ ├── api/
166
+ │ │ ├── chat/route.ts # Main chat endpoint with Cortex
167
+ │ │ ├── memories/route.ts # Memory inspection endpoint
168
+ │ │ └── facts/route.ts # Facts inspection endpoint
169
+ │ ├── layout.tsx
170
+ │ ├── page.tsx # Main demo page
171
+ │ └── globals.css
172
+ ├── components/
173
+ │ ├── ChatInterface.tsx # Chat UI component
174
+ │ ├── LayerFlowDiagram.tsx # Hero visualization component
175
+ │ ├── LayerCard.tsx # Individual layer status card
176
+ │ ├── DataPreview.tsx # Expandable data viewer
177
+ │ └── MemorySpaceSwitcher.tsx
178
+ ├── lib/
179
+ │ ├── layer-tracking.ts # Layer status management
180
+ │ └── animations.ts # Framer Motion variants
181
+ └── convex/
182
+ ├── schema.ts # Convex schema
183
+ ├── conversations.ts # Conversation queries
184
+ ├── memories.ts # Memory queries
185
+ ├── facts.ts # Facts queries
186
+ └── users.ts # User queries
187
+ ```
188
+
189
+ ## Learn More
190
+
191
+ - [Cortex Memory Documentation](https://cortexmemory.dev/docs)
192
+ - [Vercel AI SDK Documentation](https://sdk.vercel.ai/docs)
193
+ - [API Reference](/Documentation/03-api-reference/02-memory-operations.md)
194
+
195
+ ## Troubleshooting
196
+
197
+ ### "agentId is required"
198
+
199
+ Since SDK v0.17.0, all user-agent conversations require an `agentId`. Add it to your configuration:
200
+
201
+ ```typescript
202
+ const cortexMemory = createCortexMemory({
203
+ // ... other config
204
+ agentId: "my-assistant", // Required!
205
+ });
206
+ ```
207
+
208
+ ### Memories not appearing
209
+
210
+ 1. Check that your Convex deployment is running
211
+ 2. Verify `CONVEX_URL` is set correctly
212
+ 3. Ensure the memory space ID matches between frontend and backend
213
+
214
+ ### Fact extraction not working
215
+
216
+ Enable fact extraction via environment variable:
217
+
218
+ ```env
219
+ CORTEX_FACT_EXTRACTION=true
220
+ ```
221
+
222
+ ### Duplicate facts being created
223
+
224
+ Ensure you have `embeddingProvider` configured for optimal semantic deduplication:
225
+
226
+ ```typescript
227
+ const cortexMemory = createCortexMemory({
228
+ // ... other config
229
+ embeddingProvider: {
230
+ generate: async (text) => {
231
+ const result = await embed({
232
+ model: openaiClient.embedding("text-embedding-3-small"),
233
+ value: text,
234
+ });
235
+ return result.embedding;
236
+ },
237
+ },
238
+ });
239
+ ```
240
+
241
+ Without `embeddingProvider`, deduplication falls back to `structural` matching (subject + predicate + object), which is less accurate for semantically similar facts.
242
+
243
+ To disable deduplication entirely (not recommended):
244
+
245
+ ```typescript
246
+ const cortexMemory = createCortexMemory({
247
+ // ... other config
248
+ factDeduplication: false, // Uses pre-v0.22.0 behavior
249
+ });
250
+ ```
251
+
252
+ ### Facts not being updated when user changes preferences
253
+
254
+ If you say "I like blue" then later say "I prefer purple" and both facts remain, enable **Belief Revision** (v0.24.0+):
255
+
256
+ ```typescript
257
+ const cortexMemory = createCortexMemory({
258
+ // ... other config
259
+ beliefRevision: {
260
+ enabled: true,
261
+ slotMatching: true, // Fast detection via subject-predicate matching
262
+ llmResolution: true, // LLM resolves nuanced conflicts
263
+ },
264
+ });
265
+ ```
266
+
267
+ **Revision actions explained:**
268
+
269
+ | Action | Description | Example |
270
+ | ----------- | -------------------------------------------------- | ------------------------------------- |
271
+ | `CREATE` | New fact with no conflicts | First time mentioning favorite color |
272
+ | `UPDATE` | Existing fact refined with new details | "I like blue" → "I love dark blue" |
273
+ | `SUPERSEDE` | Old fact replaced by contradicting new information | "I like blue" → "I prefer purple now" |
274
+ | `NONE` | Duplicate or irrelevant, no storage needed | Saying "I like blue" twice |
275
+
276
+ The demo visualization shows these actions with colored badges on the Facts layer.
277
+
278
+ ## License
279
+
280
+ FSL-1.1-Apache-2.0 - See LICENSE.md in the root of the repository.
@@ -0,0 +1,196 @@
1
+ import { createCortexMemoryAsync } from "@cortexmemory/vercel-ai-provider";
2
+ import type {
3
+ LayerObserver,
4
+ CortexMemoryConfig,
5
+ } from "@cortexmemory/vercel-ai-provider";
6
+ import { openai, createOpenAI } from "@ai-sdk/openai";
7
+ import {
8
+ streamText,
9
+ embed,
10
+ convertToModelMessages,
11
+ createUIMessageStream,
12
+ createUIMessageStreamResponse,
13
+ } from "ai";
14
+
15
+ // Create OpenAI client for embeddings
16
+ const openaiClient = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
17
+
18
+ // System prompt for the assistant
19
+ const SYSTEM_PROMPT = `You are a helpful AI assistant with long-term memory powered by Cortex.
20
+
21
+ Your capabilities:
22
+ - You remember everything users tell you across conversations
23
+ - You can recall facts, preferences, and context from past interactions
24
+ - You naturally reference what you've learned about the user
25
+
26
+ Behavior guidelines:
27
+ - When you remember something from a previous conversation, mention it naturally
28
+ - If asked about something you learned, reference it specifically
29
+ - Be conversational and friendly
30
+ - Help demonstrate the memory system by showing what you remember
31
+
32
+ Example interactions:
33
+ - User: "My name is Alex" → Remember and use their name
34
+ - User: "I work at Acme Corp" → Remember their employer
35
+ - User: "My favorite color is blue" → Remember their preference
36
+ - User: "What do you know about me?" → List everything you remember`;
37
+
38
+ // Create Cortex Memory config factory
39
+ // Uses createCortexMemoryAsync for graph support when CORTEX_GRAPH_SYNC=true
40
+ function getCortexMemoryConfig(
41
+ memorySpaceId: string,
42
+ userId: string,
43
+ layerObserver?: LayerObserver,
44
+ ): CortexMemoryConfig {
45
+ return {
46
+ convexUrl: process.env.CONVEX_URL!,
47
+ memorySpaceId,
48
+
49
+ // User identification
50
+ userId,
51
+ userName: "Demo User",
52
+
53
+ // Agent identification (required for user-agent conversations in SDK v0.17.0+)
54
+ agentId: "quickstart-assistant",
55
+ agentName: "Cortex Demo Assistant",
56
+
57
+ // Enable graph memory sync (auto-configured via env vars)
58
+ // When true, uses CypherGraphAdapter to sync to Neo4j/Memgraph
59
+ enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
60
+
61
+ // Enable fact extraction (auto-configured via env vars)
62
+ enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION === "true",
63
+
64
+ // Belief Revision (v0.24.0+)
65
+ // Automatically handles fact updates, supersessions, and deduplication
66
+ // When a user changes their preference (e.g., "I now prefer purple"),
67
+ // the system intelligently updates or supersedes the old fact.
68
+ beliefRevision: {
69
+ enabled: true, // Enable the belief revision pipeline
70
+ slotMatching: true, // Fast slot-based conflict detection (subject-predicate matching)
71
+ llmResolution: true, // LLM-based resolution for nuanced conflicts
72
+ },
73
+
74
+ // Embedding provider for semantic matching (required for semantic dedup & belief revision)
75
+ embeddingProvider: {
76
+ generate: async (text: string) => {
77
+ const result = await embed({
78
+ model: openaiClient.embedding("text-embedding-3-small"),
79
+ value: text,
80
+ });
81
+ return result.embedding;
82
+ },
83
+ },
84
+
85
+ // Streaming enhancements
86
+ streamingOptions: {
87
+ storePartialResponse: true,
88
+ progressiveFactExtraction: true,
89
+ enableAdaptiveProcessing: true,
90
+ },
91
+
92
+ // Memory recall configuration (v0.23.0 - unified retrieval across all layers)
93
+ memorySearchLimit: 20, // Results from combined vector + facts + graph search
94
+
95
+ // Real-time layer tracking (v0.24.0+)
96
+ // Events are emitted as each layer processes, enabling live UI updates
97
+ layerObserver,
98
+
99
+ // Debug in development
100
+ debug: process.env.NODE_ENV === "development",
101
+ };
102
+ }
103
+
104
+ export async function POST(req: Request) {
105
+ try {
106
+ const body = await req.json();
107
+ const { messages, memorySpaceId, userId } = body;
108
+
109
+ // Convert UIMessage[] from useChat to ModelMessage[] for streamText
110
+ // Note: In AI SDK v6+, convertToModelMessages may return a Promise
111
+ const modelMessagesResult = convertToModelMessages(messages);
112
+ const modelMessages =
113
+ modelMessagesResult instanceof Promise
114
+ ? await modelMessagesResult
115
+ : modelMessagesResult;
116
+
117
+ // Use createUIMessageStream to send both LLM text and layer events
118
+ return createUIMessageStreamResponse({
119
+ stream: createUIMessageStream({
120
+ execute: async ({ writer }) => {
121
+ // Create observer that writes layer events to the stream
122
+ // These events are transient (not persisted in message history)
123
+ const layerObserver: LayerObserver = {
124
+ onOrchestrationStart: (orchestrationId) => {
125
+ writer.write({
126
+ type: "data-orchestration-start",
127
+ data: { orchestrationId },
128
+ transient: true,
129
+ });
130
+ },
131
+ onLayerUpdate: (event) => {
132
+ writer.write({
133
+ type: "data-layer-update",
134
+ data: {
135
+ layer: event.layer,
136
+ status: event.status,
137
+ timestamp: event.timestamp,
138
+ latencyMs: event.latencyMs,
139
+ data: event.data,
140
+ error: event.error,
141
+ revisionAction: event.revisionAction,
142
+ supersededFacts: event.supersededFacts,
143
+ },
144
+ transient: true,
145
+ });
146
+ },
147
+ onOrchestrationComplete: (summary) => {
148
+ writer.write({
149
+ type: "data-orchestration-complete",
150
+ data: {
151
+ orchestrationId: summary.orchestrationId,
152
+ totalLatencyMs: summary.totalLatencyMs,
153
+ createdIds: summary.createdIds,
154
+ },
155
+ transient: true,
156
+ });
157
+ },
158
+ };
159
+
160
+ // Build config with the observer
161
+ const config = getCortexMemoryConfig(
162
+ memorySpaceId || "quickstart-demo",
163
+ userId || "demo-user",
164
+ layerObserver,
165
+ );
166
+
167
+ // Create memory-augmented model with async initialization (enables graph support)
168
+ // This connects to Neo4j/Memgraph if CORTEX_GRAPH_SYNC=true
169
+ const cortexMemory = await createCortexMemoryAsync(config);
170
+
171
+ // Stream response with automatic memory integration
172
+ const result = streamText({
173
+ model: cortexMemory(openai("gpt-4o-mini")),
174
+ messages: modelMessages,
175
+ system: SYSTEM_PROMPT,
176
+ });
177
+
178
+ // Merge LLM stream into the UI message stream
179
+ writer.merge(result.toUIMessageStream());
180
+ },
181
+ }),
182
+ });
183
+ } catch (error) {
184
+ console.error("[Chat API Error]", error);
185
+
186
+ return new Response(
187
+ JSON.stringify({
188
+ error: error instanceof Error ? error.message : "Unknown error",
189
+ }),
190
+ {
191
+ status: 500,
192
+ headers: { "Content-Type": "application/json" },
193
+ },
194
+ );
195
+ }
196
+ }
@@ -0,0 +1,39 @@
1
+ import { Cortex } from "@cortexmemory/sdk";
2
+
3
+ export const dynamic = "force-dynamic";
4
+
5
+ function getCortex() {
6
+ return new Cortex({ convexUrl: process.env.CONVEX_URL! });
7
+ }
8
+
9
+ export async function GET(req: Request) {
10
+ try {
11
+ const { searchParams } = new URL(req.url);
12
+ const memorySpaceId =
13
+ searchParams.get("memorySpaceId") || "quickstart-demo";
14
+ const userId = searchParams.get("userId");
15
+ const limit = parseInt(searchParams.get("limit") || "50");
16
+
17
+ const cortex = getCortex();
18
+
19
+ // Fetch facts for the user/memory space
20
+ const facts = await cortex.facts.list({
21
+ memorySpaceId,
22
+ ...(userId ? { userId } : {}),
23
+ limit,
24
+ });
25
+
26
+ return Response.json({
27
+ facts,
28
+ count: facts.length,
29
+ memorySpaceId,
30
+ });
31
+ } catch (error) {
32
+ console.error("[Facts API Error]", error);
33
+
34
+ return Response.json(
35
+ { error: error instanceof Error ? error.message : "Unknown error" },
36
+ { status: 500 },
37
+ );
38
+ }
39
+ }
@@ -0,0 +1,99 @@
1
+ import { Cortex } from "@cortexmemory/sdk";
2
+
3
+ /**
4
+ * Health check endpoint to verify all backend services
5
+ */
6
+ export async function GET() {
7
+ const checks: Record<
8
+ string,
9
+ { status: string; latencyMs?: number; error?: string }
10
+ > = {};
11
+
12
+ // Check 1: Environment variables
13
+ const hasConvexUrl = !!process.env.CONVEX_URL;
14
+ const hasPublicConvexUrl = !!process.env.NEXT_PUBLIC_CONVEX_URL;
15
+ const hasOpenAIKey = !!process.env.OPENAI_API_KEY;
16
+ const hasNeo4jUri = !!process.env.NEO4J_URI;
17
+ const hasMemgraphUri = !!process.env.MEMGRAPH_URI;
18
+
19
+ checks.environment = {
20
+ status: hasConvexUrl && hasOpenAIKey ? "ok" : "warning",
21
+ error: !hasConvexUrl
22
+ ? "CONVEX_URL not set"
23
+ : !hasOpenAIKey
24
+ ? "OPENAI_API_KEY not set"
25
+ : undefined,
26
+ };
27
+
28
+ // Check 2: Cortex SDK initialization
29
+ try {
30
+ const startTime = Date.now();
31
+ const cortex = new Cortex({
32
+ convexUrl: process.env.CONVEX_URL!,
33
+ });
34
+
35
+ // Quick test - just initialize, don't actually query
36
+ checks.cortexSdk = {
37
+ status: "ok",
38
+ latencyMs: Date.now() - startTime,
39
+ };
40
+
41
+ cortex.close();
42
+ } catch (error) {
43
+ checks.cortexSdk = {
44
+ status: "error",
45
+ error: error instanceof Error ? error.message : "Unknown error",
46
+ };
47
+ }
48
+
49
+ // Check 3: Convex backend connectivity (via HTTP)
50
+ if (process.env.CONVEX_URL) {
51
+ try {
52
+ const startTime = Date.now();
53
+ // Convex URLs are like "https://xxx.convex.cloud"
54
+ // We can ping the HTTP endpoint
55
+ const convexUrl = new URL(process.env.CONVEX_URL);
56
+ const response = await fetch(`${convexUrl.origin}/version`, {
57
+ method: "GET",
58
+ signal: AbortSignal.timeout(5000),
59
+ });
60
+
61
+ checks.convexBackend = {
62
+ status: response.ok ? "ok" : "error",
63
+ latencyMs: Date.now() - startTime,
64
+ error: response.ok ? undefined : `HTTP ${response.status}`,
65
+ };
66
+ } catch (error) {
67
+ checks.convexBackend = {
68
+ status: "error",
69
+ error: error instanceof Error ? error.message : "Connection failed",
70
+ };
71
+ }
72
+ } else {
73
+ checks.convexBackend = {
74
+ status: "error",
75
+ error: "CONVEX_URL not configured",
76
+ };
77
+ }
78
+
79
+ // Overall status
80
+ const hasErrors = Object.values(checks).some((c) => c.status === "error");
81
+ const hasWarnings = Object.values(checks).some((c) => c.status === "warning");
82
+
83
+ return Response.json({
84
+ status: hasErrors ? "unhealthy" : hasWarnings ? "degraded" : "healthy",
85
+ timestamp: new Date().toISOString(),
86
+ checks,
87
+ config: {
88
+ convexUrl: hasConvexUrl ? "configured" : "missing",
89
+ publicConvexUrl: hasPublicConvexUrl ? "configured" : "missing",
90
+ openaiKey: hasOpenAIKey ? "configured" : "missing",
91
+ graphSync: hasNeo4jUri || hasMemgraphUri ? "enabled" : "disabled",
92
+ graphBackend: hasNeo4jUri
93
+ ? "neo4j"
94
+ : hasMemgraphUri
95
+ ? "memgraph"
96
+ : "none",
97
+ },
98
+ });
99
+ }
@@ -0,0 +1,37 @@
1
+ import { Cortex } from "@cortexmemory/sdk";
2
+
3
+ export const dynamic = "force-dynamic";
4
+
5
+ function getCortex() {
6
+ return new Cortex({ convexUrl: process.env.CONVEX_URL! });
7
+ }
8
+
9
+ export async function GET(req: Request) {
10
+ try {
11
+ const { searchParams } = new URL(req.url);
12
+ const memorySpaceId =
13
+ searchParams.get("memorySpaceId") || "quickstart-demo";
14
+ const limit = parseInt(searchParams.get("limit") || "20");
15
+
16
+ const cortex = getCortex();
17
+
18
+ // Fetch recent memories
19
+ const memories = await cortex.memory.list({
20
+ memorySpaceId,
21
+ limit,
22
+ });
23
+
24
+ return Response.json({
25
+ memories,
26
+ count: memories.length,
27
+ memorySpaceId,
28
+ });
29
+ } catch (error) {
30
+ console.error("[Memories API Error]", error);
31
+
32
+ return Response.json(
33
+ { error: error instanceof Error ? error.message : "Unknown error" },
34
+ { status: 500 },
35
+ );
36
+ }
37
+ }