@cortexmemory/cli 0.27.3 → 0.28.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/dist/commands/db.d.ts.map +1 -1
  2. package/dist/commands/db.js +18 -6
  3. package/dist/commands/db.js.map +1 -1
  4. package/dist/commands/deploy.d.ts.map +1 -1
  5. package/dist/commands/deploy.js +191 -80
  6. package/dist/commands/deploy.js.map +1 -1
  7. package/dist/commands/dev.js +3 -2
  8. package/dist/commands/dev.js.map +1 -1
  9. package/dist/commands/init.d.ts.map +1 -1
  10. package/dist/commands/init.js +12 -0
  11. package/dist/commands/init.js.map +1 -1
  12. package/dist/types.d.ts +1 -1
  13. package/dist/types.d.ts.map +1 -1
  14. package/dist/utils/app-template-sync.d.ts.map +1 -1
  15. package/dist/utils/app-template-sync.js +35 -13
  16. package/dist/utils/app-template-sync.js.map +1 -1
  17. package/dist/utils/init/quickstart-setup.d.ts.map +1 -1
  18. package/dist/utils/init/quickstart-setup.js.map +1 -1
  19. package/package.json +4 -4
  20. package/templates/basic/.env.local.example +23 -0
  21. package/templates/basic/README.md +181 -56
  22. package/templates/basic/package-lock.json +2180 -406
  23. package/templates/basic/package.json +23 -5
  24. package/templates/basic/src/__tests__/chat.test.ts +340 -0
  25. package/templates/basic/src/__tests__/cortex.test.ts +260 -0
  26. package/templates/basic/src/__tests__/display.test.ts +455 -0
  27. package/templates/basic/src/__tests__/e2e/fact-extraction.test.ts +498 -0
  28. package/templates/basic/src/__tests__/e2e/memory-flow.test.ts +355 -0
  29. package/templates/basic/src/__tests__/e2e/server-e2e.test.ts +414 -0
  30. package/templates/basic/src/__tests__/helpers/test-utils.ts +345 -0
  31. package/templates/basic/src/__tests__/integration/chat-flow.test.ts +422 -0
  32. package/templates/basic/src/__tests__/integration/server.test.ts +441 -0
  33. package/templates/basic/src/__tests__/llm.test.ts +344 -0
  34. package/templates/basic/src/chat.ts +300 -0
  35. package/templates/basic/src/cortex.ts +203 -0
  36. package/templates/basic/src/display.ts +425 -0
  37. package/templates/basic/src/index.ts +194 -64
  38. package/templates/basic/src/llm.ts +214 -0
  39. package/templates/basic/src/server.ts +280 -0
  40. package/templates/basic/vitest.config.ts +33 -0
  41. package/templates/basic/vitest.e2e.config.ts +28 -0
  42. package/templates/basic/vitest.integration.config.ts +25 -0
  43. package/templates/vercel-ai-quickstart/app/api/auth/check/route.ts +1 -1
  44. package/templates/vercel-ai-quickstart/app/api/auth/login/route.ts +61 -19
  45. package/templates/vercel-ai-quickstart/app/api/auth/register/route.ts +14 -18
  46. package/templates/vercel-ai-quickstart/app/api/auth/setup/route.ts +4 -7
  47. package/templates/vercel-ai-quickstart/app/api/chat/route.ts +95 -23
  48. package/templates/vercel-ai-quickstart/app/api/chat-v6/route.ts +339 -0
  49. package/templates/vercel-ai-quickstart/app/api/conversations/route.ts +16 -16
  50. package/templates/vercel-ai-quickstart/app/globals.css +24 -9
  51. package/templates/vercel-ai-quickstart/app/page.tsx +41 -15
  52. package/templates/vercel-ai-quickstart/components/AdminSetup.tsx +3 -1
  53. package/templates/vercel-ai-quickstart/components/AuthProvider.tsx +6 -6
  54. package/templates/vercel-ai-quickstart/components/ChatHistorySidebar.tsx +19 -8
  55. package/templates/vercel-ai-quickstart/components/ChatInterface.tsx +46 -16
  56. package/templates/vercel-ai-quickstart/components/LoginScreen.tsx +10 -5
  57. package/templates/vercel-ai-quickstart/jest.config.js +8 -1
  58. package/templates/vercel-ai-quickstart/lib/agents/memory-agent.ts +165 -0
  59. package/templates/vercel-ai-quickstart/lib/password.ts +5 -5
  60. package/templates/vercel-ai-quickstart/lib/versions.ts +60 -0
  61. package/templates/vercel-ai-quickstart/next.config.js +10 -2
  62. package/templates/vercel-ai-quickstart/package.json +23 -12
  63. package/templates/vercel-ai-quickstart/test-api.mjs +303 -0
  64. package/templates/vercel-ai-quickstart/tests/e2e/chat-memory-flow.test.ts +483 -0
  65. package/templates/vercel-ai-quickstart/tests/helpers/mock-cortex.ts +40 -40
  66. package/templates/vercel-ai-quickstart/tests/integration/auth.test.ts +8 -8
  67. package/templates/vercel-ai-quickstart/tests/integration/conversations.test.ts +12 -8
  68. package/templates/vercel-ai-quickstart/tests/unit/password.test.ts +4 -1
package/templates/basic/src/cortex.ts (new file)
@@ -0,0 +1,203 @@
+ /**
+  * Cortex SDK Client Configuration
+  *
+  * Mirrors the quickstart's memory configuration with full feature support.
+  * Includes optional embeddings, fact extraction, and belief revision.
+  */
+
+ import { Cortex } from "@cortexmemory/sdk";
+ import type { RememberParams } from "@cortexmemory/sdk";
+ import { printLayerUpdate, printOrchestrationStart } from "./display.js";
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Configuration
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ export const CONFIG = {
+   // Memory space for isolation
+   memorySpaceId: process.env.MEMORY_SPACE_ID || "basic-demo",
+
+   // User identification
+   userId: process.env.USER_ID || "demo-user",
+   userName: process.env.USER_NAME || "Demo User",
+
+   // Agent identification (required for user-agent conversations in SDK v0.17.0+)
+   agentId: process.env.AGENT_ID || "basic-assistant",
+   agentName: process.env.AGENT_NAME || "Cortex CLI Assistant",
+
+   // Feature flags
+   enableFactExtraction: process.env.CORTEX_FACT_EXTRACTION !== "false",
+   enableGraphMemory: process.env.CORTEX_GRAPH_SYNC === "true",
+
+   // Debug mode
+   debug: process.env.DEBUG === "true",
+ };
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Singleton Client
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ let cortexClient: Cortex | null = null;
+
+ /**
+  * Get or create a Cortex SDK client
+  */
+ export function getCortex(): Cortex {
+   if (!cortexClient) {
+     const convexUrl = process.env.CONVEX_URL;
+     if (!convexUrl) {
+       throw new Error(
+         "CONVEX_URL environment variable is required.\n" +
+           "Set it in .env.local or run: cortex init",
+       );
+     }
+
+     // Build client config
+     // eslint-disable-next-line @typescript-eslint/no-explicit-any
+     const config: any = { convexUrl };
+
+     // Configure LLM for auto fact extraction when OpenAI key is available
+     if (process.env.OPENAI_API_KEY && CONFIG.enableFactExtraction) {
+       config.llm = {
+         provider: "openai",
+         apiKey: process.env.OPENAI_API_KEY,
+         model: process.env.CORTEX_FACT_EXTRACTION_MODEL || "gpt-4o-mini",
+       };
+     }
+
+     cortexClient = new Cortex(config);
+   }
+
+   return cortexClient;
+ }
+
+ /**
+  * Close the Cortex client connection
+  */
+ export function closeCortex(): void {
+   if (cortexClient) {
+     cortexClient.close();
+     cortexClient = null;
+   }
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Layer Observer (for console output)
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ export interface LayerEvent {
+   layer: string;
+   status: "in_progress" | "complete" | "error" | "skipped";
+   timestamp: number;
+   latencyMs?: number;
+   data?: Record<string, unknown>;
+   error?: string;
+   revisionAction?: "ADD" | "UPDATE" | "SUPERSEDE" | "NONE";
+   supersededFacts?: string[];
+ }
+
+ /**
+  * Create a layer observer that prints to console
+  */
+ export function createLayerObserver() {
+   return {
+     onOrchestrationStart: (orchestrationId: string) => {
+       printOrchestrationStart(orchestrationId);
+     },
+     onLayerUpdate: (event: LayerEvent) => {
+       printLayerUpdate(event);
+     },
+     onOrchestrationComplete: (summary: {
+       orchestrationId: string;
+       totalLatencyMs: number;
+       createdIds?: Record<string, string>;
+     }) => {
+       // Summary is printed by display.ts after all layers
+       if (CONFIG.debug) {
+         console.log("[Debug] Orchestration complete:", summary);
+       }
+     },
+   };
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Embedding Provider (Optional)
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ /**
+  * Get embedding provider if OpenAI is configured
+  */
+ export async function getEmbeddingProvider(): Promise<
+   ((text: string) => Promise<number[]>) | undefined
+ > {
+   if (!process.env.OPENAI_API_KEY) {
+     return undefined;
+   }
+
+   try {
+     // Dynamic import to avoid requiring openai if not used
+     const { default: OpenAI } = await import("openai");
+     const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
+
+     return async (text: string): Promise<number[]> => {
+       const result = await openai.embeddings.create({
+         model: "text-embedding-3-small",
+         input: text,
+       });
+       return result.data[0].embedding;
+     };
+   } catch {
+     // OpenAI not installed
+     return undefined;
+   }
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Remember Parameters Builder
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ export interface ChatMessage {
+   userMessage: string;
+   agentResponse: string;
+   conversationId: string;
+ }
+
+ /**
+  * Build RememberParams with full configuration
+  */
+ export async function buildRememberParams(
+   message: ChatMessage,
+ ): Promise<RememberParams> {
+   const embeddingProvider = await getEmbeddingProvider();
+
+   const params: RememberParams = {
+     // Identity
+     memorySpaceId: CONFIG.memorySpaceId,
+     conversationId: message.conversationId,
+     userId: CONFIG.userId,
+     userName: CONFIG.userName,
+     agentId: CONFIG.agentId,
+
+     // Content
+     userMessage: message.userMessage,
+     agentResponse: message.agentResponse,
+
+     // Optional embedding
+     generateEmbedding: embeddingProvider,
+
+     // Fact extraction is handled by llmConfig on the Cortex client
+     // No need to pass extractFacts here - SDK auto-extracts when llmConfig is set
+
+     // Belief revision (v0.24.0+)
+     // Automatically handles fact updates, supersessions, and deduplication
+     beliefRevision: CONFIG.enableFactExtraction
+       ? {
+           enabled: true,
+           slotMatching: true,
+           llmResolution: !!process.env.OPENAI_API_KEY,
+         }
+       : undefined,
+   };
+
+   return params;
+ }
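
Editor's note: for orientation, the helpers in this new file compose at the template's chat call site roughly as sketched below: build RememberParams for a completed turn, get the singleton client, and wire in the layer observer so display.ts can render progress. The sketch is illustrative only; the conversationId is a made-up value, and how the params and observer are actually handed to the SDK (presumably a remember() call in chat.ts, whose contents are not shown in this diff) is an assumption, not a documented signature.

import { getCortex, buildRememberParams, createLayerObserver, closeCortex } from "./cortex.js";

async function rememberTurn(userMessage: string, agentResponse: string) {
  const cortex = getCortex();
  const observer = createLayerObserver();
  const params = await buildRememberParams({
    userMessage,
    agentResponse,
    conversationId: "demo-conversation", // hypothetical ID for this sketch
  });

  // Assumption: the SDK accepts the observer callbacks alongside RememberParams.
  // The real call site and exact signature are not part of this diff.
  await cortex.remember({ ...params, ...observer });
}

On shutdown the template can call closeCortex() to release the client.
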
package/templates/basic/src/display.ts (new file)
@@ -0,0 +1,425 @@
+ /**
+  * Rich Console Display
+  *
+  * Provides beautiful console output showing Cortex's "thinking" process,
+  * mirroring the UI visualization from the Vercel AI quickstart.
+  */
+
+ import type { LayerEvent } from "./cortex.js";
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Constants
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ const BOX_WIDTH = 70;
+
+ const LAYER_INFO: Record<
+   string,
+   { icon: string; name: string; description: string }
+ > = {
+   memorySpace: {
+     icon: "📦",
+     name: "Memory Space",
+     description: "Isolated namespace for multi-tenancy",
+   },
+   user: {
+     icon: "👤",
+     name: "User",
+     description: "User profile and identity",
+   },
+   agent: {
+     icon: "🤖",
+     name: "Agent",
+     description: "AI agent participant",
+   },
+   conversation: {
+     icon: "💬",
+     name: "Conversation",
+     description: "Message storage with threading",
+   },
+   vector: {
+     icon: "🎯",
+     name: "Vector Store",
+     description: "Semantic embeddings for search",
+   },
+   facts: {
+     icon: "💡",
+     name: "Facts",
+     description: "Extracted structured information",
+   },
+   graph: {
+     icon: "🕸️",
+     name: "Graph",
+     description: "Entity relationships",
+   },
+ };
+
+ const STATUS_SYMBOLS: Record<string, string> = {
+   pending: "○",
+   in_progress: "◐",
+   complete: "✓",
+   error: "✗",
+   skipped: "○",
+ };
+
+ const REVISION_BADGES: Record<string, string> = {
+   ADD: "\x1b[32m[NEW]\x1b[0m",
+   UPDATE: "\x1b[34m[UPDATED]\x1b[0m",
+   SUPERSEDE: "\x1b[33m[SUPERSEDED]\x1b[0m",
+   NONE: "\x1b[90m[DUPLICATE]\x1b[0m",
+ };
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // State
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ interface LayerState {
+   status: string;
+   latencyMs?: number;
+   data?: Record<string, unknown>;
+   revisionAction?: string;
+   supersededFacts?: string[];
+ }
+
+ const layerStates: Map<string, LayerState> = new Map();
+ let orchestrationStartTime = 0;
+ let isOrchestrating = false;
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Spinner (Waiting Indicator)
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ const SPINNER_FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"];
+ let spinnerInterval: ReturnType<typeof setInterval> | null = null;
+ let spinnerFrame = 0;
+ let currentSpinnerMessage = "";
+
+ /**
+  * Start a spinner with a message
+  */
+ export function startSpinner(message: string): void {
+   // Stop any existing spinner first
+   if (spinnerInterval) {
+     stopSpinner();
+   }
+
+   currentSpinnerMessage = message;
+   spinnerFrame = 0;
+
+   // Write initial frame
+   process.stdout.write(`\r\x1b[36m${SPINNER_FRAMES[0]}\x1b[0m ${message}`);
+
+   // Animate the spinner
+   spinnerInterval = setInterval(() => {
+     spinnerFrame = (spinnerFrame + 1) % SPINNER_FRAMES.length;
+     process.stdout.write(`\r\x1b[36m${SPINNER_FRAMES[spinnerFrame]}\x1b[0m ${currentSpinnerMessage}`);
+   }, 80);
+ }
+
+ /**
+  * Update spinner message without stopping
+  */
+ export function updateSpinner(message: string): void {
+   if (!spinnerInterval) return;
+
+   // Clear the line and write new message
+   currentSpinnerMessage = message;
+   process.stdout.write(`\r\x1b[K\x1b[36m${SPINNER_FRAMES[spinnerFrame]}\x1b[0m ${message}`);
+ }
+
+ /**
+  * Stop the spinner with optional success/failure indicator
+  */
+ export function stopSpinner(success?: boolean, message?: string): void {
+   if (spinnerInterval) {
+     clearInterval(spinnerInterval);
+     spinnerInterval = null;
+   }
+
+   // Clear the spinner line
+   process.stdout.write("\r\x1b[K");
+
+   // Print final message if provided
+   if (message !== undefined) {
+     if (success === true) {
+       console.log(`\x1b[32m✓\x1b[0m ${message}`);
+     } else if (success === false) {
+       console.log(`\x1b[31m✗\x1b[0m ${message}`);
+     } else {
+       console.log(` ${message}`);
+     }
+   }
+
+   currentSpinnerMessage = "";
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Box Drawing Helpers
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ function boxTop(): string {
+   return "┌" + "─".repeat(BOX_WIDTH - 2) + "┐";
+ }
+
+ function boxBottom(): string {
+   return "└" + "─".repeat(BOX_WIDTH - 2) + "┘";
+ }
+
+ function boxDivider(): string {
+   return "├" + "─".repeat(BOX_WIDTH - 2) + "┤";
+ }
+
+ function boxLine(content: string, indent = 0): string {
+   const indentStr = " ".repeat(indent);
+   const text = indentStr + content;
+   const padding = BOX_WIDTH - 4 - text.length;
+   // Handle ANSI escape codes by not counting them in padding
+   const visibleLength = text.replace(/\x1b\[[0-9;]*m/g, "").length;
+   const actualPadding = BOX_WIDTH - 4 - visibleLength;
+   return "│ " + text + " ".repeat(Math.max(0, actualPadding)) + "│";
+ }
+
+ function boxEmpty(): string {
+   return "│" + " ".repeat(BOX_WIDTH - 2) + "│";
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Public API
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ /**
+  * Print orchestration start header
+  */
+ export function printOrchestrationStart(_orchestrationId: string): void {
+   orchestrationStartTime = Date.now();
+   isOrchestrating = true;
+   layerStates.clear();
+
+   console.log("");
+   console.log(boxTop());
+   console.log(boxLine("MEMORY ORCHESTRATION"));
+   console.log(boxDivider());
+ }
+
+ /**
+  * Print a layer update event
+  */
+ export function printLayerUpdate(event: LayerEvent): void {
+   const info = LAYER_INFO[event.layer];
+   if (!info) return;
+
+   // Store state
+   layerStates.set(event.layer, {
+     status: event.status,
+     latencyMs: event.latencyMs,
+     data: event.data,
+     revisionAction: event.revisionAction,
+     supersededFacts: event.supersededFacts,
+   });
+
+   // Only print on complete/error/skipped
+   if (event.status === "in_progress") return;
+
+   const symbol = STATUS_SYMBOLS[event.status] || "?";
+   const latency = event.latencyMs ? `(${event.latencyMs}ms)` : "";
+   const revision =
+     event.revisionAction && event.layer === "facts"
+       ? " " + REVISION_BADGES[event.revisionAction]
+       : "";
+
+   // Layer header
+   const header = `${info.icon} ${info.name.padEnd(14)} ${symbol} ${event.status} ${revision} ${latency}`;
+   console.log(boxLine(header));
+
+   // Layer details based on data
+   if (event.data) {
+     printLayerData(event.layer, event.data);
+   }
+
+   // Superseded facts (for belief revision)
+   if (event.supersededFacts && event.supersededFacts.length > 0) {
+     console.log(boxLine("Superseded:", 3));
+     for (const fact of event.supersededFacts) {
+       console.log(boxLine(`• ${truncate(fact, 50)}`, 5));
+     }
+   }
+
+   console.log(boxEmpty());
+ }
+
+ /**
+  * Print layer-specific data
+  */
+ function printLayerData(layer: string, data: Record<string, unknown>): void {
+   switch (layer) {
+     case "memorySpace":
+       if (data.id) console.log(boxLine(`→ ID: ${data.id}`, 3));
+       if (data.isolation) console.log(boxLine(`→ Isolation: ${data.isolation}`, 3));
+       break;
+
+     case "user":
+       if (data.id) console.log(boxLine(`→ ID: ${data.id}`, 3));
+       if (data.name) console.log(boxLine(`→ Name: ${data.name}`, 3));
+       break;
+
+     case "agent":
+       if (data.id) console.log(boxLine(`→ ID: ${data.id}`, 3));
+       if (data.name) console.log(boxLine(`→ Name: ${data.name}`, 3));
+       break;
+
+     case "conversation":
+       if (data.id) console.log(boxLine(`→ ID: ${data.id}`, 3));
+       if (data.messageCount)
+         console.log(boxLine(`→ Messages: ${data.messageCount}`, 3));
+       if (data.preview)
+         console.log(boxLine(`→ "${truncate(String(data.preview), 45)}"`, 3));
+       break;
+
+     case "vector":
+       if (data.dimensions)
+         console.log(boxLine(`→ Embedded with ${data.dimensions} dimensions`, 3));
+       if (data.importance)
+         console.log(boxLine(`→ Importance: ${data.importance}`, 3));
+       break;
+
+     case "facts":
+       if (Array.isArray(data.facts) && data.facts.length > 0) {
+         console.log(boxLine(`→ Extracted ${data.facts.length} fact(s):`, 3));
+         for (const fact of data.facts.slice(0, 3)) {
+           const f = fact as { content?: string; factType?: string; confidence?: number };
+           const type = f.factType ? ` (${f.factType})` : "";
+           const conf = f.confidence ? ` ${f.confidence}%` : "";
+           console.log(boxLine(`• "${truncate(f.content || "", 40)}"${type}${conf}`, 5));
+         }
+         if (data.facts.length > 3) {
+           console.log(boxLine(`• ... and ${data.facts.length - 3} more`, 5));
+         }
+       } else if (data.count) {
+         console.log(boxLine(`→ Extracted ${data.count} fact(s)`, 3));
+       }
+       break;
+
+     case "graph":
+       if (data.nodes) console.log(boxLine(`→ Nodes: ${data.nodes}`, 3));
+       if (data.edges) console.log(boxLine(`→ Edges: ${data.edges}`, 3));
+       break;
+   }
+ }
+
+ /**
+  * Print orchestration complete summary
+  */
+ export function printOrchestrationComplete(totalMs?: number): void {
+   if (!isOrchestrating) return;
+
+   const elapsed = totalMs || Date.now() - orchestrationStartTime;
+
+   console.log(boxDivider());
+   console.log(boxLine(`Total: ${elapsed}ms`));
+   console.log(boxBottom());
+   console.log("");
+
+   isOrchestrating = false;
+ }
+
+ /**
+  * Print recall results
+  */
+ export function printRecallResults(
+   memories: Array<{ content?: string; importance?: number; source?: string }>,
+   facts: Array<{ content?: string; factType?: string; confidence?: number }>,
+ ): void {
+   console.log("");
+   console.log(boxTop());
+   console.log(boxLine("MEMORY RECALL"));
+   console.log(boxDivider());
+
+   if (memories.length === 0 && facts.length === 0) {
+     console.log(boxLine("No relevant memories found"));
+   } else {
+     if (memories.length > 0) {
+       console.log(boxLine(`🎯 ${memories.length} relevant memories:`));
+       for (const mem of memories.slice(0, 5)) {
+         const imp = mem.importance ? ` [${mem.importance}]` : "";
+         console.log(boxLine(`• ${truncate(mem.content || "", 55)}${imp}`, 3));
+       }
+       if (memories.length > 5) {
+         console.log(boxLine(`... and ${memories.length - 5} more`, 3));
+       }
+       console.log(boxEmpty());
+     }
+
+     if (facts.length > 0) {
+       console.log(boxLine(`💡 ${facts.length} known facts:`));
+       for (const fact of facts.slice(0, 5)) {
+         const type = fact.factType ? ` (${fact.factType})` : "";
+         console.log(boxLine(`• ${truncate(fact.content || "", 50)}${type}`, 3));
+       }
+       if (facts.length > 5) {
+         console.log(boxLine(`... and ${facts.length - 5} more`, 3));
+       }
+     }
+   }
+
+   console.log(boxBottom());
+   console.log("");
+ }
+
+ /**
+  * Print welcome banner
+  */
+ export function printWelcome(mode: "cli" | "server"): void {
+   console.log("");
+   console.log("╔══════════════════════════════════════════════════════════════════╗");
+   console.log("║ ║");
+   console.log("║ 🧠 Cortex Memory - Basic Demo ║");
+   console.log("║ ║");
+   console.log("║ Demonstrating memory orchestration without UI ║");
+   console.log("║ ║");
+   console.log("╚══════════════════════════════════════════════════════════════════╝");
+   console.log("");
+
+   if (mode === "cli") {
+     console.log("Type a message and press Enter to chat.");
+     console.log("Commands: /recall <query>, /facts, /history, /clear, /exit");
+     console.log("");
+   } else {
+     console.log("Server mode - POST /chat with { message, conversationId }");
+     console.log("");
+   }
+ }
+
+ /**
+  * Print error message
+  */
+ export function printError(message: string, error?: Error): void {
+   console.log("");
+   console.log(`\x1b[31m❌ Error: ${message}\x1b[0m`);
+   if (error && process.env.DEBUG === "true") {
+     console.log(`\x1b[90m${error.stack}\x1b[0m`);
+   }
+   console.log("");
+ }
+
+ /**
+  * Print info message
+  */
+ export function printInfo(message: string): void {
+   console.log(`\x1b[36mℹ ${message}\x1b[0m`);
+ }
+
+ /**
+  * Print success message
+  */
+ export function printSuccess(message: string): void {
+   console.log(`\x1b[32m✓ ${message}\x1b[0m`);
+ }
+
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+ // Helpers
+ // ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+ function truncate(str: string, maxLen: number): string {
+   if (str.length <= maxLen) return str;
+   return str.slice(0, maxLen - 3) + "...";
+ }
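
Editor's note: read together, display.ts turns the observer events emitted via cortex.ts into one boxed trace per remembered turn, with the spinner covering the wait before events arrive. The sketch below uses only functions defined in the hunk above; the event payloads (IDs, fact text, latencies) are invented purely for illustration.

import {
  startSpinner,
  stopSpinner,
  printOrchestrationStart,
  printLayerUpdate,
  printOrchestrationComplete,
} from "./display.js";

// Spinner while the SDK call is in flight, then a success line.
startSpinner("Storing memory...");
stopSpinner(true, "Stored");

// Render a fabricated orchestration trace (normally these events come
// from the layer observer created in cortex.ts).
printOrchestrationStart("demo-orchestration");
printLayerUpdate({
  layer: "conversation",
  status: "complete",
  timestamp: Date.now(),
  latencyMs: 42,
  data: { id: "conv_123", messageCount: 2 },
});
printLayerUpdate({
  layer: "facts",
  status: "complete",
  timestamp: Date.now(),
  latencyMs: 310,
  revisionAction: "ADD",
  data: { facts: [{ content: "User lives in Berlin", factType: "profile", confidence: 92 }] },
});
printOrchestrationComplete(352);

Because printLayerUpdate only renders on complete/error/skipped, in_progress events merely update the internal layer state, which keeps the box output stable while layers are still running.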