@cortexmemory/cli 0.29.0 → 0.32.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,20 +19,20 @@
  },
  "dependencies": {
  "@cortexmemory/sdk": "latest",
- "convex": "^1.31.2",
+ "convex": "^1.31.5",
  "dotenv": "^17.2.3",
- "hono": "^4.11.3",
- "@hono/node-server": "^1.19.7"
+ "hono": "^4.11.4",
+ "@hono/node-server": "^1.19.9"
  },
  "devDependencies": {
- "@types/node": "^25.0.3",
- "@vitest/coverage-v8": "^4.0.16",
+ "@types/node": "^25.0.9",
+ "@vitest/coverage-v8": "^4.0.17",
  "typescript": "^5.9.3",
  "tsx": "^4.21.0",
- "vitest": "^4.0.16"
+ "vitest": "^4.0.17"
  },
  "optionalDependencies": {
- "openai": "^6.15.0"
+ "openai": "^6.16.0"
  },
  "engines": {
  "node": ">=20"
@@ -13,11 +13,11 @@
  * infrastructure is identical to ensure feature parity.
  */

- import { createCortexMemoryAsync } from "@cortexmemory/vercel-ai-provider";
- import type {
- LayerObserver,
- CortexMemoryConfig,
+ import {
+ createCortexMemoryAsync,
+ createLayerStreamObserver,
  } from "@cortexmemory/vercel-ai-provider";
+ import type { CortexMemoryConfig } from "@cortexmemory/vercel-ai-provider";
  import { openai, createOpenAI } from "@ai-sdk/openai";
  import {
  streamText,
@@ -52,8 +52,7 @@ function getCortexMemoryConfig(
  memorySpaceId: string,
  userId: string,
  conversationId: string,
- layerObserver?: LayerObserver,
- ): CortexMemoryConfig {
+ ): Omit<CortexMemoryConfig, "layerObserver"> {
  return {
  convexUrl: process.env.CONVEX_URL!,
  memorySpaceId,
@@ -103,9 +102,6 @@ function getCortexMemoryConfig(
  // Memory recall configuration
  memorySearchLimit: 20,

- // Real-time layer tracking
- layerObserver,
-
  // Debug in development
  debug: process.env.NODE_ENV === "development",
  };
@@ -233,54 +229,23 @@ export async function POST(req: Request) {
  return createUIMessageStreamResponse({
  stream: createUIMessageStream({
  execute: async ({ writer }) => {
- // Create layer observer for real-time UI updates
- const layerObserver: LayerObserver = {
- onOrchestrationStart: (orchestrationId) => {
- writer.write({
- type: "data-orchestration-start",
- data: { orchestrationId },
- transient: true,
- });
- },
- onLayerUpdate: (event) => {
- writer.write({
- type: "data-layer-update",
- data: {
- layer: event.layer,
- status: event.status,
- timestamp: event.timestamp,
- latencyMs: event.latencyMs,
- data: event.data,
- error: event.error,
- revisionAction: event.revisionAction,
- supersededFacts: event.supersededFacts,
- },
- transient: true,
- });
- },
- onOrchestrationComplete: (summary) => {
- writer.write({
- type: "data-orchestration-complete",
- data: {
- orchestrationId: summary.orchestrationId,
- totalLatencyMs: summary.totalLatencyMs,
- createdIds: summary.createdIds,
- },
- transient: true,
- });
- },
- };
+ // Create layer observer using the streaming helper
+ // This replaces ~35 lines of manual observer setup with 2 lines!
+ const { observer, emitTo } = createLayerStreamObserver();
+ emitTo(writer);

  // Build config with observer
  const config = getCortexMemoryConfig(
  memorySpaceId,
  userId,
  conversationId,
- layerObserver,
  );

  // Create memory-augmented model - THIS handles both recall AND storage!
- const cortexMemory = await createCortexMemoryAsync(config);
+ const cortexMemory = await createCortexMemoryAsync({
+ ...config,
+ layerObserver: observer,
+ });

  // Stream response with automatic memory integration
  const result = streamText({
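Taken out of diff form, the server-side change reduces to a small pattern: the hand-rolled LayerObserver is replaced by createLayerStreamObserver(), and the observer is attached when the memory-augmented model is created instead of being threaded through getCortexMemoryConfig(). The sketch below assumes only what the hunks expose (the helper returns { observer, emitTo }, emitTo(writer) forwards layer events to the UI message stream, and createCortexMemoryAsync() accepts the config plus a layerObserver field); createObservedMemoryModel is a hypothetical name used for illustration, and the writer type is derived from the helper's own signature rather than assumed.

```typescript
import {
  createCortexMemoryAsync,
  createLayerStreamObserver,
} from "@cortexmemory/vercel-ai-provider";
import type { CortexMemoryConfig } from "@cortexmemory/vercel-ai-provider";

// `writer` is the UI message stream writer handed to createUIMessageStream's
// execute callback; `config` is what getCortexMemoryConfig() now returns.
async function createObservedMemoryModel(
  writer: Parameters<ReturnType<typeof createLayerStreamObserver>["emitTo"]>[0],
  config: Omit<CortexMemoryConfig, "layerObserver">,
) {
  // The helper builds the observer and wires its events (orchestration start,
  // per-layer updates, orchestration complete) into the UI message stream.
  const { observer, emitTo } = createLayerStreamObserver();
  emitTo(writer);

  // The observer is attached at model-creation time rather than living inside
  // the shared config object.
  return createCortexMemoryAsync({
    ...config,
    layerObserver: observer,
  });
}
```

In the route itself this logic is inlined inside the execute callback, with the config still coming from getCortexMemoryConfig(), which now returns Omit<CortexMemoryConfig, "layerObserver">.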
@@ -1,214 +1,43 @@
  "use client";

- import { useState, useCallback } from "react";
-
- // Layer types (defined locally to avoid import issues before npm install)
- export type MemoryLayer =
- | "memorySpace"
- | "user"
- | "agent"
- | "conversation"
- | "vector"
- | "facts"
- | "graph";
-
- export type LayerStatus =
- | "pending"
- | "in_progress"
- | "complete"
- | "error"
- | "skipped";
-
  /**
- * Revision action taken by the belief revision system (v0.24.0+)
- * - ADD: New fact was created (no conflicts)
- * - UPDATE: Existing fact was updated with new information
- * - SUPERSEDE: Old fact was superseded by contradicting information
- * - NONE: No action taken (duplicate or irrelevant)
+ * Layer Tracking - Re-exports from @cortexmemory/vercel-ai-provider/react
+ *
+ * This file re-exports the layer tracking hook and utilities from the provider
+ * package, adding the "use client" directive for Next.js App Router compatibility.
+ *
+ * The provider package contains the full implementation including:
+ * - useLayerTracking() hook for state management
+ * - handleDataPart callback for automatic stream parsing
+ * - Type definitions for layers, states, and events
+ *
+ * @example
+ * ```typescript
+ * import { useLayerTracking } from '@/lib/layer-tracking';
+ * import { useChat } from '@ai-sdk/react';
+ *
+ * function ChatComponent() {
+ * const { layers, isOrchestrating, handleDataPart } = useLayerTracking();
+ * const { messages } = useChat({ onData: handleDataPart });
+ * // ...
+ * }
+ * ```
  */
- export type RevisionAction = "ADD" | "UPDATE" | "SUPERSEDE" | "NONE";
-
- export interface LayerState {
- status: LayerStatus;
- latencyMs?: number;
- data?: {
- id?: string;
- preview?: string;
- metadata?: Record<string, unknown>;
- };
- startedAt?: number;
- completedAt?: number;
- /**
- * Revision action taken (v0.24.0+)
- * Only present for facts layer when belief revision is enabled
- */
- revisionAction?: RevisionAction;
- /**
- * Facts that were superseded by this action (v0.24.0+)
- * Only present when revisionAction is "SUPERSEDE"
- */
- supersededFacts?: string[];
- }
-
- export interface LayerTrackingState {
- layers: Record<string, LayerState>;
- isOrchestrating: boolean;
- orchestrationStartTime?: number;
- }
-
- const initialLayerState: LayerState = {
- status: "pending",
- };
-
- const allLayers: MemoryLayer[] = [
- "memorySpace",
- "user",
- "agent",
- "conversation",
- "vector",
- "facts",
- "graph",
- ];
-
- export function useLayerTracking() {
- const [state, setState] = useState<LayerTrackingState>({
- layers: Object.fromEntries(
- allLayers.map((layer) => [layer, { ...initialLayerState }]),
- ),
- isOrchestrating: false,
- });
-
- const startOrchestration = useCallback(() => {
- const now = Date.now();
- setState({
- layers: Object.fromEntries(
- allLayers.map((layer) => [
- layer,
- { status: "pending" as LayerStatus, startedAt: now },
- ]),
- ),
- isOrchestrating: true,
- orchestrationStartTime: now,
- });
- }, []);

- const updateLayer = useCallback(
- (
- layer: MemoryLayer,
- status: LayerStatus,
- data?: LayerState["data"],
- revisionInfo?: {
- action?: RevisionAction;
- supersededFacts?: string[];
- },
- ) => {
- setState((prev: LayerTrackingState) => {
- const now = Date.now();
- const layerState = prev.layers[layer];
- const latencyMs = layerState?.startedAt
- ? now - layerState.startedAt
- : prev.orchestrationStartTime
- ? now - prev.orchestrationStartTime
- : undefined;
-
- // Check if all layers are complete
- const updatedLayers: Record<string, LayerState> = {
- ...prev.layers,
- [layer]: {
- ...layerState,
- status,
- latencyMs,
- data,
- completedAt: status === "complete" ? now : layerState?.completedAt,
- // Belief revision info (v0.24.0+)
- revisionAction: revisionInfo?.action,
- supersededFacts: revisionInfo?.supersededFacts,
- },
- };
-
- const isStillOrchestrating = Object.values(updatedLayers).some(
- (l: LayerState) =>
- l.status === "pending" || l.status === "in_progress",
- );
-
- return {
- ...prev,
- layers: updatedLayers,
- isOrchestrating: isStillOrchestrating,
- };
- });
- },
- [],
- );
-
- const resetLayers = useCallback(() => {
- setState({
- layers: Object.fromEntries(
- allLayers.map((layer) => [layer, { ...initialLayerState }]),
- ),
- isOrchestrating: false,
- });
- }, []);
-
- return {
- layers: state.layers,
- isOrchestrating: state.isOrchestrating,
- startOrchestration,
- updateLayer,
- resetLayers,
- };
- }
-
- /**
- * Generate sample data for layer previews (used in demos)
- */
- export function generateSampleLayerData(
- layer: MemoryLayer,
- userMessage?: string,
- ): LayerState["data"] {
- switch (layer) {
- case "memorySpace":
- return {
- id: "quickstart-demo",
- preview: "Memory space for demo",
- metadata: { isolation: "full" },
- };
- case "user":
- return {
- id: "demo-user",
- preview: "Demo User",
- metadata: { memories: 5 },
- };
- case "agent":
- return {
- id: "quickstart-assistant",
- preview: "Cortex Demo Assistant",
- };
- case "conversation":
- return {
- id: `conv-${Date.now()}`,
- preview: userMessage?.slice(0, 50) || "New conversation",
- metadata: { messages: 2 },
- };
- case "vector":
- return {
- id: `mem-${Date.now()}`,
- preview: "Embedded content...",
- metadata: { dimensions: 1536, importance: 75 },
- };
- case "facts":
- return {
- id: `fact-${Date.now()}`,
- preview: "Extracted facts from conversation",
- metadata: { count: 3, types: ["identity", "preference"] },
- };
- case "graph":
- return {
- id: `graph-sync-${Date.now()}`,
- preview: "Entity relationships",
- metadata: { nodes: 4, edges: 3 },
- };
- default:
- return undefined;
- }
- }
+ // Re-export everything from the provider's React module
+ export {
+ useLayerTracking,
+ generateSampleLayerData,
+ ALL_LAYERS,
+ } from "@cortexmemory/vercel-ai-provider/react";
+
+ // Re-export types
+ export type {
+ MemoryLayer,
+ LayerStatus,
+ RevisionAction,
+ LayerState,
+ LayerTrackingState,
+ LayerUpdateData,
+ UseLayerTrackingResult,
+ } from "@cortexmemory/vercel-ai-provider/react";
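On the client, the re-exported hook is consumed as the new @example describes. A slightly fuller sketch follows, assuming the hook keeps the shape of the removed local implementation above (layers as a Record of LayerState plus isOrchestrating) and exposes the handleDataPart callback named in the doc comment; the component, its markup, and the '@/lib/layer-tracking' path (taken from the @example) are illustrative only.

```tsx
"use client";

import { useChat } from "@ai-sdk/react";
import { useLayerTracking } from "@/lib/layer-tracking";

export function LayerStatusPanel() {
  // handleDataPart parses the transient data parts emitted by the server route.
  const { layers, isOrchestrating, handleDataPart } = useLayerTracking();
  const { messages } = useChat({ onData: handleDataPart });

  return (
    <ul>
      <li>{isOrchestrating ? "Storing memory..." : "Idle"}</li>
      <li>{messages.length} message(s) so far</li>
      {Object.entries(layers).map(([name, layer]) => (
        <li key={name}>
          {name}: {layer.status}
          {layer.latencyMs != null ? ` (${layer.latencyMs}ms)` : ""}
        </li>
      ))}
    </ul>
  );
}
```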
@@ -19,17 +19,17 @@
  "convex:deploy": "convex deploy"
  },
  "dependencies": {
- "@ai-sdk/openai": "^3.0.4",
- "@ai-sdk/react": "^3.0.11",
+ "@ai-sdk/openai": "^3.0.12",
+ "@ai-sdk/react": "^3.0.44",
  "@anthropic-ai/sdk": "^0.71.2",
  "@cortexmemory/sdk": "file:../../..",
  "@cortexmemory/vercel-ai-provider": "file:..",
- "ai": "^6.0.11",
- "convex": "^1.31.2",
- "framer-motion": "^12.24.0",
+ "ai": "^6.0.42",
+ "convex": "^1.31.5",
+ "framer-motion": "^12.27.1",
  "neo4j-driver": "^6.0.1",
- "next": "^16.1.1",
- "openai": "^6.15.0",
+ "next": "^16.1.4",
+ "openai": "^6.16.0",
  "react": "^19.2.3",
  "react-dom": "^19.2.3",
  "zod": "^4.3.5"
@@ -37,8 +37,8 @@
  "devDependencies": {
  "@tailwindcss/postcss": "^4.1.18",
  "@types/jest": "^30.0.0",
- "@types/node": "^25.0.3",
- "@types/react": "^19.2.7",
+ "@types/node": "^25.0.9",
+ "@types/react": "^19.2.8",
  "@types/react-dom": "^19.2.3",
  "autoprefixer": "^10.4.23",
  "jest": "^30.2.0",