@maximem/synap-js-sdk 0.1.4 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -18,10 +18,10 @@ npm install @maximem/synap-js-sdk
18
18
  Install the Python runtime used by the wrapper:
19
19
 
20
20
  ```bash
21
- npx synap-js-sdk setup --sdk-version 0.1.1
21
+ npx synap-js-sdk setup --sdk-version 0.2.0
22
22
  ```
23
23
 
24
- If you want the latest Python SDK version, omit `--sdk-version`.
24
+ If you want the latest Python SDK version, omit `--sdk-version` and pass `--upgrade`.
25
25
 
26
26
  ## Verify Runtime
27
27
 
@@ -65,16 +65,27 @@ async function run() {
65
65
 
66
66
  await synap.addMemory({
67
67
  userId: 'user-123',
68
+ customerId: 'customer-456',
69
+ conversationId: 'conv-123',
68
70
  messages: [{ role: 'user', content: 'My name is Alex and I live in Austin.' }],
69
71
  });
70
72
 
71
- const result = await synap.searchMemory({
73
+ const context = await synap.fetchUserContext({
72
74
  userId: 'user-123',
73
- query: 'Where does the user live?',
75
+ customerId: 'customer-456',
76
+ conversationId: 'conv-123',
77
+ searchQuery: ['Where does the user live?'],
74
78
  maxResults: 10,
75
79
  });
76
80
 
77
- console.log(result);
81
+ console.log(context.facts);
82
+
83
+ const promptContext = await synap.getContextForPrompt({
84
+ conversationId: 'conv-123',
85
+ style: 'structured',
86
+ });
87
+
88
+ console.log(promptContext.formattedContext);
78
89
  await synap.shutdown();
79
90
  }
80
91
 
@@ -97,9 +108,17 @@ This command can:
97
108
  ## Single-Flow Setup (JS + TS)
98
109
 
99
110
  ```bash
100
- npm install @maximem/synap-js-sdk && npx synap-js-sdk setup --sdk-version 0.1.1 && npx synap-js-sdk setup-ts
111
+ npm install @maximem/synap-js-sdk && npx synap-js-sdk setup --sdk-version 0.2.0 && npx synap-js-sdk setup-ts
101
112
  ```
102
113
 
114
+ ## API Notes
115
+
116
+ - `addMemory()` now requires `customerId` to match the Python SDK's explicit ingestion scope.
117
+ - `fetchUserContext()`, `fetchCustomerContext()`, and `fetchClientContext()` expose the structured Python `ContextResponse` surface in JS/TS.
118
+ - `getContextForPrompt()` exposes compacted context plus recent un-compacted messages.
119
+ - `searchMemory()` and `getMemories()` remain convenience helpers built on top of user-scoped context fetches.
120
+ - Temporal fields are exposed in JS/TS as `eventDate`, `validUntil`, `temporalCategory`, `temporalConfidence`, plus top-level `temporalEvents`.
121
+
103
122
  ## CLI Commands
104
123
 
105
124
  ```bash
@@ -5,7 +5,9 @@ Protocol:
5
5
  stdin -> {"id": 1, "method": "init", "params": {...}}\n
6
6
  stdout <- {"id": 1, "result": {...}, "error": null}\n
7
7
  Methods:
8
- init, add_memory, search_memory, get_memories, delete_memory, shutdown
8
+ init, add_memory, search_memory, get_memories, fetch_user_context,
9
+ fetch_customer_context, fetch_client_context, get_context_for_prompt,
10
+ delete_memory, shutdown
9
11
  """
10
12
 
11
13
  import asyncio
@@ -51,6 +53,92 @@ def write_response(obj: dict) -> None:
51
53
  sys.stdout.flush()
52
54
 
53
55
 
56
+ def tracking_key(user_id: str, customer_id: Optional[str]) -> str:
57
+ """Scope tracked memory IDs by both customer and user."""
58
+ return f"{customer_id or ''}::{user_id}"
59
+
60
+
61
+ def serialize_context_response(context) -> dict:
62
+ """Serialize a Python ContextResponse for the JS bridge."""
63
+ payload = context.model_dump(mode="json")
64
+ payload["raw_response"] = context.raw if hasattr(context, "raw") else {}
65
+ return payload
66
+
67
+
68
+ def serialize_context_for_prompt_response(response) -> dict:
69
+ """Serialize a Python ContextForPromptResponse for the JS bridge."""
70
+ return response.model_dump(mode="json")
71
+
72
+
73
+ def flatten_context_items(context) -> List[dict]:
74
+ """Convert typed context collections into a flat memory list."""
75
+ items: List[dict] = []
76
+ for fact in context.facts:
77
+ items.append({
78
+ "id": fact.id,
79
+ "memory": fact.content,
80
+ "score": fact.confidence,
81
+ "source": fact.source,
82
+ "metadata": fact.metadata,
83
+ "context_type": "fact",
84
+ "event_date": str(fact.event_date) if getattr(fact, "event_date", None) else None,
85
+ "valid_until": str(fact.valid_until) if getattr(fact, "valid_until", None) else None,
86
+ "temporal_category": getattr(fact, "temporal_category", None),
87
+ "temporal_confidence": getattr(fact, "temporal_confidence", 0.0),
88
+ })
89
+ for preference in context.preferences:
90
+ items.append({
91
+ "id": preference.id,
92
+ "memory": preference.content,
93
+ "score": preference.strength,
94
+ "source": getattr(preference, "source", ""),
95
+ "metadata": preference.metadata,
96
+ "context_type": "preference",
97
+ "event_date": str(preference.event_date) if getattr(preference, "event_date", None) else None,
98
+ "valid_until": str(preference.valid_until) if getattr(preference, "valid_until", None) else None,
99
+ "temporal_category": getattr(preference, "temporal_category", None),
100
+ "temporal_confidence": getattr(preference, "temporal_confidence", 0.0),
101
+ })
102
+ for episode in context.episodes:
103
+ items.append({
104
+ "id": episode.id,
105
+ "memory": episode.summary,
106
+ "score": episode.significance,
107
+ "metadata": episode.metadata,
108
+ "context_type": "episode",
109
+ "event_date": str(episode.event_date) if getattr(episode, "event_date", None) else None,
110
+ "valid_until": str(episode.valid_until) if getattr(episode, "valid_until", None) else None,
111
+ "temporal_category": getattr(episode, "temporal_category", None),
112
+ "temporal_confidence": getattr(episode, "temporal_confidence", 0.0),
113
+ })
114
+ for emotion in context.emotions:
115
+ items.append({
116
+ "id": emotion.id,
117
+ "memory": emotion.context,
118
+ "score": emotion.intensity,
119
+ "metadata": emotion.metadata,
120
+ "context_type": "emotion",
121
+ "event_date": str(emotion.event_date) if getattr(emotion, "event_date", None) else None,
122
+ "valid_until": str(emotion.valid_until) if getattr(emotion, "valid_until", None) else None,
123
+ "temporal_category": getattr(emotion, "temporal_category", None),
124
+ "temporal_confidence": getattr(emotion, "temporal_confidence", 0.0),
125
+ })
126
+ for event in getattr(context, "temporal_events", []):
127
+ items.append({
128
+ "id": event.id,
129
+ "memory": event.content,
130
+ "score": event.temporal_confidence,
131
+ "source": event.source,
132
+ "metadata": event.metadata,
133
+ "context_type": "temporal_event",
134
+ "event_date": str(event.event_date) if event.event_date else None,
135
+ "valid_until": str(event.valid_until) if event.valid_until else None,
136
+ "temporal_category": event.temporal_category,
137
+ "temporal_confidence": event.temporal_confidence,
138
+ })
139
+ return items
140
+
141
+
54
142
  def messages_to_text(messages: List[dict]) -> str:
55
143
  lines: List[str] = []
56
144
  for message in messages:
@@ -147,7 +235,13 @@ async def handle_add_memory(params: dict) -> dict:
147
235
  timings: List[dict] = []
148
236
 
149
237
  user_id = params["user_id"]
238
+ customer_id = params.get("customer_id")
150
239
  messages = params["messages"]
240
+ conversation_id = params.get("conversation_id")
241
+ session_id = params.get("session_id")
242
+
243
+ if not customer_id:
244
+ raise ValueError("customer_id is required")
151
245
 
152
246
  step = time.perf_counter()
153
247
  transcript = messages_to_text(messages)
@@ -169,12 +263,26 @@ async def handle_add_memory(params: dict) -> dict:
169
263
 
170
264
  step = time.perf_counter()
171
265
  mode = params.get("mode", "long-range")
172
- create_result = await sdk.memories.create(
173
- document=transcript,
174
- document_type="ai-chat-conversation",
175
- user_id=user_id,
176
- mode=mode,
177
- )
266
+ document_type = params.get("document_type", "ai-chat-conversation")
267
+ document_id = params.get("document_id")
268
+ document_created_at = params.get("document_created_at")
269
+ metadata = params.get("metadata")
270
+
271
+ create_kwargs: dict = {
272
+ "document": transcript,
273
+ "document_type": document_type,
274
+ "user_id": user_id,
275
+ "customer_id": customer_id,
276
+ "mode": mode,
277
+ }
278
+ if document_id is not None:
279
+ create_kwargs["document_id"] = document_id
280
+ if document_created_at is not None:
281
+ create_kwargs["document_created_at"] = document_created_at
282
+ if metadata is not None:
283
+ create_kwargs["metadata"] = metadata
284
+
285
+ create_result = await sdk.memories.create(**create_kwargs)
178
286
  append_step(timings, "memories_create", step)
179
287
 
180
288
  ingestion_id = create_result.ingestion_id
@@ -186,10 +294,16 @@ async def handle_add_memory(params: dict) -> dict:
186
294
  if not content:
187
295
  continue
188
296
  try:
297
+ role = message.get("role", "user")
189
298
  await sdk.instance.send_message(
190
299
  content=content,
191
- role=message.get("role", "user"),
300
+ role=role,
301
+ conversation_id=conversation_id,
192
302
  user_id=user_id,
303
+ customer_id=customer_id,
304
+ session_id=session_id,
305
+ event_type="assistant_message" if role == "assistant" else "user_message",
306
+ metadata=message.get("metadata"),
193
307
  )
194
308
  except Exception as exc:
195
309
  logger.debug("gRPC send_message failed (non-fatal): %s", exc)
@@ -218,7 +332,7 @@ async def handle_add_memory(params: dict) -> dict:
218
332
 
219
333
  memory_ids = [str(memory_id) for memory_id in (final_status.memory_ids or [])]
220
334
  if memory_ids:
221
- user_memory_ids.setdefault(user_id, []).extend(memory_ids)
335
+ user_memory_ids.setdefault(tracking_key(user_id, customer_id), []).extend(memory_ids)
222
336
 
223
337
  return {
224
338
  "success": final_status.status.value != "failed",
@@ -242,38 +356,29 @@ async def handle_search_memory(params: dict) -> dict:
242
356
  timings: List[dict] = []
243
357
 
244
358
  user_id = params["user_id"]
359
+ customer_id = params.get("customer_id")
245
360
  query = params["query"]
246
361
  max_results = params.get("max_results", 10)
247
362
  mode = params.get("mode", "fast")
363
+ conversation_id = params.get("conversation_id")
364
+ types = params.get("types", ["all"])
248
365
 
249
366
  start = time.perf_counter()
250
367
 
251
368
  step = time.perf_counter()
252
369
  context = await sdk.user.context.fetch(
253
370
  user_id=user_id,
371
+ customer_id=customer_id,
372
+ conversation_id=conversation_id,
254
373
  search_query=[query],
255
374
  max_results=max_results,
256
- types=["all"],
375
+ types=types,
257
376
  mode=mode,
258
377
  )
259
378
  append_step(timings, "context_fetch", step)
260
379
 
261
380
  step = time.perf_counter()
262
- results = []
263
- for fact in context.facts:
264
- results.append({"id": fact.id, "memory": fact.content, "score": fact.confidence})
265
- for preference in context.preferences:
266
- results.append(
267
- {"id": preference.id, "memory": preference.content, "score": preference.strength}
268
- )
269
- for episode in context.episodes:
270
- results.append(
271
- {"id": episode.id, "memory": episode.summary, "score": episode.significance}
272
- )
273
- for emotion in context.emotions:
274
- results.append(
275
- {"id": emotion.id, "memory": emotion.context, "score": emotion.intensity}
276
- )
381
+ results = flatten_context_items(context)
277
382
  append_step(timings, "map_context_results", step)
278
383
 
279
384
  return {
@@ -295,30 +400,28 @@ async def handle_get_memories(params: dict) -> dict:
295
400
  timings: List[dict] = []
296
401
 
297
402
  user_id = params["user_id"]
403
+ customer_id = params.get("customer_id")
298
404
  mode = params.get("mode", "fast")
405
+ conversation_id = params.get("conversation_id")
406
+ max_results = params.get("max_results", 100)
407
+ types = params.get("types", ["all"])
299
408
 
300
409
  start = time.perf_counter()
301
410
 
302
411
  step = time.perf_counter()
303
412
  context = await sdk.user.context.fetch(
304
413
  user_id=user_id,
414
+ customer_id=customer_id,
415
+ conversation_id=conversation_id,
305
416
  search_query=[],
306
- max_results=100,
307
- types=["all"],
417
+ max_results=max_results,
418
+ types=types,
308
419
  mode=mode,
309
420
  )
310
421
  append_step(timings, "context_fetch_all", step)
311
422
 
312
423
  step = time.perf_counter()
313
- memories = []
314
- for fact in context.facts:
315
- memories.append({"id": fact.id, "memory": fact.content})
316
- for preference in context.preferences:
317
- memories.append({"id": preference.id, "memory": preference.content})
318
- for episode in context.episodes:
319
- memories.append({"id": episode.id, "memory": episode.summary})
320
- for emotion in context.emotions:
321
- memories.append({"id": emotion.id, "memory": emotion.context})
424
+ memories = flatten_context_items(context)
322
425
  append_step(timings, "map_memories", step)
323
426
 
324
427
  return {
@@ -326,7 +429,113 @@ async def handle_get_memories(params: dict) -> dict:
326
429
  "latencyMs": ms_since(start),
327
430
  "memories": memories,
328
431
  "memoriesCount": len(memories),
432
+ "totalCount": len(memories),
329
433
  "rawResponse": context.raw if hasattr(context, "raw") else {},
434
+ "source": context.metadata.source if context.metadata else "unknown",
435
+ "bridgeTiming": {
436
+ "python_total_ms": ms_since(handler_start),
437
+ "steps": timings,
438
+ },
439
+ }
440
+
441
+
442
+ async def handle_fetch_user_context(params: dict) -> dict:
443
+ handler_start = time.perf_counter()
444
+ timings: List[dict] = []
445
+ start = time.perf_counter()
446
+
447
+ step = time.perf_counter()
448
+ context = await sdk.user.context.fetch(
449
+ user_id=params["user_id"],
450
+ customer_id=params.get("customer_id"),
451
+ conversation_id=params.get("conversation_id"),
452
+ search_query=params.get("search_query"),
453
+ max_results=params.get("max_results", 10),
454
+ types=params.get("types"),
455
+ mode=params.get("mode", "fast"),
456
+ )
457
+ append_step(timings, "context_fetch", step)
458
+
459
+ return {
460
+ "success": True,
461
+ "latencyMs": ms_since(start),
462
+ "context": serialize_context_response(context),
463
+ "bridgeTiming": {
464
+ "python_total_ms": ms_since(handler_start),
465
+ "steps": timings,
466
+ },
467
+ }
468
+
469
+
470
+ async def handle_fetch_customer_context(params: dict) -> dict:
471
+ handler_start = time.perf_counter()
472
+ timings: List[dict] = []
473
+ start = time.perf_counter()
474
+
475
+ step = time.perf_counter()
476
+ context = await sdk.customer.context.fetch(
477
+ customer_id=params["customer_id"],
478
+ conversation_id=params.get("conversation_id"),
479
+ search_query=params.get("search_query"),
480
+ max_results=params.get("max_results", 10),
481
+ types=params.get("types"),
482
+ mode=params.get("mode", "fast"),
483
+ )
484
+ append_step(timings, "context_fetch", step)
485
+
486
+ return {
487
+ "success": True,
488
+ "latencyMs": ms_since(start),
489
+ "context": serialize_context_response(context),
490
+ "bridgeTiming": {
491
+ "python_total_ms": ms_since(handler_start),
492
+ "steps": timings,
493
+ },
494
+ }
495
+
496
+
497
+ async def handle_fetch_client_context(params: dict) -> dict:
498
+ handler_start = time.perf_counter()
499
+ timings: List[dict] = []
500
+ start = time.perf_counter()
501
+
502
+ step = time.perf_counter()
503
+ context = await sdk.client.context.fetch(
504
+ conversation_id=params.get("conversation_id"),
505
+ search_query=params.get("search_query"),
506
+ max_results=params.get("max_results", 10),
507
+ types=params.get("types"),
508
+ mode=params.get("mode", "fast"),
509
+ )
510
+ append_step(timings, "context_fetch", step)
511
+
512
+ return {
513
+ "success": True,
514
+ "latencyMs": ms_since(start),
515
+ "context": serialize_context_response(context),
516
+ "bridgeTiming": {
517
+ "python_total_ms": ms_since(handler_start),
518
+ "steps": timings,
519
+ },
520
+ }
521
+
522
+
523
+ async def handle_get_context_for_prompt(params: dict) -> dict:
524
+ handler_start = time.perf_counter()
525
+ timings: List[dict] = []
526
+ start = time.perf_counter()
527
+
528
+ step = time.perf_counter()
529
+ response = await sdk.conversation.get_context_for_prompt(
530
+ conversation_id=params["conversation_id"],
531
+ style=params.get("style", "structured"),
532
+ )
533
+ append_step(timings, "get_context_for_prompt", step)
534
+
535
+ return {
536
+ "success": True,
537
+ "latencyMs": ms_since(start),
538
+ "context_for_prompt": serialize_context_for_prompt_response(response),
330
539
  "bridgeTiming": {
331
540
  "python_total_ms": ms_since(handler_start),
332
541
  "steps": timings,
@@ -339,6 +548,7 @@ async def handle_delete_memory(params: dict) -> dict:
339
548
  timings: List[dict] = []
340
549
 
341
550
  user_id = params["user_id"]
551
+ customer_id = params.get("customer_id")
342
552
  memory_id = params.get("memory_id")
343
553
 
344
554
  start = time.perf_counter()
@@ -350,6 +560,7 @@ async def handle_delete_memory(params: dict) -> dict:
350
560
  return {
351
561
  "success": True,
352
562
  "latencyMs": ms_since(start),
563
+ "deletedCount": 1,
353
564
  "rawResponse": {"deleted": 1},
354
565
  "bridgeTiming": {
355
566
  "python_total_ms": ms_since(handler_start),
@@ -357,11 +568,15 @@ async def handle_delete_memory(params: dict) -> dict:
357
568
  },
358
569
  }
359
570
 
360
- tracked_ids = user_memory_ids.get(user_id, [])
571
+ if not customer_id:
572
+ raise ValueError("customer_id is required when memory_id is not provided")
573
+
574
+ tracked_ids = user_memory_ids.get(tracking_key(user_id, customer_id), [])
361
575
  if not tracked_ids:
362
576
  return {
363
577
  "success": True,
364
578
  "latencyMs": 0,
579
+ "deletedCount": 0,
365
580
  "rawResponse": None,
366
581
  "note": "No tracked memory IDs for this user",
367
582
  "bridgeTiming": {
@@ -380,7 +595,7 @@ async def handle_delete_memory(params: dict) -> dict:
380
595
  last_error = str(exc)
381
596
  append_step(timings, "delete_tracked_memories", step)
382
597
 
383
- user_memory_ids.pop(user_id, None)
598
+ user_memory_ids.pop(tracking_key(user_id, customer_id), None)
384
599
 
385
600
  if last_error:
386
601
  return {
@@ -396,6 +611,7 @@ async def handle_delete_memory(params: dict) -> dict:
396
611
  return {
397
612
  "success": True,
398
613
  "latencyMs": ms_since(start),
614
+ "deletedCount": len(tracked_ids),
399
615
  "rawResponse": {"deleted": len(tracked_ids)},
400
616
  "note": f"Deleted {len(tracked_ids)} memories",
401
617
  "bridgeTiming": {
@@ -434,6 +650,10 @@ HANDLERS = {
434
650
  "add_memory": handle_add_memory,
435
651
  "search_memory": handle_search_memory,
436
652
  "get_memories": handle_get_memories,
653
+ "fetch_user_context": handle_fetch_user_context,
654
+ "fetch_customer_context": handle_fetch_customer_context,
655
+ "fetch_client_context": handle_fetch_client_context,
656
+ "get_context_for_prompt": handle_get_context_for_prompt,
437
657
  "delete_memory": handle_delete_memory,
438
658
  "shutdown": handle_shutdown,
439
659
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@maximem/synap-js-sdk",
3
- "version": "0.1.4",
3
+ "version": "0.2.0",
4
4
  "description": "JavaScript wrapper around the Synap Python SDK",
5
5
  "main": "src/index.js",
6
6
  "types": "types/index.d.ts",
@@ -64,6 +64,10 @@ function getWrapperTemplate() {
64
64
  type AddMemoryInput,
65
65
  type SearchMemoryInput,
66
66
  type GetMemoriesInput,
67
+ type FetchUserContextInput,
68
+ type FetchCustomerContextInput,
69
+ type FetchClientContextInput,
70
+ type GetContextForPromptInput,
67
71
  type DeleteMemoryInput,
68
72
  } from '@maximem/synap-js-sdk';
69
73
 
@@ -90,6 +94,22 @@ export class SynapTsClient {
90
94
  return this.client.getMemories(input);
91
95
  }
92
96
 
97
+ fetchUserContext(input: FetchUserContextInput) {
98
+ return this.client.fetchUserContext(input);
99
+ }
100
+
101
+ fetchCustomerContext(input: FetchCustomerContextInput) {
102
+ return this.client.fetchCustomerContext(input);
103
+ }
104
+
105
+ fetchClientContext(input?: FetchClientContextInput) {
106
+ return this.client.fetchClientContext(input);
107
+ }
108
+
109
+ getContextForPrompt(input: GetContextForPromptInput) {
110
+ return this.client.getContextForPrompt(input);
111
+ }
112
+
93
113
  deleteMemory(input: DeleteMemoryInput) {
94
114
  return this.client.deleteMemory(input);
95
115
  }
@@ -1,5 +1,200 @@
1
1
  const { BridgeManager } = require('./bridge-manager');
2
2
 
3
+ function pickDefined(...values) {
4
+ for (const value of values) {
5
+ if (value !== undefined) return value;
6
+ }
7
+ return undefined;
8
+ }
9
+
10
+ function normalizeTemporalFields(item = {}) {
11
+ return {
12
+ eventDate: pickDefined(item.eventDate, item.event_date, null),
13
+ validUntil: pickDefined(item.validUntil, item.valid_until, null),
14
+ temporalCategory: pickDefined(item.temporalCategory, item.temporal_category, null),
15
+ temporalConfidence: pickDefined(item.temporalConfidence, item.temporal_confidence, 0),
16
+ };
17
+ }
18
+
19
+ function normalizeMemoryItem(item = {}) {
20
+ return {
21
+ id: item.id || '',
22
+ memory: pickDefined(item.memory, item.content, ''),
23
+ score: item.score,
24
+ source: item.source,
25
+ metadata: item.metadata || {},
26
+ contextType: pickDefined(item.contextType, item.context_type),
27
+ ...normalizeTemporalFields(item),
28
+ };
29
+ }
30
+
31
+ function normalizeContextMetadata(metadata = {}) {
32
+ return {
33
+ correlationId: pickDefined(metadata.correlationId, metadata.correlation_id, ''),
34
+ ttlSeconds: pickDefined(metadata.ttlSeconds, metadata.ttl_seconds, 0),
35
+ source: metadata.source || 'unknown',
36
+ retrievedAt: pickDefined(metadata.retrievedAt, metadata.retrieved_at, null),
37
+ compactionApplied: pickDefined(metadata.compactionApplied, metadata.compaction_applied, null),
38
+ };
39
+ }
40
+
41
+ function normalizeConversationContext(value) {
42
+ if (!value) return null;
43
+ return {
44
+ summary: value.summary || null,
45
+ currentState: pickDefined(value.currentState, value.current_state, {}),
46
+ keyExtractions: pickDefined(value.keyExtractions, value.key_extractions, {}),
47
+ recentTurns: pickDefined(value.recentTurns, value.recent_turns, []),
48
+ compactionId: pickDefined(value.compactionId, value.compaction_id, null),
49
+ compactedAt: pickDefined(value.compactedAt, value.compacted_at, null),
50
+ conversationId: pickDefined(value.conversationId, value.conversation_id, null),
51
+ };
52
+ }
53
+
54
+ function normalizeFact(item = {}) {
55
+ return {
56
+ id: item.id || '',
57
+ content: item.content || '',
58
+ confidence: pickDefined(item.confidence, 0),
59
+ source: item.source || '',
60
+ extractedAt: pickDefined(item.extractedAt, item.extracted_at, null),
61
+ metadata: item.metadata || {},
62
+ ...normalizeTemporalFields(item),
63
+ };
64
+ }
65
+
66
+ function normalizePreference(item = {}) {
67
+ return {
68
+ id: item.id || '',
69
+ category: item.category || '',
70
+ content: item.content || '',
71
+ strength: pickDefined(item.strength, item.confidence, 0),
72
+ source: item.source || '',
73
+ extractedAt: pickDefined(item.extractedAt, item.extracted_at, null),
74
+ metadata: item.metadata || {},
75
+ ...normalizeTemporalFields(item),
76
+ };
77
+ }
78
+
79
+ function normalizeEpisode(item = {}) {
80
+ return {
81
+ id: item.id || '',
82
+ summary: pickDefined(item.summary, item.content, ''),
83
+ occurredAt: pickDefined(item.occurredAt, item.occurred_at, null),
84
+ significance: pickDefined(item.significance, item.confidence, 0),
85
+ participants: item.participants || [],
86
+ metadata: item.metadata || {},
87
+ ...normalizeTemporalFields(item),
88
+ };
89
+ }
90
+
91
+ function normalizeEmotion(item = {}) {
92
+ return {
93
+ id: item.id || '',
94
+ emotionType: pickDefined(item.emotionType, item.emotion_type, ''),
95
+ intensity: pickDefined(item.intensity, item.confidence, 0),
96
+ detectedAt: pickDefined(item.detectedAt, item.detected_at, null),
97
+ context: item.context || '',
98
+ metadata: item.metadata || {},
99
+ ...normalizeTemporalFields(item),
100
+ };
101
+ }
102
+
103
+ function normalizeTemporalEvent(item = {}) {
104
+ return {
105
+ id: item.id || '',
106
+ content: item.content || '',
107
+ eventDate: pickDefined(item.eventDate, item.event_date, null),
108
+ validUntil: pickDefined(item.validUntil, item.valid_until, null),
109
+ temporalCategory: pickDefined(item.temporalCategory, item.temporal_category, ''),
110
+ temporalConfidence: pickDefined(item.temporalConfidence, item.temporal_confidence, 0),
111
+ confidence: pickDefined(item.confidence, 0),
112
+ source: item.source || '',
113
+ extractedAt: pickDefined(item.extractedAt, item.extracted_at, null),
114
+ metadata: item.metadata || {},
115
+ };
116
+ }
117
+
118
+ function normalizeContextResponse(result = {}) {
119
+ const context = result.context || result;
120
+ return {
121
+ facts: (context.facts || []).map(normalizeFact),
122
+ preferences: (context.preferences || []).map(normalizePreference),
123
+ episodes: (context.episodes || []).map(normalizeEpisode),
124
+ emotions: (context.emotions || []).map(normalizeEmotion),
125
+ temporalEvents: (pickDefined(context.temporalEvents, context.temporal_events, []) || []).map(normalizeTemporalEvent),
126
+ conversationContext: normalizeConversationContext(
127
+ pickDefined(context.conversationContext, context.conversation_context, null)
128
+ ),
129
+ metadata: normalizeContextMetadata(context.metadata || {}),
130
+ rawResponse: pickDefined(context.rawResponse, context.raw_response, {}),
131
+ bridgeTiming: result.bridgeTiming,
132
+ };
133
+ }
134
+
135
+ function normalizeRecentMessage(item = {}) {
136
+ return {
137
+ role: item.role || 'user',
138
+ content: item.content || '',
139
+ timestamp: item.timestamp || null,
140
+ messageId: pickDefined(item.messageId, item.message_id, ''),
141
+ };
142
+ }
143
+
144
+ function normalizeContextForPromptResult(result = {}) {
145
+ const payload = pickDefined(result.contextForPrompt, result.context_for_prompt, result);
146
+ return {
147
+ formattedContext: pickDefined(payload.formattedContext, payload.formatted_context, null),
148
+ available: !!payload.available,
149
+ isStale: !!pickDefined(payload.isStale, payload.is_stale, false),
150
+ compressionRatio: pickDefined(payload.compressionRatio, payload.compression_ratio, null),
151
+ validationScore: pickDefined(payload.validationScore, payload.validation_score, null),
152
+ compactionAgeSeconds: pickDefined(payload.compactionAgeSeconds, payload.compaction_age_seconds, null),
153
+ qualityWarning: !!pickDefined(payload.qualityWarning, payload.quality_warning, false),
154
+ recentMessages: (pickDefined(payload.recentMessages, payload.recent_messages, []) || []).map(normalizeRecentMessage),
155
+ recentMessageCount: pickDefined(payload.recentMessageCount, payload.recent_message_count, 0),
156
+ compactedMessageCount: pickDefined(payload.compactedMessageCount, payload.compacted_message_count, 0),
157
+ totalMessageCount: pickDefined(payload.totalMessageCount, payload.total_message_count, 0),
158
+ bridgeTiming: result.bridgeTiming,
159
+ };
160
+ }
161
+
162
+ function normalizeSearchMemoryResult(result = {}) {
163
+ return {
164
+ success: !!result.success,
165
+ latencyMs: result.latencyMs || 0,
166
+ results: (result.results || []).map(normalizeMemoryItem),
167
+ resultsCount: pickDefined(result.resultsCount, result.results?.length, 0),
168
+ rawResponse: result.rawResponse || {},
169
+ source: result.source,
170
+ bridgeTiming: result.bridgeTiming,
171
+ };
172
+ }
173
+
174
+ function normalizeGetMemoriesResult(result = {}) {
175
+ const memories = (result.memories || []).map(normalizeMemoryItem);
176
+ return {
177
+ success: !!result.success,
178
+ latencyMs: result.latencyMs || 0,
179
+ memories,
180
+ totalCount: pickDefined(result.totalCount, result.memoriesCount, memories.length, 0),
181
+ rawResponse: pickDefined(result.rawResponse, null),
182
+ source: result.source,
183
+ bridgeTiming: result.bridgeTiming,
184
+ };
185
+ }
186
+
187
+ function normalizeDeleteMemoryResult(result = {}) {
188
+ return {
189
+ success: !!result.success,
190
+ latencyMs: result.latencyMs || 0,
191
+ deletedCount: pickDefined(result.deletedCount, result.rawResponse?.deleted, 0),
192
+ rawResponse: pickDefined(result.rawResponse, null),
193
+ note: result.note,
194
+ bridgeTiming: result.bridgeTiming,
195
+ };
196
+ }
197
+
3
198
  class SynapClient {
4
199
  constructor(options = {}) {
5
200
  this.bridge = new BridgeManager(options);
@@ -14,42 +209,122 @@ class SynapClient {
14
209
  await this.bridge.ensureStarted();
15
210
  }
16
211
 
17
- async addMemory({ userId, messages, mode }) {
212
+ async addMemory({
213
+ userId,
214
+ customerId,
215
+ conversationId,
216
+ sessionId,
217
+ messages,
218
+ mode,
219
+ documentType,
220
+ documentId,
221
+ documentCreatedAt,
222
+ metadata,
223
+ }) {
18
224
  this.#assert(userId, 'userId is required');
225
+ this.#assert(customerId, 'customerId is required');
19
226
  this.#assert(Array.isArray(messages), 'messages must be an array');
20
227
 
21
228
  const params = { user_id: userId, messages };
229
+ params.customer_id = customerId;
230
+ if (conversationId !== undefined) params.conversation_id = conversationId;
231
+ if (sessionId !== undefined) params.session_id = sessionId;
22
232
  if (mode !== undefined) params.mode = mode;
233
+ if (documentType !== undefined) params.document_type = documentType;
234
+ if (documentId !== undefined) params.document_id = documentId;
235
+ if (documentCreatedAt !== undefined) params.document_created_at = documentCreatedAt;
236
+ if (metadata !== undefined) params.metadata = metadata;
23
237
 
24
238
  return this.bridge.call('add_memory', params, this.options.ingestTimeoutMs);
25
239
  }
26
240
 
27
- async searchMemory({ userId, query, maxResults = 10, mode }) {
241
+ async searchMemory({ userId, customerId, query, maxResults = 10, mode, conversationId, types }) {
28
242
  this.#assert(userId, 'userId is required');
29
243
  this.#assert(query, 'query is required');
244
+ this.#assertArray(types, 'types must be an array when provided');
30
245
 
31
246
  const params = { user_id: userId, query, max_results: maxResults };
247
+ if (customerId !== undefined) params.customer_id = customerId;
32
248
  if (mode !== undefined) params.mode = mode;
249
+ if (conversationId !== undefined) params.conversation_id = conversationId;
250
+ if (types !== undefined) params.types = types;
33
251
 
34
- return this.bridge.call('search_memory', params);
252
+ return normalizeSearchMemoryResult(await this.bridge.call('search_memory', params));
35
253
  }
36
254
 
37
- async getMemories({ userId, mode }) {
255
+ async getMemories({ userId, customerId, mode, conversationId, maxResults, types }) {
38
256
  this.#assert(userId, 'userId is required');
257
+ this.#assertArray(types, 'types must be an array when provided');
39
258
 
40
259
  const params = { user_id: userId };
260
+ if (customerId !== undefined) params.customer_id = customerId;
41
261
  if (mode !== undefined) params.mode = mode;
262
+ if (conversationId !== undefined) params.conversation_id = conversationId;
263
+ if (maxResults !== undefined) params.max_results = maxResults;
264
+ if (types !== undefined) params.types = types;
42
265
 
43
- return this.bridge.call('get_memories', params);
266
+ return normalizeGetMemoriesResult(await this.bridge.call('get_memories', params));
44
267
  }
45
268
 
46
- async deleteMemory({ userId, memoryId = null }) {
269
+ async fetchUserContext({ userId, customerId, conversationId, searchQuery, maxResults = 10, types, mode }) {
47
270
  this.#assert(userId, 'userId is required');
271
+ this.#assertArray(searchQuery, 'searchQuery must be an array when provided');
272
+ this.#assertArray(types, 'types must be an array when provided');
48
273
 
49
- return this.bridge.call('delete_memory', {
50
- user_id: userId,
51
- memory_id: memoryId,
52
- });
274
+ const params = { user_id: userId, max_results: maxResults };
275
+ if (customerId !== undefined) params.customer_id = customerId;
276
+ if (conversationId !== undefined) params.conversation_id = conversationId;
277
+ if (searchQuery !== undefined) params.search_query = searchQuery;
278
+ if (types !== undefined) params.types = types;
279
+ if (mode !== undefined) params.mode = mode;
280
+
281
+ return normalizeContextResponse(await this.bridge.call('fetch_user_context', params));
282
+ }
283
+
284
+ async fetchCustomerContext({ customerId, conversationId, searchQuery, maxResults = 10, types, mode }) {
285
+ this.#assert(customerId, 'customerId is required');
286
+ this.#assertArray(searchQuery, 'searchQuery must be an array when provided');
287
+ this.#assertArray(types, 'types must be an array when provided');
288
+
289
+ const params = { customer_id: customerId, max_results: maxResults };
290
+ if (conversationId !== undefined) params.conversation_id = conversationId;
291
+ if (searchQuery !== undefined) params.search_query = searchQuery;
292
+ if (types !== undefined) params.types = types;
293
+ if (mode !== undefined) params.mode = mode;
294
+
295
+ return normalizeContextResponse(await this.bridge.call('fetch_customer_context', params));
296
+ }
297
+
298
+ async fetchClientContext({ conversationId, searchQuery, maxResults = 10, types, mode } = {}) {
299
+ this.#assertArray(searchQuery, 'searchQuery must be an array when provided');
300
+ this.#assertArray(types, 'types must be an array when provided');
301
+
302
+ const params = { max_results: maxResults };
303
+ if (conversationId !== undefined) params.conversation_id = conversationId;
304
+ if (searchQuery !== undefined) params.search_query = searchQuery;
305
+ if (types !== undefined) params.types = types;
306
+ if (mode !== undefined) params.mode = mode;
307
+
308
+ return normalizeContextResponse(await this.bridge.call('fetch_client_context', params));
309
+ }
310
+
311
+ async getContextForPrompt({ conversationId, style } = {}) {
312
+ this.#assert(conversationId, 'conversationId is required');
313
+
314
+ const params = { conversation_id: conversationId };
315
+ if (style !== undefined) params.style = style;
316
+
317
+ return normalizeContextForPromptResult(await this.bridge.call('get_context_for_prompt', params));
318
+ }
319
+
320
+ async deleteMemory({ userId, customerId, memoryId = null }) {
321
+ this.#assert(userId, 'userId is required');
322
+ if (memoryId == null) this.#assert(customerId, 'customerId is required when memoryId is not provided');
323
+
324
+ const params = { user_id: userId, memory_id: memoryId };
325
+ if (customerId !== undefined) params.customer_id = customerId;
326
+
327
+ return normalizeDeleteMemoryResult(await this.bridge.call('delete_memory', params));
53
328
  }
54
329
 
55
330
  async shutdown() {
@@ -60,6 +335,10 @@ class SynapClient {
60
335
  if (!value) throw new Error(message);
61
336
  }
62
337
 
338
+ #assertArray(value, message) {
339
+ if (value !== undefined && !Array.isArray(value)) throw new Error(message);
340
+ }
341
+
63
342
  #registerShutdownHooks() {
64
343
  const close = async () => {
65
344
  try {
package/types/index.d.ts CHANGED
@@ -1,4 +1,28 @@
1
1
/** Severity levels emitted by the bridge log handler. */
export type LogLevel = 'debug' | 'error';
/** Retrieval trade-off selector accepted by search/context calls. */
export type RetrievalMode = 'fast' | 'accurate';
/** Ingestion trade-off selector accepted by addMemory. */
export type IngestMode = 'fast' | 'long-range';
/** Output formatting variants for getContextForPrompt. */
export type PromptStyle = 'structured' | 'narrative' | 'bullet_points';
/** Context categories that can be requested (plural form, as filters). */
export type ContextType =
  | 'all'
  | 'facts'
  | 'preferences'
  | 'episodes'
  | 'emotions'
  | 'temporal_events';
/** Context category tag on a flattened item (singular form). */
export type FlattenedContextType =
  | 'fact'
  | 'preference'
  | 'episode'
  | 'emotion'
  | 'temporal_event';
/** Source document kinds accepted when ingesting memories. */
export type DocumentType =
  | 'ai-chat-conversation'
  | 'document'
  | 'email'
  | 'pdf'
  | 'image'
  | 'audio'
  | 'meeting-transcript';
2
26
 
3
27
  export interface BridgeLogHandler {
4
28
  (level: LogLevel, message: string): void;
@@ -31,30 +55,81 @@ export interface SynapClientOptions {
31
55
  }
32
56
 
33
57
/** One conversational turn supplied to addMemory. */
export interface ChatMessage {
  /** Speaker of the turn; optional — default behavior decided by the bridge. */
  role?: 'user' | 'assistant';
  /** Message text. */
  content: string;
  /** Optional per-message string metadata. */
  metadata?: Record<string, string>;
}
37
62
 
38
63
/** Input for SynapClient.addMemory. userId and customerId are required. */
export interface AddMemoryInput {
  userId: string;
  customerId: string;
  conversationId?: string;
  sessionId?: string;
  /** Conversation turns to ingest. */
  messages: ChatMessage[];
  /** Ingestion mode; behavior of each value is decided server-side. */
  mode?: IngestMode;
  documentType?: DocumentType;
  documentId?: string;
  /** Presumably an ISO-8601 timestamp string — TODO confirm expected format. */
  documentCreatedAt?: string;
  /** Free-form metadata attached to the ingested memory. */
  metadata?: Record<string, unknown>;
}
42
75
 
43
76
/** Input for SynapClient.searchMemory. */
export interface SearchMemoryInput {
  userId: string;
  customerId?: string;
  /** Natural-language search query. */
  query: string;
  maxResults?: number;
  mode?: RetrievalMode;
  conversationId?: string;
  /** Restrict results to these context categories. */
  types?: ContextType[];
}
48
85
 
49
86
/** Input for SynapClient.getMemories (listing without a search query). */
export interface GetMemoriesInput {
  userId: string;
  customerId?: string;
  mode?: RetrievalMode;
  conversationId?: string;
  maxResults?: number;
  /** Restrict results to these context categories. */
  types?: ContextType[];
}
52
94
 
53
95
/**
 * Input for SynapClient.deleteMemory. The runtime implementation requires
 * customerId whenever memoryId is null/omitted.
 */
export interface DeleteMemoryInput {
  userId: string;
  customerId?: string;
  /** Target memory; null/omitted triggers the customer-scoped delete path. */
  memoryId?: string | null;
}
57
100
 
101
/** Input for fetchUserContext: context scoped to a single end user. */
export interface FetchUserContextInput {
  userId: string;
  customerId?: string;
  conversationId?: string;
  /** One or more query strings; must be an array when provided. */
  searchQuery?: string[];
  /** Defaults to 10 in the runtime implementation. */
  maxResults?: number;
  types?: ContextType[];
  mode?: RetrievalMode;
}

/** Input for fetchCustomerContext: context scoped to a customer account. */
export interface FetchCustomerContextInput {
  customerId: string;
  conversationId?: string;
  /** One or more query strings; must be an array when provided. */
  searchQuery?: string[];
  /** Defaults to 10 in the runtime implementation. */
  maxResults?: number;
  types?: ContextType[];
  mode?: RetrievalMode;
}

/** Input for fetchClientContext: client-wide context, all fields optional. */
export interface FetchClientContextInput {
  conversationId?: string;
  /** One or more query strings; must be an array when provided. */
  searchQuery?: string[];
  /** Defaults to 10 in the runtime implementation. */
  maxResults?: number;
  types?: ContextType[];
  mode?: RetrievalMode;
}

/** Input for getContextForPrompt. conversationId is required at runtime. */
export interface GetContextForPromptInput {
  conversationId: string;
  style?: PromptStyle;
}
132
+
58
133
  export interface BridgeStepTiming {
59
134
  step: string;
60
135
  ms: number;
@@ -65,6 +140,115 @@ export interface BridgeTiming {
65
140
  steps: BridgeStepTiming[];
66
141
  }
67
142
 
143
/** Temporal attributes shared by facts, preferences, episodes and emotions. */
export interface TemporalFields {
  /** Presumably ISO-8601 date strings — TODO confirm format with the bridge. */
  eventDate: string | null;
  validUntil: string | null;
  temporalCategory: string | null;
  temporalConfidence: number;
}
149
+
150
/** A single extracted fact about the user. */
export interface Fact extends TemporalFields {
  id: string;
  content: string;
  confidence: number;
  source: string;
  extractedAt: string | null;
  metadata: Record<string, unknown>;
}

/** An extracted user preference within a category. */
export interface Preference extends TemporalFields {
  id: string;
  category: string;
  content: string;
  /** Numeric strength of the preference; scale decided by the bridge. */
  strength: number;
  source: string;
  extractedAt: string | null;
  metadata: Record<string, unknown>;
}

/** A summarized past event involving the user. */
export interface Episode extends TemporalFields {
  id: string;
  summary: string;
  occurredAt: string | null;
  significance: number;
  participants: string[];
  metadata: Record<string, unknown>;
}

/** A detected emotional signal in the conversation history. */
export interface Emotion extends TemporalFields {
  id: string;
  emotionType: string;
  intensity: number;
  detectedAt: string | null;
  context: string;
  metadata: Record<string, unknown>;
}

/**
 * A dated event. Unlike TemporalFields, temporalCategory is non-nullable
 * here — NOTE(review): intentional divergence? confirm with bridge schema.
 */
export interface TemporalEvent {
  id: string;
  content: string;
  eventDate: string | null;
  validUntil: string | null;
  temporalCategory: string;
  temporalConfidence: number;
  confidence: number;
  source: string;
  extractedAt: string | null;
  metadata: Record<string, unknown>;
}
199
+
200
/** A recent raw conversation message returned alongside compacted context. */
export interface RecentMessage {
  role: string;
  content: string;
  timestamp: string | null;
  messageId: string;
}

/** Bookkeeping metadata attached to every context response. */
export interface ContextResponseMetadata {
  correlationId: string;
  ttlSeconds: number;
  source: string;
  retrievedAt: string | null;
  /** Presumably an identifier of the compaction applied, if any — confirm. */
  compactionApplied: string | null;
}

/** Compacted state of a single conversation. */
export interface ConversationContext {
  summary: string | null;
  currentState: Record<string, unknown>;
  /** Extraction results grouped by kind; inner shape decided by the bridge. */
  keyExtractions: Record<string, Array<Record<string, unknown>>>;
  recentTurns: Array<Record<string, unknown>>;
  compactionId: string | null;
  compactedAt: string | null;
  conversationId: string | null;
}
224
+
225
/** Normalized result of the fetchUserContext/fetchCustomerContext/fetchClientContext calls. */
export interface ContextResponse {
  facts: Fact[];
  preferences: Preference[];
  episodes: Episode[];
  emotions: Emotion[];
  temporalEvents: TemporalEvent[];
  /** Present only when a conversation was in scope. */
  conversationContext: ConversationContext | null;
  metadata: ContextResponseMetadata;
  /** Unprocessed bridge payload, kept for debugging/forward compatibility. */
  rawResponse: Record<string, unknown>;
  bridgeTiming?: BridgeTiming;
}

/** Normalized result of getContextForPrompt. */
export interface ContextForPromptResult {
  /** Ready-to-inject prompt text; null when no context is available. */
  formattedContext: string | null;
  available: boolean;
  isStale: boolean;
  compressionRatio: number | null;
  validationScore: number | null;
  compactionAgeSeconds: number | null;
  qualityWarning: boolean;
  recentMessages: RecentMessage[];
  recentMessageCount: number;
  compactedMessageCount: number;
  totalMessageCount: number;
  bridgeTiming?: BridgeTiming;
}
251
+
68
252
  export interface AddMemoryResult {
69
253
  success: boolean;
70
254
  latencyMs: number;
@@ -73,10 +257,13 @@ export interface AddMemoryResult {
73
257
  bridgeTiming?: BridgeTiming;
74
258
  }
75
259
 
76
/** A single hit returned by searchMemory, flattened across context kinds. */
export interface SearchMemoryItem extends TemporalFields {
  id: string;
  /** The memory text. */
  memory: string;
  /** Relevance score when the bridge supplies one. */
  score?: number;
  source?: string;
  metadata: Record<string, unknown>;
  /** Which context category this item was flattened from. */
  contextType?: FlattenedContextType;
}
81
268
 
82
269
  export interface SearchMemoryResult {
@@ -89,22 +276,23 @@ export interface SearchMemoryResult {
89
276
  bridgeTiming?: BridgeTiming;
90
277
  }
91
278
 
92
/** Item shape returned by getMemories; identical to SearchMemoryItem. */
export interface MemoryItem extends SearchMemoryItem {}
96
280
 
97
281
/** Result of SynapClient.getMemories. */
export interface GetMemoriesResult {
  success: boolean;
  latencyMs: number;
  memories: MemoryItem[];
  totalCount: number;
  /** Unprocessed bridge payload; null when unavailable. */
  rawResponse: Record<string, unknown> | null;
  source?: string;
  bridgeTiming?: BridgeTiming;
}
104
290
 
105
291
/** Result of SynapClient.deleteMemory. */
export interface DeleteMemoryResult {
  success: boolean;
  latencyMs: number;
  deletedCount: number;
  /** Unprocessed bridge payload; null when unavailable. */
  rawResponse: Record<string, unknown> | null;
  /** Optional human-readable note from the bridge. */
  note?: string;
  bridgeTiming?: BridgeTiming;
}
@@ -115,6 +303,10 @@ export class SynapClient {
115
303
  addMemory(input: AddMemoryInput): Promise<AddMemoryResult>;
116
304
  searchMemory(input: SearchMemoryInput): Promise<SearchMemoryResult>;
117
305
  getMemories(input: GetMemoriesInput): Promise<GetMemoriesResult>;
306
+ fetchUserContext(input: FetchUserContextInput): Promise<ContextResponse>;
307
+ fetchCustomerContext(input: FetchCustomerContextInput): Promise<ContextResponse>;
308
+ fetchClientContext(input?: FetchClientContextInput): Promise<ContextResponse>;
309
+ getContextForPrompt(input: GetContextForPromptInput): Promise<ContextForPromptResult>;
118
310
  deleteMemory(input: DeleteMemoryInput): Promise<DeleteMemoryResult>;
119
311
  shutdown(): Promise<void>;
120
312
  }