@maximem/synap-js-sdk 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,493 @@
1
#!/usr/bin/env python3
"""Synap SDK Bridge: JSON-RPC over stdin/stdout for the Node.js wrapper.

Protocol (one JSON object per line):
    stdin  -> {"id": 1, "method": "init", "params": {...}}\n
    stdout <- {"id": 1, "result": {...}, "error": null}\n
Methods:
    init, add_memory, search_memory, get_memories, delete_memory, shutdown
"""

import asyncio
import json
import logging
import sys
import time
import traceback
from typing import Dict, List, Optional
from uuid import UUID

# Keep stdout clean for protocol responses.
logging.basicConfig(
    stream=sys.stderr,
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)
logger = logging.getLogger("synap.bridge")

try:
    from maximem_synap import MaximemSynapSDK
except Exception as import_error:  # pragma: no cover
    # Give the Node.js wrapper an actionable message on stderr before dying.
    sys.stderr.write(
        "Failed to import maximem_synap. Run `synap-js-sdk setup` first.\n"
    )
    sys.stderr.write(f"Import error: {import_error}\n")
    raise

# Singleton SDK instance, created by handle_init and torn down by handle_shutdown.
sdk = None
# Per-user list of memory IDs created through this bridge session;
# consumed by handle_delete_memory when no explicit memory_id is given.
user_memory_ids: Dict[str, List[str]] = {}
39
+
40
+
41
def ms_since(start: float) -> int:
    """Return whole milliseconds elapsed since *start* (a perf_counter value)."""
    elapsed = time.perf_counter() - start
    return int(elapsed * 1000)
43
+
44
+
45
def append_step(timings: List[dict], step: str, started: float) -> None:
    """Record a named timing entry (elapsed ms since *started*) onto *timings*."""
    entry = {"step": step, "ms": ms_since(started)}
    timings.append(entry)
47
+
48
+
49
def write_response(obj: dict) -> None:
    """Serialize *obj* as one JSON line on stdout and flush immediately.

    default=str keeps non-JSON types (UUIDs, enums) from aborting a response.
    """
    payload = json.dumps(obj, default=str)
    sys.stdout.write(payload + "\n")
    sys.stdout.flush()
52
+
53
+
54
def messages_to_text(messages: List[dict]) -> str:
    """Flatten chat messages into a plain "Role: content" transcript.

    Messages with missing/blank content are skipped; any role other than
    "assistant" renders as "User".
    """
    rendered: List[str] = []
    for entry in messages:
        text = (entry.get("content") or "").strip()
        if not text:
            continue
        prefix = "Assistant" if entry.get("role") == "assistant" else "User"
        rendered.append(f"{prefix}: {text}")
    return "\n".join(rendered)
63
+
64
+
65
def on_anticipated_context(bundle: dict) -> None:
    """Log a truncated preview of an anticipated-context bundle pushed over gRPC."""
    preview = json.dumps(bundle, default=str)[:300]
    logger.info("Anticipated context bundle received: %s", preview)
70
+
71
+
72
async def handle_init(params: dict) -> dict:
    """Construct, configure, and initialize the module-level SDK singleton.

    params keys (all optional unless noted):
        instance_id        -- SDK instance identifier (defaults to "")
        bootstrap_token    -- auth token; "bootstrap_key" accepted as alias
        base_url, grpc_host, grpc_port, grpc_use_tls -- transport overrides

    Returns a dict with success/instance_id/grpc_listening plus bridgeTiming.
    A gRPC listen failure is non-fatal: init still succeeds with
    grpc_listening=False.
    """
    global sdk

    handler_start = time.perf_counter()
    timings: List[dict] = []

    instance_id = params.get("instance_id", "")
    # Accept either key name from the Node.js side.
    bootstrap_token = params.get("bootstrap_token") or params.get("bootstrap_key")

    step = time.perf_counter()
    try:
        sdk = MaximemSynapSDK(
            instance_id=instance_id,
            bootstrap_token=bootstrap_token,
            _force_new=True,
        )
    except TypeError:
        # Backward compatibility with older SDK constructors.
        sdk = MaximemSynapSDK(instance_id=instance_id, _force_new=True)
    append_step(timings, "construct_sdk", step)

    config_kwargs = {"log_level": "DEBUG", "cache_backend": "sqlite"}

    # Only forward transport settings the caller explicitly provided.
    if params.get("base_url"):
        config_kwargs["api_base_url"] = params["base_url"]
    if params.get("grpc_host"):
        config_kwargs["grpc_host"] = params["grpc_host"]
    if params.get("grpc_port"):
        config_kwargs["grpc_port"] = int(params["grpc_port"])
    if "grpc_use_tls" in params:
        config_kwargs["grpc_use_tls"] = bool(params["grpc_use_tls"])

    step = time.perf_counter()
    sdk.configure(**config_kwargs)
    append_step(timings, "configure_sdk", step)

    step = time.perf_counter()
    if bootstrap_token:
        await sdk.initialize(bootstrap_token=bootstrap_token)
    else:
        await sdk.initialize()
    append_step(timings, "initialize_sdk", step)

    grpc_listening = False

    step = time.perf_counter()
    try:
        await sdk.instance.listen(
            on_context=on_anticipated_context,
            on_reconnect=lambda attempt: logger.info(
                "gRPC reconnect attempt %d", attempt
            ),
            on_disconnect=lambda reason: logger.warning(
                "gRPC disconnected: %s", reason
            ),
        )
        grpc_listening = sdk.instance.is_listening
    except Exception as exc:  # non-fatal
        logger.warning("gRPC listen failed (non-fatal): %s", exc)
    append_step(timings, "start_grpc_listener", step)

    return {
        "success": True,
        "instance_id": instance_id,
        "bootstrap_token_used": bool(bootstrap_token),
        "grpc_listening": grpc_listening,
        "bridgeTiming": {
            "python_total_ms": ms_since(handler_start),
            "steps": timings,
        },
    }
143
+
144
+
145
async def handle_add_memory(params: dict) -> dict:
    """Ingest a chat transcript as long-range memory for a user.

    Builds a "Role: content" transcript from params["messages"], submits it
    via sdk.memories.create, mirrors individual messages over the live gRPC
    stream when listening (best-effort), then polls for ingestion completion
    with a 60-second budget.

    params: user_id (required), messages (required, list of role/content dicts).
    Returns a dict with success/latencyMs/rawResponse/note/bridgeTiming.
    """
    handler_start = time.perf_counter()
    timings: List[dict] = []

    user_id = params["user_id"]
    messages = params["messages"]

    step = time.perf_counter()
    transcript = messages_to_text(messages)
    append_step(timings, "build_transcript", step)

    if not transcript:
        # Nothing ingestible (all messages empty/blank) — succeed as a no-op.
        return {
            "success": True,
            "latencyMs": 0,
            "rawResponse": {"note": "Empty input; skipped"},
            "note": "No text content to ingest",
            "bridgeTiming": {
                "python_total_ms": ms_since(handler_start),
                "steps": timings,
            },
        }

    start = time.perf_counter()

    step = time.perf_counter()
    create_result = await sdk.memories.create(
        document=transcript,
        document_type="ai-chat-conversation",
        user_id=user_id,
        mode="long-range",
    )
    append_step(timings, "memories_create", step)

    ingestion_id = create_result.ingestion_id

    step = time.perf_counter()
    if sdk.instance.is_listening:
        # Mirror each non-empty message on the gRPC stream; failures here
        # never fail the ingestion itself.
        for message in messages:
            content = (message.get("content") or "").strip()
            if not content:
                continue
            try:
                await sdk.instance.send_message(
                    content=content,
                    role=message.get("role", "user"),
                    user_id=user_id,
                )
            except Exception as exc:
                logger.debug("gRPC send_message failed (non-fatal): %s", exc)
    append_step(timings, "grpc_send_messages", step)

    step = time.perf_counter()
    try:
        final_status = await sdk.memories.wait_for_completion(
            ingestion_id,
            timeout_seconds=60,
            poll_interval_seconds=2,
        )
    except (TimeoutError, asyncio.TimeoutError):
        # On Python < 3.11 asyncio raises asyncio.TimeoutError, which is NOT
        # the builtin TimeoutError (they are aliases only since 3.11). Catch
        # both so the graceful timeout response works on all interpreters;
        # the SDK presumably times out via asyncio.wait_for — TODO confirm.
        append_step(timings, "wait_for_completion_timeout", step)
        return {
            "success": True,
            "latencyMs": ms_since(start),
            "rawResponse": {"ingestion_id": str(ingestion_id), "status": "timeout"},
            "note": "Ingestion timed out after 60s and may still complete later",
            "bridgeTiming": {
                "python_total_ms": ms_since(handler_start),
                "steps": timings,
            },
        }
    append_step(timings, "wait_for_completion", step)

    memory_ids = [str(memory_id) for memory_id in (final_status.memory_ids or [])]
    if memory_ids:
        # Track created IDs so handle_delete_memory can clean up per user.
        user_memory_ids.setdefault(user_id, []).extend(memory_ids)

    return {
        "success": final_status.status.value != "failed",
        "latencyMs": ms_since(start),
        "rawResponse": {
            "ingestion_id": str(ingestion_id),
            "status": final_status.status.value,
            "memories_created": final_status.memories_created,
            "memory_ids": memory_ids,
        },
        "note": f"Ingestion {final_status.status.value}; created {final_status.memories_created}",
        "bridgeTiming": {
            "python_total_ms": ms_since(handler_start),
            "steps": timings,
        },
    }
237
+
238
+
239
async def handle_search_memory(params: dict) -> dict:
    """Search a user's memories via a context fetch and flatten the results.

    Facts, preferences, episodes, and emotions are mapped — in that order —
    into {"id", "memory", "score"} records.

    params: user_id (required), query (required), max_results (default 10).
    """
    handler_start = time.perf_counter()
    timings: List[dict] = []

    user_id = params["user_id"]
    query = params["query"]
    max_results = params.get("max_results", 10)

    start = time.perf_counter()

    step = time.perf_counter()
    context = await sdk.user.context.fetch(
        user_id=user_id,
        search_query=[query],
        max_results=max_results,
        types=["all"],
        mode="fast",
    )
    append_step(timings, "context_fetch", step)

    step = time.perf_counter()
    results = (
        [{"id": f.id, "memory": f.content, "score": f.confidence}
         for f in context.facts]
        + [{"id": p.id, "memory": p.content, "score": p.strength}
           for p in context.preferences]
        + [{"id": e.id, "memory": e.summary, "score": e.significance}
           for e in context.episodes]
        + [{"id": e.id, "memory": e.context, "score": e.intensity}
           for e in context.emotions]
    )
    append_step(timings, "map_context_results", step)

    return {
        "success": True,
        "latencyMs": ms_since(start),
        "results": results,
        "resultsCount": len(results),
        "rawResponse": getattr(context, "raw", {}),
        "source": context.metadata.source if context.metadata else "unknown",
        "bridgeTiming": {
            "python_total_ms": ms_since(handler_start),
            "steps": timings,
        },
    }
289
+
290
+
291
async def handle_get_memories(params: dict) -> dict:
    """Fetch up to 100 memories of every type for a user and flatten them.

    Same mapping order as handle_search_memory (facts, preferences,
    episodes, emotions) but without scores.
    """
    handler_start = time.perf_counter()
    timings: List[dict] = []

    user_id = params["user_id"]

    start = time.perf_counter()

    step = time.perf_counter()
    context = await sdk.user.context.fetch(
        user_id=user_id,
        search_query=[],
        max_results=100,
        types=["all"],
        mode="fast",
    )
    append_step(timings, "context_fetch_all", step)

    step = time.perf_counter()
    memories = (
        [{"id": f.id, "memory": f.content} for f in context.facts]
        + [{"id": p.id, "memory": p.content} for p in context.preferences]
        + [{"id": e.id, "memory": e.summary} for e in context.episodes]
        + [{"id": e.id, "memory": e.context} for e in context.emotions]
    )
    append_step(timings, "map_memories", step)

    return {
        "success": True,
        "latencyMs": ms_since(start),
        "memories": memories,
        "memoriesCount": len(memories),
        "rawResponse": getattr(context, "raw", {}),
        "bridgeTiming": {
            "python_total_ms": ms_since(handler_start),
            "steps": timings,
        },
    }
332
+
333
+
334
async def handle_delete_memory(params: dict) -> dict:
    """Delete one memory (params["memory_id"]) or all tracked memories for a user.

    With memory_id: delete it and drop it from the per-user tracking list so
    a later delete-all does not retry an already-deleted ID (which would
    raise in the SDK and falsely report success=False).
    Without memory_id: best-effort delete of every tracked ID for the user,
    reporting the last failure message, if any.
    """
    handler_start = time.perf_counter()
    timings: List[dict] = []

    user_id = params["user_id"]
    memory_id = params.get("memory_id")

    start = time.perf_counter()

    step = time.perf_counter()
    if memory_id:
        await sdk.memories.delete(UUID(memory_id))
        # Keep per-user tracking consistent with the store.
        tracked = user_memory_ids.get(user_id)
        if tracked and memory_id in tracked:
            tracked.remove(memory_id)
        append_step(timings, "delete_single_memory", step)
        return {
            "success": True,
            "latencyMs": ms_since(start),
            "rawResponse": {"deleted": 1},
            "bridgeTiming": {
                "python_total_ms": ms_since(handler_start),
                "steps": timings,
            },
        }

    tracked_ids = user_memory_ids.get(user_id, [])
    if not tracked_ids:
        return {
            "success": True,
            "latencyMs": 0,
            "rawResponse": None,
            "note": "No tracked memory IDs for this user",
            "bridgeTiming": {
                "python_total_ms": ms_since(handler_start),
                "steps": timings,
            },
        }

    last_error: Optional[str] = None

    step = time.perf_counter()
    for tracked_id in tracked_ids:
        try:
            await sdk.memories.delete(UUID(tracked_id))
        except Exception as exc:
            # Best-effort: keep deleting the rest, surface the last failure.
            last_error = str(exc)
    append_step(timings, "delete_tracked_memories", step)

    user_memory_ids.pop(user_id, None)

    if last_error:
        return {
            "success": False,
            "latencyMs": ms_since(start),
            "error": last_error,
            "bridgeTiming": {
                "python_total_ms": ms_since(handler_start),
                "steps": timings,
            },
        }

    return {
        "success": True,
        "latencyMs": ms_since(start),
        "rawResponse": {"deleted": len(tracked_ids)},
        "note": f"Deleted {len(tracked_ids)} memories",
        "bridgeTiming": {
            "python_total_ms": ms_since(handler_start),
            "steps": timings,
        },
    }
403
+
404
+
405
async def handle_shutdown(_params: dict) -> dict:
    """Stop the gRPC listener (best-effort) and shut the SDK down.

    Safe to call when the SDK was never initialized: the body is skipped and
    a success response with empty timings is returned.
    """
    began = time.perf_counter()
    steps: List[dict] = []

    if sdk:
        mark = time.perf_counter()
        try:
            await sdk.instance.stop_listening()
        except Exception:
            # The listener may never have started; shutdown proceeds anyway.
            pass
        append_step(steps, "stop_listener", mark)

        mark = time.perf_counter()
        await sdk.shutdown()
        append_step(steps, "sdk_shutdown", mark)

    return {
        "success": True,
        "bridgeTiming": {
            "python_total_ms": ms_since(began),
            "steps": steps,
        },
    }
427
+
428
+
429
# Dispatch table: JSON-RPC method name -> async handler coroutine.
HANDLERS = {
    "init": handle_init,
    "add_memory": handle_add_memory,
    "search_memory": handle_search_memory,
    "get_memories": handle_get_memories,
    "delete_memory": handle_delete_memory,
    "shutdown": handle_shutdown,
}
437
+
438
+
439
async def main() -> None:
    """Run the JSON-RPC loop: one request per stdin line, one response per
    stdout line, until stdin closes.

    Every handler error is caught and reported as an error response so a
    single bad request never kills the bridge process.
    """
    logger.info("Synap bridge starting")

    reader = asyncio.StreamReader()
    protocol = asyncio.StreamReaderProtocol(reader)
    # get_running_loop() — not the deprecated get_event_loop() — since this
    # coroutine always runs inside asyncio.run().
    await asyncio.get_running_loop().connect_read_pipe(lambda: protocol, sys.stdin)

    while True:
        line = await reader.readline()
        if not line:
            logger.info("stdin closed; shutting down bridge")
            if sdk and getattr(sdk, "_initialized", False):
                await handle_shutdown({})
            break

        # errors="replace" keeps one bad byte sequence from crashing the
        # bridge; such a request fails JSON parsing and gets an error reply.
        payload = line.decode("utf-8", errors="replace").strip()
        if not payload:
            continue

        try:
            request = json.loads(payload)
        except json.JSONDecodeError as exc:
            write_response({"id": None, "result": None, "error": f"Invalid JSON: {exc}"})
            continue

        req_id = request.get("id")
        method = request.get("method")
        params = request.get("params", {})

        handler = HANDLERS.get(method)
        if not handler:
            write_response({"id": req_id, "result": None, "error": f"Unknown method: {method}"})
            continue

        try:
            handler_started = time.perf_counter()
            result = await handler(params)
            # Guarantee bridgeTiming on every successful response so the
            # Node.js wrapper can rely on its presence.
            if isinstance(result, dict):
                bridge_timing = result.get("bridgeTiming")
                if isinstance(bridge_timing, dict):
                    bridge_timing.setdefault("python_total_ms", ms_since(handler_started))
                    bridge_timing.setdefault("steps", [])
                else:
                    result["bridgeTiming"] = {
                        "python_total_ms": ms_since(handler_started),
                        "steps": [],
                    }
            write_response({"id": req_id, "result": result, "error": None})
        except Exception as exc:
            logger.error("Handler error for %s: %s", method, traceback.format_exc())
            write_response({"id": req_id, "result": None, "error": str(exc)})
491
+
492
# Entry point: run the JSON-RPC bridge loop until stdin closes.
if __name__ == "__main__":
    asyncio.run(main())
package/package.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "name": "@maximem/synap-js-sdk",
3
+ "version": "0.1.1",
4
+ "description": "JavaScript wrapper around the Synap Python SDK",
5
+ "main": "src/index.js",
6
+ "types": "types/index.d.ts",
7
+ "type": "commonjs",
8
+ "bin": {
9
+ "synap-js-sdk": "bin/synap-js-sdk.js"
10
+ },
11
+ "files": [
12
+ "src",
13
+ "types",
14
+ "bridge/synap_bridge.py",
15
+ "bin",
16
+ "README.md",
17
+ ".env.example"
18
+ ],
19
+ "scripts": {
20
+ "setup": "node bin/synap-js-sdk.js setup",
21
+ "setup:ts": "node bin/synap-js-sdk.js setup-ts",
22
+ "check": "node -e \"const sdk=require('./src'); console.log(Object.keys(sdk));\""
23
+ },
24
+ "engines": {
25
+ "node": ">=18"
26
+ },
27
+ "publishConfig": {
28
+ "access": "public"
29
+ },
30
+ "keywords": [
31
+ "synap",
32
+ "sdk",
33
+ "python",
34
+ "wrapper",
35
+ "bridge"
36
+ ],
37
+ "license": "MIT"
38
+ }