langgraph-api 0.0.26__py3-none-any.whl → 0.0.28__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of langgraph-api might be problematic.

Files changed (53)
  1. langgraph_api/api/__init__.py +2 -0
  2. langgraph_api/api/assistants.py +43 -13
  3. langgraph_api/api/meta.py +2 -1
  4. langgraph_api/api/runs.py +14 -1
  5. langgraph_api/api/ui.py +68 -0
  6. langgraph_api/asyncio.py +43 -4
  7. langgraph_api/auth/middleware.py +2 -2
  8. langgraph_api/cli.py +72 -57
  9. langgraph_api/config.py +23 -1
  10. langgraph_api/cron_scheduler.py +1 -1
  11. langgraph_api/graph.py +5 -0
  12. langgraph_api/http.py +24 -7
  13. langgraph_api/js/.gitignore +2 -0
  14. langgraph_api/js/build.mts +49 -3
  15. langgraph_api/js/client.mts +84 -40
  16. langgraph_api/js/global.d.ts +1 -0
  17. langgraph_api/js/package.json +15 -7
  18. langgraph_api/js/remote.py +662 -16
  19. langgraph_api/js/src/graph.mts +5 -4
  20. langgraph_api/js/sse.py +138 -0
  21. langgraph_api/js/tests/api.test.mts +28 -0
  22. langgraph_api/js/tests/compose-postgres.yml +2 -2
  23. langgraph_api/js/tests/graphs/agent.css +1 -0
  24. langgraph_api/js/tests/graphs/agent.ui.tsx +10 -0
  25. langgraph_api/js/tests/graphs/package.json +2 -2
  26. langgraph_api/js/tests/graphs/yarn.lock +13 -13
  27. langgraph_api/js/yarn.lock +710 -1187
  28. langgraph_api/lifespan.py +15 -5
  29. langgraph_api/logging.py +9 -0
  30. langgraph_api/metadata.py +5 -1
  31. langgraph_api/middleware/http_logger.py +1 -1
  32. langgraph_api/patch.py +2 -0
  33. langgraph_api/queue_entrypoint.py +63 -0
  34. langgraph_api/schema.py +2 -0
  35. langgraph_api/stream.py +1 -0
  36. langgraph_api/webhook.py +42 -0
  37. langgraph_api/{queue.py → worker.py} +52 -166
  38. {langgraph_api-0.0.26.dist-info → langgraph_api-0.0.28.dist-info}/METADATA +8 -8
  39. {langgraph_api-0.0.26.dist-info → langgraph_api-0.0.28.dist-info}/RECORD +49 -46
  40. langgraph_storage/database.py +8 -22
  41. langgraph_storage/inmem_stream.py +108 -0
  42. langgraph_storage/ops.py +80 -57
  43. langgraph_storage/queue.py +126 -103
  44. langgraph_storage/retry.py +5 -1
  45. langgraph_storage/store.py +5 -1
  46. openapi.json +3 -3
  47. langgraph_api/js/client.new.mts +0 -861
  48. langgraph_api/js/remote_new.py +0 -694
  49. langgraph_api/js/remote_old.py +0 -667
  50. langgraph_api/js/server_sent_events.py +0 -126
  51. {langgraph_api-0.0.26.dist-info → langgraph_api-0.0.28.dist-info}/LICENSE +0 -0
  52. {langgraph_api-0.0.26.dist-info → langgraph_api-0.0.28.dist-info}/WHEEL +0 -0
  53. {langgraph_api-0.0.26.dist-info → langgraph_api-0.0.28.dist-info}/entry_points.txt +0 -0
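
Note the module move at entry 37: `queue.py` becomes `worker.py`, with the queue consumer's entrypoint split out into the new `queue_entrypoint.py`. Code that imported the old path directly will break on upgrade; below is a minimal compatibility sketch (module-level only — the symbols `worker.py` exports are not visible in this listing):

```python
# Hedged shim for the langgraph_api/{queue.py -> worker.py} rename in 0.0.28.
# Prefer the new module; fall back to the pre-rename path on older versions.
try:
    from langgraph_api import worker  # 0.0.28 and later
except ImportError:
    from langgraph_api import queue as worker  # 0.0.26 and earlier
```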
langgraph_api/js/remote.py
@@ -1,18 +1,664 @@
-from langgraph_api.config import FF_JS_ZEROMQ_ENABLED
-
-if FF_JS_ZEROMQ_ENABLED:
-    from langgraph_api.js.remote_new import (  # noqa: I001
-        run_js_process,  # noqa: F401
-        RemotePregel,  # noqa: F401
-        run_remote_checkpointer,  # noqa: F401
-        wait_until_js_ready,  # noqa: F401
-        js_healthcheck,  # noqa: F401
+import asyncio
+import os
+import shutil
+import ssl
+from collections.abc import AsyncIterator
+from typing import Any, Literal
+
+import certifi
+import httpx
+import orjson
+import structlog
+import uvicorn
+from langchain_core.runnables.config import RunnableConfig
+from langchain_core.runnables.graph import Edge, Node
+from langchain_core.runnables.graph import Graph as DrawableGraph
+from langchain_core.runnables.schema import (
+    CustomStreamEvent,
+    StandardStreamEvent,
+    StreamEvent,
+)
+from langgraph.checkpoint.serde.base import SerializerProtocol
+from langgraph.pregel.types import PregelTask, StateSnapshot
+from langgraph.store.base import GetOp, Item, ListNamespacesOp, PutOp, SearchOp
+from langgraph.types import Command, Interrupt
+from pydantic import BaseModel
+from starlette.applications import Starlette
+from starlette.exceptions import HTTPException
+from starlette.requests import Request
+from starlette.routing import Route
+
+from langgraph_api.js.base import BaseRemotePregel
+from langgraph_api.js.errors import RemoteException
+from langgraph_api.js.sse import SSEDecoder, aiter_lines_raw
+from langgraph_api.route import ApiResponse
+from langgraph_api.serde import json_dumpb
+from langgraph_api.utils import AsyncConnectionProto
+
+logger = structlog.stdlib.get_logger(__name__)
+
+GRAPH_PORT = 5556
+REMOTE_PORT = 5555
+SSL = ssl.create_default_context(cafile=certifi.where())
+
+if port := int(os.getenv("PORT", "8080")):
+    if port in (GRAPH_PORT, REMOTE_PORT):
+        raise ValueError(
+            f"PORT={port} is a reserved port for the JS worker. Please choose a different port."
+        )
+
+_client = httpx.AsyncClient(
+    base_url=f"http://localhost:{GRAPH_PORT}",
+    timeout=httpx.Timeout(15.0),  # 3 x HEARTBEAT_MS
+    limits=httpx.Limits(),
+    transport=httpx.AsyncHTTPTransport(verify=SSL),
+)
+
+
+async def _client_stream(method: str, data: dict[str, Any]):
+    graph_id = data.get("graph_id")
+    async with _client.stream(
+        "POST",
+        f"/{graph_id}/{method}",
+        headers={
+            "Accept": "text/event-stream",
+            "Cache-Control": "no-store",
+            "Content-Type": "application/json",
+        },
+        data=orjson.dumps(data),
+    ) as response:
+        decoder = SSEDecoder()
+        async for line in aiter_lines_raw(response):
+            sse = decoder.decode(line)
+            if sse is not None:
+                if sse.event == "error":
+                    raise RemoteException(sse.data["error"], sse.data["message"])
+                yield sse.data
+
+
+async def _client_invoke(method: str, data: dict[str, Any]):
+    graph_id = data.get("graph_id")
+    res = await _client.post(
+        f"/{graph_id}/{method}",
+        headers={"Content-Type": "application/json"},
+        data=orjson.dumps(data),
+    )
+    return res.json()
+
+
+class RemotePregel(BaseRemotePregel):
+    @staticmethod
+    def load(graph_id: str):
+        model = RemotePregel()
+        model.graph_id = graph_id
+        return model
+
+    async def astream_events(
+        self,
+        input: Any,
+        config: RunnableConfig | None = None,
+        *,
+        version: Literal["v1", "v2"],
+        **kwargs: Any,
+    ) -> AsyncIterator[StreamEvent]:
+        if version != "v2":
+            raise ValueError("Only v2 of astream_events is supported")
+
+        data = {
+            "graph_id": self.graph_id,
+            "command" if isinstance(input, Command) else "input": input,
+            "config": config,
+            **kwargs,
+        }
+
+        async for event in _client_stream("streamEvents", data):
+            if event["event"] == "on_custom_event":
+                yield CustomStreamEvent(**event)
+            else:
+                yield StandardStreamEvent(**event)
+
+    async def fetch_state_schema(self):
+        return await _client_invoke("getSchema", {"graph_id": self.graph_id})
+
+    async def fetch_graph(
+        self,
+        config: RunnableConfig | None = None,
+        *,
+        xray: int | bool = False,
+    ) -> DrawableGraph:
+        response = await _client_invoke(
+            "getGraph", {"graph_id": self.graph_id, "config": config, "xray": xray}
+        )
+
+        nodes: list[Any] = response.pop("nodes")
+        edges: list[Any] = response.pop("edges")
+
+        class NoopModel(BaseModel):
+            pass
+
+        return DrawableGraph(
+            {
+                data["id"]: Node(
+                    data["id"], data["id"], NoopModel(), data.get("metadata")
+                )
+                for data in nodes
+            },
+            {
+                Edge(
+                    data["source"],
+                    data["target"],
+                    data.get("data"),
+                    data.get("conditional", False),
+                )
+                for data in edges
+            },
+        )
+
+    async def fetch_subgraphs(
+        self, *, namespace: str | None = None, recurse: bool = False
+    ) -> dict[str, dict]:
+        return await _client_invoke(
+            "getSubgraphs",
+            {"graph_id": self.graph_id, "namespace": namespace, "recurse": recurse},
+        )
+
+    def _convert_state_snapshot(self, item: dict) -> StateSnapshot:
+        def _convert_tasks(tasks: list[dict]) -> tuple[PregelTask, ...]:
+            result: list[PregelTask] = []
+            for task in tasks:
+                state = task.get("state")
+
+                if state and isinstance(state, dict) and "config" in state:
+                    state = self._convert_state_snapshot(state)
+
+                result.append(
+                    PregelTask(
+                        task["id"],
+                        task["name"],
+                        tuple(task["path"]) if task.get("path") else tuple(),
+                        # TODO: figure out how to properly deserialise errors
+                        task.get("error"),
+                        (
+                            tuple(
+                                Interrupt(
+                                    value=interrupt["value"],
+                                    when=interrupt["when"],
+                                    resumable=interrupt.get("resumable", True),
+                                    ns=interrupt.get("ns"),
+                                )
+                                for interrupt in task.get("interrupts")
+                            )
+                            if task.get("interrupts")
+                            else []
+                        ),
+                        state,
+                    )
+                )
+            return tuple(result)
+
+        return StateSnapshot(
+            item.get("values"),
+            item.get("next"),
+            item.get("config"),
+            item.get("metadata"),
+            item.get("createdAt"),
+            item.get("parentConfig"),
+            _convert_tasks(item.get("tasks", [])),
+        )
+
+    async def aget_state(
+        self, config: RunnableConfig, *, subgraphs: bool = False
+    ) -> StateSnapshot:
+        return self._convert_state_snapshot(
+            await _client_invoke(
+                "getState",
+                {"graph_id": self.graph_id, "config": config, "subgraphs": subgraphs},
+            )
+        )
+
+    async def aupdate_state(
+        self,
+        config: RunnableConfig,
+        values: dict[str, Any] | Any,
+        as_node: str | None = None,
+    ) -> RunnableConfig:
+        response = await _client_invoke(
+            "updateState",
+            {
+                "graph_id": self.graph_id,
+                "config": config,
+                "values": values,
+                "as_node": as_node,
+            },
+        )
+        return RunnableConfig(**response)
+
+    async def aget_state_history(
+        self,
+        config: RunnableConfig,
+        *,
+        filter: dict[str, Any] | None = None,
+        before: RunnableConfig | None = None,
+        limit: int | None = None,
+    ) -> AsyncIterator[StateSnapshot]:
+        async for event in _client_stream(
+            "getStateHistory",
+            {
+                "graph_id": self.graph_id,
+                "config": config,
+                "limit": limit,
+                "filter": filter,
+                "before": before,
+            },
+        ):
+            yield self._convert_state_snapshot(event)
+
+    def get_graph(
+        self,
+        config: RunnableConfig | None = None,
+        *,
+        xray: int | bool = False,
+    ) -> dict[str, Any]:
+        raise Exception("Not implemented")
+
+    def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
+        raise Exception("Not implemented")
+
+    def get_output_schema(
+        self, config: RunnableConfig | None = None
+    ) -> type[BaseModel]:
+        raise Exception("Not implemented")
+
+    def config_schema(self) -> type[BaseModel]:
+        raise Exception("Not implemented")
+
+    async def invoke(self, input: Any, config: RunnableConfig | None = None):
+        raise Exception("Not implemented")
+
+
+async def run_js_process(paths_str: str, watch: bool = False):
+    # check if tsx is available
+    tsx_path = shutil.which("tsx")
+    if tsx_path is None:
+        raise FileNotFoundError("tsx not found in PATH")
+    attempt = 0
+    while not asyncio.current_task().cancelled():
+        client_file = os.path.join(os.path.dirname(__file__), "client.mts")
+        args = ("tsx", client_file)
+        if watch:
+            args = ("tsx", "watch", client_file, "--skip-schema-cache")
+        try:
+            process = await asyncio.create_subprocess_exec(
+                *args,
+                env={
+                    "LANGSERVE_GRAPHS": paths_str,
+                    "LANGCHAIN_CALLBACKS_BACKGROUND": "true",
+                    "NODE_ENV": "development" if watch else "production",
+                    "CHOKIDAR_USEPOLLING": "true",
+                    **os.environ,
+                },
+            )
+            code = await process.wait()
+            raise Exception(f"JS process exited with code {code}")
+        except asyncio.CancelledError:
+            logger.info("Terminating JS graphs process")
+            try:
+                process.terminate()
+                await process.wait()
+            except (UnboundLocalError, ProcessLookupError):
+                pass
+            raise
+        except Exception:
+            if attempt >= 3:
+                raise
+            else:
+                logger.warning(f"Retrying JS process {3 - attempt} more times...")
+                attempt += 1
+
+
+def _get_passthrough_checkpointer(conn: AsyncConnectionProto):
+    from langgraph_storage.checkpoint import Checkpointer
+
+    class PassthroughSerialiser(SerializerProtocol):
+        def dumps(self, obj: Any) -> bytes:
+            return json_dumpb(obj)
+
+        def dumps_typed(self, obj: Any) -> tuple[str, bytes]:
+            return "json", json_dumpb(obj)
+
+        def loads(self, data: bytes) -> Any:
+            return orjson.loads(data)
+
+        def loads_typed(self, data: tuple[str, bytes]) -> Any:
+            type, payload = data
+            if type != "json":
+                raise ValueError(f"Unsupported type {type}")
+            return orjson.loads(payload)
+
+    checkpointer = Checkpointer(conn)
+
+    # This checkpointer does not attempt to revive LC-objects.
+    # Instead, it will pass through the JSON values as-is.
+    checkpointer.serde = PassthroughSerialiser()
+
+    return checkpointer
+
+
+def _get_passthrough_store():
+    from langgraph_storage.store import Store
+
+    return Store()
+
+
+# Setup a HTTP server on top of CHECKPOINTER_SOCKET unix socket
+# used by `client.mts` to communicate with the Python checkpointer
+async def run_remote_checkpointer():
+    from langgraph_storage.database import connect
+
+    async def checkpointer_list(payload: dict):
+        """Search checkpoints"""
+
+        result = []
+        async with connect() as conn:
+            checkpointer = _get_passthrough_checkpointer(conn)
+            async for item in checkpointer.alist(
+                config=payload.get("config"),
+                limit=payload.get("limit"),
+                before=payload.get("before"),
+                filter=payload.get("filter"),
+            ):
+                result.append(item)
+
+        return result
+
+    async def checkpointer_put(payload: dict):
+        """Put the new checkpoint metadata"""
+
+        async with connect() as conn:
+            checkpointer = _get_passthrough_checkpointer(conn)
+            return await checkpointer.aput(
+                payload["config"],
+                payload["checkpoint"],
+                payload["metadata"],
+                payload.get("new_versions", {}),
+            )
+
+    async def checkpointer_get_tuple(payload: dict):
+        """Get actual checkpoint values (reads)"""
+
+        async with connect() as conn:
+            checkpointer = _get_passthrough_checkpointer(conn)
+            return await checkpointer.aget_tuple(config=payload["config"])
+
+    async def checkpointer_put_writes(payload: dict):
+        """Put actual checkpoint values (writes)"""
+
+        async with connect() as conn:
+            checkpointer = _get_passthrough_checkpointer(conn)
+            return await checkpointer.aput_writes(
+                payload["config"],
+                payload["writes"],
+                payload["taskId"],
+            )
+
+    async def store_batch(payload: dict):
+        """Batch operations on the store"""
+        operations = payload.get("operations", [])
+
+        if not operations:
+            raise ValueError("No operations provided")
+
+        # Convert raw operations to proper objects
+        processed_operations = []
+        for op in operations:
+            if "value" in op:
+                processed_operations.append(
+                    PutOp(
+                        namespace=tuple(op["namespace"]),
+                        key=op["key"],
+                        value=op["value"],
+                    )
+                )
+            elif "namespace_prefix" in op:
+                processed_operations.append(
+                    SearchOp(
+                        namespace_prefix=tuple(op["namespace_prefix"]),
+                        filter=op.get("filter"),
+                        limit=op.get("limit", 10),
+                        offset=op.get("offset", 0),
+                    )
+                )
+
+            elif "namespace" in op and "key" in op:
+                processed_operations.append(
+                    GetOp(namespace=tuple(op["namespace"]), key=op["key"])
+                )
+            elif "match_conditions" in op:
+                processed_operations.append(
+                    ListNamespacesOp(
+                        match_conditions=tuple(op["match_conditions"]),
+                        max_depth=op.get("max_depth"),
+                        limit=op.get("limit", 100),
+                        offset=op.get("offset", 0),
+                    )
+                )
+            else:
+                raise ValueError(f"Unknown operation type: {op}")
+
+        store = _get_passthrough_store()
+        results = await store.abatch(processed_operations)
+
+        # Handle potentially undefined or non-dict results
+        processed_results = []
+        # Result is of type: Union[Item, list[Item], list[tuple[str, ...]], None]
+        for result in results:
+            if isinstance(result, Item):
+                processed_results.append(result.dict())
+            elif isinstance(result, dict):
+                processed_results.append(result)
+            elif isinstance(result, list):
+                coerced = []
+                for res in result:
+                    if isinstance(res, Item):
+                        coerced.append(res.dict())
+                    elif isinstance(res, tuple):
+                        coerced.append(list(res))
+                    elif res is None:
+                        coerced.append(res)
+                    else:
+                        coerced.append(str(res))
+                processed_results.append(coerced)
+            elif result is None:
+                processed_results.append(None)
+            else:
+                processed_results.append(str(result))
+        return processed_results
+
+    async def store_get(payload: dict):
+        """Get store data"""
+        namespaces_str = payload.get("namespace")
+        key = payload.get("key")
+
+        if not namespaces_str or not key:
+            raise ValueError("Both namespaces and key are required")
+
+        namespaces = namespaces_str.split(".")
+
+        store = _get_passthrough_store()
+        result = await store.aget(namespaces, key)
+
+        return result
+
+    async def store_put(payload: dict):
+        """Put the new store data"""
+
+        namespace = tuple(payload["namespace"].split("."))
+        key = payload["key"]
+        value = payload["value"]
+        index = payload.get("index")
+
+        store = _get_passthrough_store()
+        await store.aput(namespace, key, value, index=index)
+
+        return {"success": True}
+
+    async def store_search(payload: dict):
+        """Search stores"""
+        namespace_prefix = tuple(payload["namespace_prefix"])
+        filter = payload.get("filter")
+        limit = payload.get("limit", 10)
+        offset = payload.get("offset", 0)
+        query = payload.get("query")
+
+        store = _get_passthrough_store()
+        result = await store.asearch(
+            namespace_prefix, filter=filter, limit=limit, offset=offset, query=query
+        )
+
+        return [item.dict() for item in result]
+
+    async def store_delete(payload: dict):
+        """Delete store data"""
+
+        namespace = tuple(payload["namespace"])
+        key = payload["key"]
+
+        store = _get_passthrough_store()
+        await store.adelete(namespace, key)
+
+        return {"success": True}
+
+    async def store_list_namespaces(payload: dict):
+        """List all namespaces"""
+        prefix = tuple(payload.get("prefix", [])) or None
+        suffix = tuple(payload.get("suffix", [])) or None
+        max_depth = payload.get("max_depth")
+        limit = payload.get("limit", 100)
+        offset = payload.get("offset", 0)
+
+        store = _get_passthrough_store()
+        result = await store.alist_namespaces(
+            prefix=prefix,
+            suffix=suffix,
+            max_depth=max_depth,
+            limit=limit,
+            offset=offset,
+        )
+
+        return [list(ns) for ns in result]
+
+    def wrap_handler(cb):
+        async def wrapped(request: Request):
+            try:
+                payload = orjson.loads(await request.body())
+                return ApiResponse(await cb(payload))
+            except ValueError as exc:
+                return ApiResponse({"error": str(exc)}, status_code=400)
+
+        return wrapped
+
+    remote = Starlette(
+        routes=[
+            Route(
+                "/checkpointer_get_tuple",
+                wrap_handler(checkpointer_get_tuple),
+                methods=["POST"],
+            ),
+            Route(
+                "/checkpointer_list", wrap_handler(checkpointer_list), methods=["POST"]
+            ),
+            Route(
+                "/checkpointer_put", wrap_handler(checkpointer_put), methods=["POST"]
+            ),
+            Route(
+                "/checkpointer_put_writes",
+                wrap_handler(checkpointer_put_writes),
+                methods=["POST"],
+            ),
+            Route("/store_get", wrap_handler(store_get), methods=["POST"]),
+            Route("/store_put", wrap_handler(store_put), methods=["POST"]),
+            Route("/store_delete", wrap_handler(store_delete), methods=["POST"]),
+            Route("/store_search", wrap_handler(store_search), methods=["POST"]),
+            Route(
+                "/store_list_namespaces",
+                wrap_handler(store_list_namespaces),
+                methods=["POST"],
+            ),
+            Route("/store_batch", wrap_handler(store_batch), methods=["POST"]),
+            Route("/ok", lambda _: ApiResponse({"ok": True}), methods=["GET"]),
+        ]
     )
-else:
-    from langgraph_api.js.remote_old import (  # noqa: I001
-        run_js_process,  # noqa: F401
-        RemotePregel,  # noqa: F401
-        run_remote_checkpointer,  # noqa: F401
-        wait_until_js_ready,  # noqa: F401
-        js_healthcheck,  # noqa: F401
+
+    server = uvicorn.Server(
+        uvicorn.Config(
+            remote,
+            port=REMOTE_PORT,
+            # We need to _explicitly_ set these values in order
+            # to avoid reinitialising the logger, which removes
+            # the structlog logger setup before.
+            # See: https://github.com/encode/uvicorn/blob/8f4c8a7f34914c16650ebd026127b96560425fde/uvicorn/config.py#L357-L393
+            log_config=None,
+            log_level=None,
+            access_log=True,
+        )
     )
+    await server.serve()
+
+
+async def wait_until_js_ready():
+    async with (
+        httpx.AsyncClient(
+            base_url=f"http://localhost:{GRAPH_PORT}",
+            limits=httpx.Limits(max_connections=1),
+            transport=httpx.AsyncHTTPTransport(verify=SSL),
+        ) as graph_client,
+        httpx.AsyncClient(
+            base_url=f"http://localhost:{REMOTE_PORT}",
+            limits=httpx.Limits(max_connections=1),
+            transport=httpx.AsyncHTTPTransport(verify=SSL),
+        ) as checkpointer_client,
+    ):
+        attempt = 0
+        while not asyncio.current_task().cancelled():
+            try:
+                res = await graph_client.get("/ok")
+                res.raise_for_status()
+                res = await checkpointer_client.get("/ok")
+                res.raise_for_status()
+                return
+            except httpx.HTTPError:
+                if attempt > 240:
+                    raise
+                else:
+                    attempt += 1
+                    await asyncio.sleep(0.5)
+
+
+async def js_healthcheck():
+    async with (
+        httpx.AsyncClient(
+            base_url=f"http://localhost:{GRAPH_PORT}",
+            limits=httpx.Limits(max_connections=1),
+            transport=httpx.AsyncHTTPTransport(verify=SSL),
+        ) as graph_client,
+        httpx.AsyncClient(
+            base_url=f"http://localhost:{REMOTE_PORT}",
+            limits=httpx.Limits(max_connections=1),
+            transport=httpx.AsyncHTTPTransport(verify=SSL),
+        ) as checkpointer_client,
+    ):
+        try:
+            res = await graph_client.get("/ok")
+            res.raise_for_status()
+            res = await checkpointer_client.get("/ok")
+            res.raise_for_status()
+            return True
+        except httpx.HTTPError as exc:
+            logger.warning(
+                "JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
+                error=exc,
+            )
+            raise HTTPException(
+                status_code=500,
+                detail="JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
+            ) from exc
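
Taken together, the rewritten `remote.py` drops the `FF_JS_ZEROMQ_ENABLED` feature flag and the duplicate `remote_new.py`/`remote_old.py` modules in favor of a single HTTP/SSE transport: Python calls the JS sidecar on port 5556 for graph operations, and the sidecar calls back into the Starlette checkpointer/store server on port 5555. A hedged usage sketch of the new surface — it assumes the sidecar started by `run_js_process` is already serving, and `"agent"` is a hypothetical graph id:

```python
# Sketch only: exercises RemotePregel over the new HTTP/SSE transport.
# Requires the JS sidecar (run_js_process) to be listening on GRAPH_PORT.
import asyncio

from langgraph_api.js.remote import RemotePregel


async def main() -> None:
    pregel = RemotePregel.load("agent")  # "agent" is a hypothetical graph_id
    # streamEvents is proxied to POST /{graph_id}/streamEvents on the sidecar;
    # this wrapper accepts only the v2 event schema.
    async for event in pregel.astream_events(
        {"messages": [{"role": "user", "content": "hi"}]},  # input shape depends on the graph
        version="v2",
    ):
        print(event["event"], event.get("name"))


asyncio.run(main())
```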
langgraph_api/js/src/graph.mts
@@ -82,9 +82,10 @@ export async function resolveGraph(
   return { sourceFile, exportSymbol, resolved };
 }
 
-export async function runGraphSchemaWorker(spec: GraphSpec) {
-  const SCHEMA_RESOLVE_TIMEOUT_MS = 30_000;
-
+export async function runGraphSchemaWorker(
+  spec: GraphSpec,
+  options?: { timeoutMs?: number }
+) {
   return await new Promise<Record<string, GraphSchema>>((resolve, reject) => {
     const worker = new Worker(
       new URL("./parser/parser.worker.mjs", import.meta.url).pathname
@@ -94,7 +95,7 @@ export async function runGraphSchemaWorker(spec: GraphSpec) {
     const timeoutId = setTimeout(() => {
       worker.terminate();
       reject(new Error("Schema extract worker timed out"));
-    }, SCHEMA_RESOLVE_TIMEOUT_MS);
+    }, options?.timeoutMs ?? 30_000);
 
     worker.on("message", (result) => {
       worker.terminate();
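
The streaming half of the transport rides on the new `langgraph_api/js/sse.py` (`SSEDecoder`, `aiter_lines_raw`), whose internals are not shown in this diff. For orientation, here is a stand-in decoder under standard SSE framing assumptions — `event:`/`data:` field lines accumulate until a blank line ends the event, and `data` payloads are JSON, which is what `_client_stream` expects. The shipped decoder may differ:

```python
# Minimal SSE decoder sketch (not the shipped SSEDecoder): feed it raw lines,
# it returns an event whenever a blank line closes one, else None.
from dataclasses import dataclass
from typing import Any

import orjson


@dataclass
class ServerSentEvent:
    event: str | None
    data: Any


class MiniSSEDecoder:
    def __init__(self) -> None:
        self._event: str | None = None
        self._data: list[bytes] = []

    def decode(self, line: bytes) -> ServerSentEvent | None:
        line = line.rstrip(b"\r\n")
        if not line:  # blank line terminates the current event
            if not self._data:
                self._event = None
                return None
            sse = ServerSentEvent(self._event, orjson.loads(b"\n".join(self._data)))
            self._event, self._data = None, []
            return sse
        name, _, value = line.partition(b":")
        value = value.lstrip(b" ")
        if name == b"event":
            self._event = value.decode()
        elif name == b"data":
            self._data.append(value)
        return None  # field consumed, event not complete yet


decoder = MiniSSEDecoder()
frames = [b"event: error\n", b'data: {"error": "ValueError", "message": "boom"}\n', b"\n"]
for frame in frames:
    if (sse := decoder.decode(frame)) is not None:
        print(sse.event, sse.data)  # -> error {'error': 'ValueError', 'message': 'boom'}
```

This covers only the fields `_client_stream` consumes; a full implementation would also handle `id:`, `retry:`, and comment lines.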