langgraph-api 0.4.48__py3-none-any.whl → 0.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langgraph-api might be problematic. Click here for more details.

Files changed (33) hide show
  1. langgraph_api/__init__.py +1 -1
  2. langgraph_api/api/assistants.py +65 -61
  3. langgraph_api/api/meta.py +6 -0
  4. langgraph_api/api/threads.py +1 -1
  5. langgraph_api/auth/custom.py +29 -24
  6. langgraph_api/config.py +56 -1
  7. langgraph_api/graph.py +1 -1
  8. langgraph_api/{grpc_ops → grpc}/client.py +91 -0
  9. langgraph_api/grpc/config_conversion.py +225 -0
  10. langgraph_api/grpc/generated/core_api_pb2.py +275 -0
  11. langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2.pyi +20 -31
  12. langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2_grpc.py +2 -2
  13. langgraph_api/grpc/generated/engine_common_pb2.py +190 -0
  14. langgraph_api/grpc/generated/engine_common_pb2.pyi +634 -0
  15. langgraph_api/grpc/generated/engine_common_pb2_grpc.py +24 -0
  16. langgraph_api/{grpc_ops → grpc}/ops.py +75 -217
  17. langgraph_api/js/package.json +5 -5
  18. langgraph_api/js/src/graph.mts +20 -0
  19. langgraph_api/js/yarn.lock +137 -187
  20. langgraph_api/queue_entrypoint.py +2 -2
  21. langgraph_api/route.py +14 -4
  22. langgraph_api/schema.py +2 -2
  23. langgraph_api/self_hosted_metrics.py +48 -2
  24. langgraph_api/serde.py +58 -14
  25. langgraph_api/worker.py +1 -1
  26. {langgraph_api-0.4.48.dist-info → langgraph_api-0.5.6.dist-info}/METADATA +5 -5
  27. {langgraph_api-0.4.48.dist-info → langgraph_api-0.5.6.dist-info}/RECORD +32 -28
  28. langgraph_api/grpc_ops/generated/core_api_pb2.py +0 -276
  29. /langgraph_api/{grpc_ops → grpc}/__init__.py +0 -0
  30. /langgraph_api/{grpc_ops → grpc}/generated/__init__.py +0 -0
  31. {langgraph_api-0.4.48.dist-info → langgraph_api-0.5.6.dist-info}/WHEEL +0 -0
  32. {langgraph_api-0.4.48.dist-info → langgraph_api-0.5.6.dist-info}/entry_points.txt +0 -0
  33. {langgraph_api-0.4.48.dist-info → langgraph_api-0.5.6.dist-info}/licenses/LICENSE +0 -0
@@ -4,10 +4,10 @@ from __future__ import annotations
4
4
 
5
5
  import asyncio
6
6
  import functools
7
- from collections.abc import AsyncIterator, Sequence
7
+ from collections.abc import AsyncIterator
8
8
  from datetime import UTC
9
9
  from http import HTTPStatus
10
- from typing import Any
10
+ from typing import Any, overload
11
11
  from uuid import UUID
12
12
 
13
13
  import orjson
@@ -16,10 +16,10 @@ from google.protobuf.json_format import MessageToDict
16
16
  from google.protobuf.struct_pb2 import Struct # type: ignore[import]
17
17
  from grpc import StatusCode
18
18
  from grpc.aio import AioRpcError
19
- from langgraph.pregel.debug import CheckpointPayload
20
19
  from langgraph_sdk.schema import Config
21
20
  from starlette.exceptions import HTTPException
22
21
 
22
+ from langgraph_api.grpc import config_conversion
23
23
  from langgraph_api.schema import (
24
24
  Assistant,
25
25
  AssistantSelectField,
@@ -32,7 +32,7 @@ from langgraph_api.schema import (
32
32
  )
33
33
  from langgraph_api.serde import json_dumpb, json_loads
34
34
 
35
- from .client import GrpcClient
35
+ from .client import get_shared_client
36
36
  from .generated import core_api_pb2 as pb
37
37
 
38
38
  GRPC_STATUS_TO_HTTP_STATUS = {
@@ -50,30 +50,16 @@ def map_if_exists(if_exists: str) -> pb.OnConflictBehavior:
50
50
  return pb.OnConflictBehavior.RAISE
51
51
 
52
52
 
53
- def map_configurable(config: Config) -> Struct:
54
- """Build pb.Config, placing non-standard keys into `extra` bytes.
53
+ @overload
54
+ def consolidate_config_and_context(
55
+ config: Config | None, context: None
56
+ ) -> tuple[Config, None]: ...
55
57
 
56
- The `extra` field mirrors any keys that are not first-class in
57
- Config (e.g., "tags", "recursion_limit", "configurable").
58
- It is JSON-encoded bytes to minimize serde overhead; the server will
59
- unpack and persist them as top-level keys.
60
- """
61
- base_keys = {"tags", "recursion_limit", "configurable"}
62
- extra_dict = {k: v for k, v in (config or {}).items() if k not in base_keys}
63
-
64
- kwargs: dict[str, Any] = dict(
65
- tags=pb.Tags(values=config.get("tags")),
66
- recursion_limit=config.get("recursion_limit"),
67
- configurable=(
68
- dict_to_struct(config.get("configurable", {}))
69
- if config.get("configurable")
70
- else None
71
- ),
72
- )
73
- if extra_dict:
74
- kwargs["extra"] = orjson.dumps(extra_dict)
75
58
 
76
- return pb.Config(**kwargs)
59
+ @overload
60
+ def consolidate_config_and_context(
61
+ config: Config | None, context: Context
62
+ ) -> tuple[Config, Context]: ...
77
63
 
78
64
 
79
65
  def consolidate_config_and_context(
@@ -84,17 +70,20 @@ def consolidate_config_and_context(
84
70
  Does not mutate the passed-in objects. If both configurable and context
85
71
  are provided, raises 400. If only one is provided, mirrors it to the other.
86
72
  """
87
- cfg: Config = dict(config or {})
73
+ cfg: Config = Config(config or {})
88
74
  ctx: Context | None = dict(context) if context is not None else None
75
+ configurable = cfg.get("configurable")
89
76
 
90
- if cfg.get("configurable") and ctx:
77
+ if configurable and ctx:
91
78
  raise HTTPException(
92
79
  status_code=400,
93
- detail="Cannot specify both configurable and context. Prefer setting context alone. Context was introduced in LangGraph 0.6.0 and is the long term planned replacement for configurable.",
80
+ detail="Cannot specify both configurable and context. Prefer setting context alone."
81
+ " Context was introduced in LangGraph 0.6.0 and "
82
+ "is the long term planned replacement for configurable.",
94
83
  )
95
84
 
96
- if cfg.get("configurable"):
97
- ctx = cfg["configurable"]
85
+ if configurable:
86
+ ctx = configurable
98
87
  elif ctx is not None:
99
88
  cfg["configurable"] = ctx
100
89
 
@@ -114,38 +103,6 @@ def struct_to_dict(struct: Struct) -> dict[str, Any]:
114
103
  return MessageToDict(struct) if struct else {}
115
104
 
116
105
 
117
- def _runnable_config_to_user_dict(cfg: pb.Config | None) -> dict[str, Any]:
118
- """Convert pb.Config to user-visible dict, unpacking `extra`.
119
-
120
- - Keeps top-level known keys: tags, recursion_limit, configurable.
121
- - Merges keys from `extra` into the top-level dict.
122
- """
123
- if not cfg:
124
- return {}
125
-
126
- out: dict[str, Any] = {}
127
- # tags
128
- if cfg.tags and cfg.tags.values:
129
- out["tags"] = list(cfg.tags.values)
130
- # recursion_limit (preserve presence of 0 if set)
131
- try:
132
- if cfg.HasField("recursion_limit"):
133
- out["recursion_limit"] = cfg.recursion_limit
134
- except ValueError:
135
- # Some runtimes may not support HasField on certain builds; fallback
136
- if getattr(cfg, "recursion_limit", None) is not None:
137
- out["recursion_limit"] = cfg.recursion_limit
138
- # configurable
139
- if cfg.HasField("configurable"):
140
- out["configurable"] = struct_to_dict(cfg.configurable)
141
- # extra (bytes: JSON-encoded object)
142
- if cfg.HasField("extra") and cfg.extra:
143
- extra = orjson.loads(cfg.extra)
144
- if isinstance(extra, dict) and extra:
145
- out.update(extra)
146
- return out
147
-
148
-
149
106
  def proto_to_assistant(proto_assistant: pb.Assistant) -> Assistant:
150
107
  """Convert protobuf Assistant to dictionary format."""
151
108
  # Preserve None for optional scalar fields by checking presence via HasField
@@ -158,7 +115,7 @@ def proto_to_assistant(proto_assistant: pb.Assistant) -> Assistant:
158
115
  "version": proto_assistant.version,
159
116
  "created_at": proto_assistant.created_at.ToDatetime(tzinfo=UTC),
160
117
  "updated_at": proto_assistant.updated_at.ToDatetime(tzinfo=UTC),
161
- "config": _runnable_config_to_user_dict(proto_assistant.config),
118
+ "config": config_conversion.config_from_proto(proto_assistant.config),
162
119
  "context": struct_to_dict(proto_assistant.context),
163
120
  "metadata": struct_to_dict(proto_assistant.metadata),
164
121
  "name": proto_assistant.name,
@@ -299,93 +256,6 @@ def proto_to_thread(proto_thread: pb.Thread) -> Thread:
299
256
  }
300
257
 
301
258
 
302
- def _checkpoint_metadata_to_pb(
303
- metadata: dict[str, Any] | None,
304
- ) -> pb.CheckpointMetadata | None:
305
- if not metadata:
306
- return None
307
-
308
- message = pb.CheckpointMetadata()
309
- source = metadata.get("source")
310
- if source is not None:
311
- if isinstance(source, str):
312
- enum_key = f"CHECKPOINT_SOURCE_{source.upper()}"
313
- try:
314
- message.source = pb.CheckpointSource.Value(enum_key)
315
- except ValueError:
316
- logger.warning(
317
- "Unknown checkpoint source enum, defaulting to unspecified",
318
- source=source,
319
- )
320
- elif isinstance(source, int):
321
- try:
322
- message.source = pb.CheckpointSource(source)
323
- except ValueError:
324
- logger.warning(
325
- "Unknown checkpoint source value, defaulting to unspecified",
326
- source=source,
327
- )
328
- if step := metadata.get("step"):
329
- message.step = int(step)
330
- parents = metadata.get("parents")
331
- if isinstance(parents, dict):
332
- message.parents.update({str(k): str(v) for k, v in parents.items()})
333
- return message
334
-
335
-
336
- def _checkpoint_tasks_to_pb(tasks: Sequence[dict[str, Any]]) -> list[pb.CheckpointTask]:
337
- task_messages: list[pb.CheckpointTask] = []
338
- for task in tasks:
339
- message = pb.CheckpointTask(
340
- id=task.get("id", ""),
341
- name=task.get("name", ""),
342
- )
343
- if task.get("error"):
344
- message.error = str(task["error"])
345
- interrupts = task.get("interrupts") or []
346
- for interrupt in interrupts:
347
- message.interrupts.append(dict_to_struct(interrupt))
348
- if task.get("state"):
349
- message.state.CopyFrom(dict_to_struct(task["state"]))
350
- task_messages.append(message)
351
- return task_messages
352
-
353
-
354
- def checkpoint_to_pb(
355
- checkpoint: CheckpointPayload | None,
356
- ) -> pb.CheckpointPayload | None:
357
- if checkpoint is None:
358
- return None
359
-
360
- message = pb.CheckpointPayload()
361
-
362
- config = checkpoint.get("config")
363
- if config:
364
- message.config.CopyFrom(map_configurable(config))
365
-
366
- metadata = _checkpoint_metadata_to_pb(checkpoint.get("metadata"))
367
- if metadata:
368
- message.metadata.CopyFrom(metadata)
369
-
370
- values = checkpoint.get("values")
371
- if values:
372
- message.values.CopyFrom(dict_to_struct(values))
373
-
374
- next_nodes = checkpoint.get("next")
375
- if next_nodes:
376
- message.next.extend([str(n) for n in next_nodes])
377
-
378
- parent_config = checkpoint.get("parent_config")
379
- if parent_config:
380
- message.parent_config.CopyFrom(map_configurable(parent_config))
381
-
382
- tasks = checkpoint.get("tasks")
383
- if tasks:
384
- message.tasks.extend(_checkpoint_tasks_to_pb(tasks))
385
-
386
- return message
387
-
388
-
389
259
  def exception_to_struct(exception: BaseException | None) -> Struct | None:
390
260
  if exception is None:
391
261
  return None
@@ -400,7 +270,7 @@ def _filter_thread_fields(
400
270
  thread: Thread, select: list[ThreadSelectField] | None
401
271
  ) -> dict[str, Any]:
402
272
  if not select:
403
- return thread
273
+ return dict(thread)
404
274
  return {field: thread[field] for field in select if field in thread}
405
275
 
406
276
 
@@ -539,9 +409,8 @@ class Assistants(Authenticated):
539
409
  select=select,
540
410
  )
541
411
 
542
- # Make the gRPC call
543
- async with GrpcClient() as client:
544
- response = await client.assistants.Search(request)
412
+ client = await get_shared_client()
413
+ response = await client.assistants.Search(request)
545
414
 
546
415
  # Convert response to expected format
547
416
  assistants = [
@@ -578,9 +447,8 @@ class Assistants(Authenticated):
578
447
  filters=auth_filters or {},
579
448
  )
580
449
 
581
- # Make the gRPC call
582
- async with GrpcClient() as client:
583
- response = await client.assistants.Get(request)
450
+ client = await get_shared_client()
451
+ response = await client.assistants.Get(request)
584
452
 
585
453
  # Convert and yield the result
586
454
  assistant = proto_to_assistant(response)
@@ -630,16 +498,15 @@ class Assistants(Authenticated):
630
498
  graph_id=graph_id,
631
499
  filters=auth_filters or {},
632
500
  if_exists=on_conflict,
633
- config=map_configurable(config),
501
+ config=config_conversion.config_to_proto(config),
634
502
  context=dict_to_struct(context or {}),
635
503
  name=name,
636
504
  description=description,
637
505
  metadata=dict_to_struct(metadata or {}),
638
506
  )
639
507
 
640
- # Make the gRPC call
641
- async with GrpcClient() as client:
642
- response = await client.assistants.Create(request)
508
+ client = await get_shared_client()
509
+ response = await client.assistants.Create(request)
643
510
 
644
511
  # Convert and yield the result
645
512
  assistant = proto_to_assistant(response)
@@ -654,7 +521,7 @@ class Assistants(Authenticated):
654
521
  conn, # Not used in gRPC implementation
655
522
  assistant_id: UUID | str,
656
523
  *,
657
- config: dict | None = None,
524
+ config: Config | None = None,
658
525
  context: Context | None = None,
659
526
  graph_id: str | None = None,
660
527
  metadata: MetadataInput | None = None,
@@ -664,7 +531,7 @@ class Assistants(Authenticated):
664
531
  ) -> AsyncIterator[Assistant]: # type: ignore[return-value]
665
532
  """Update assistant via gRPC."""
666
533
  metadata = metadata if metadata is not None else {}
667
- config = config if config is not None else {}
534
+ config = config if config is not None else Config()
668
535
  # Handle auth filters
669
536
  auth_filters = await Assistants.handle_event(
670
537
  ctx,
@@ -694,15 +561,14 @@ class Assistants(Authenticated):
694
561
 
695
562
  # Add optional config if provided
696
563
  if config:
697
- request.config.CopyFrom(map_configurable(config))
564
+ request.config.CopyFrom(config_conversion.config_to_proto(config))
698
565
 
699
566
  # Add optional context if provided
700
567
  if context:
701
568
  request.context.CopyFrom(dict_to_struct(context))
702
569
 
703
- # Make the gRPC call
704
- async with GrpcClient() as client:
705
- response = await client.assistants.Patch(request)
570
+ client = await get_shared_client()
571
+ response = await client.assistants.Patch(request)
706
572
 
707
573
  # Convert and yield the result
708
574
  assistant = proto_to_assistant(response)
@@ -730,9 +596,8 @@ class Assistants(Authenticated):
730
596
  filters=auth_filters or {},
731
597
  )
732
598
 
733
- # Make the gRPC call
734
- async with GrpcClient() as client:
735
- await client.assistants.Delete(request)
599
+ client = await get_shared_client()
600
+ await client.assistants.Delete(request)
736
601
 
737
602
  # Return the deleted ID
738
603
  async def generate_result():
@@ -765,9 +630,8 @@ class Assistants(Authenticated):
765
630
  filters=auth_filters or {},
766
631
  )
767
632
 
768
- # Make the gRPC call
769
- async with GrpcClient() as client:
770
- response = await client.assistants.SetLatest(request)
633
+ client = await get_shared_client()
634
+ response = await client.assistants.SetLatest(request)
771
635
 
772
636
  # Convert and yield the result
773
637
  assistant = proto_to_assistant(response)
@@ -803,9 +667,8 @@ class Assistants(Authenticated):
803
667
  offset=offset,
804
668
  )
805
669
 
806
- # Make the gRPC call
807
- async with GrpcClient() as client:
808
- response = await client.assistants.GetVersions(request)
670
+ client = await get_shared_client()
671
+ response = await client.assistants.GetVersions(request)
809
672
 
810
673
  # Convert and yield the results
811
674
  async def generate_results():
@@ -819,7 +682,7 @@ class Assistants(Authenticated):
819
682
  "graph_id": version.graph_id,
820
683
  "version": version.version,
821
684
  "created_at": version.created_at.ToDatetime(tzinfo=UTC),
822
- "config": _runnable_config_to_user_dict(version.config),
685
+ "config": config_conversion.config_from_proto(version.config),
823
686
  "context": struct_to_dict(version.context),
824
687
  "metadata": struct_to_dict(version.metadata),
825
688
  "name": version.name,
@@ -849,9 +712,8 @@ class Assistants(Authenticated):
849
712
  metadata=dict_to_struct(metadata or {}),
850
713
  )
851
714
 
852
- # Make the gRPC call
853
- async with GrpcClient() as client:
854
- response = await client.assistants.Count(request)
715
+ client = await get_shared_client()
716
+ response = await client.assistants.Count(request)
855
717
 
856
718
  return int(response.count)
857
719
 
@@ -913,24 +775,22 @@ class Threads(Authenticated):
913
775
  if ids:
914
776
  normalized_ids = [_normalize_uuid(thread_id) for thread_id in ids]
915
777
  threads: list[Thread] = []
916
- async with GrpcClient() as client:
917
- for thread_id in normalized_ids:
918
- request = pb.GetThreadRequest(
919
- thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
920
- filters=auth_filters or {},
921
- )
922
- response = await client.threads.Get(request)
923
- thread = proto_to_thread(response)
924
-
925
- if status and thread["status"] != status:
926
- continue
927
- if metadata and not _json_contains(thread["metadata"], metadata):
928
- continue
929
- if values and not _json_contains(
930
- thread.get("values") or {}, values
931
- ):
932
- continue
933
- threads.append(thread)
778
+ client = await get_shared_client()
779
+ for thread_id in normalized_ids:
780
+ request = pb.GetThreadRequest(
781
+ thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
782
+ filters=auth_filters or {},
783
+ )
784
+ response = await client.threads.Get(request)
785
+ thread = proto_to_thread(response)
786
+
787
+ if status and thread["status"] != status:
788
+ continue
789
+ if metadata and not _json_contains(thread["metadata"], metadata):
790
+ continue
791
+ if values and not _json_contains(thread.get("values") or {}, values):
792
+ continue
793
+ threads.append(thread)
934
794
 
935
795
  total = len(threads)
936
796
  paginated = threads[offset : offset + limit]
@@ -964,10 +824,10 @@ class Threads(Authenticated):
964
824
  if select:
965
825
  request_kwargs["select"] = select
966
826
 
967
- async with GrpcClient() as client:
968
- response = await client.threads.Search(
969
- pb.SearchThreadsRequest(**request_kwargs)
970
- )
827
+ client = await get_shared_client()
828
+ response = await client.threads.Search(
829
+ pb.SearchThreadsRequest(**request_kwargs)
830
+ )
971
831
 
972
832
  threads = [proto_to_thread(thread) for thread in response.threads]
973
833
  cursor = offset + limit if len(threads) == limit else None
@@ -1014,10 +874,8 @@ class Threads(Authenticated):
1014
874
  )
1015
875
  request_kwargs["status"] = mapped_status
1016
876
 
1017
- async with GrpcClient() as client:
1018
- response = await client.threads.Count(
1019
- pb.CountThreadsRequest(**request_kwargs)
1020
- )
877
+ client = await get_shared_client()
878
+ response = await client.threads.Count(pb.CountThreadsRequest(**request_kwargs))
1021
879
 
1022
880
  return int(response.count)
1023
881
 
@@ -1035,8 +893,8 @@ class Threads(Authenticated):
1035
893
  thread_id=pb.UUID(value=_normalize_uuid(thread_id)),
1036
894
  filters=auth_filters or {},
1037
895
  )
1038
- async with GrpcClient() as client:
1039
- response = await client.threads.Get(request)
896
+ client = await get_shared_client()
897
+ response = await client.threads.Get(request)
1040
898
 
1041
899
  thread = proto_to_thread(response)
1042
900
 
@@ -1077,8 +935,8 @@ class Threads(Authenticated):
1077
935
  if ttl_config is not None:
1078
936
  request.ttl.CopyFrom(ttl_config)
1079
937
 
1080
- async with GrpcClient() as client:
1081
- response = await client.threads.Create(request)
938
+ client = await get_shared_client()
939
+ response = await client.threads.Create(request)
1082
940
  thread = proto_to_thread(response)
1083
941
 
1084
942
  async def generate_result():
@@ -1118,8 +976,8 @@ class Threads(Authenticated):
1118
976
  if ttl_config is not None:
1119
977
  request.ttl.CopyFrom(ttl_config)
1120
978
 
1121
- async with GrpcClient() as client:
1122
- response = await client.threads.Patch(request)
979
+ client = await get_shared_client()
980
+ response = await client.threads.Patch(request)
1123
981
 
1124
982
  thread = proto_to_thread(response)
1125
983
 
@@ -1147,8 +1005,8 @@ class Threads(Authenticated):
1147
1005
  filters=auth_filters or {},
1148
1006
  )
1149
1007
 
1150
- async with GrpcClient() as client:
1151
- response = await client.threads.Delete(request)
1008
+ client = await get_shared_client()
1009
+ response = await client.threads.Delete(request)
1152
1010
 
1153
1011
  deleted_id = UUID(response.value)
1154
1012
 
@@ -1176,8 +1034,8 @@ class Threads(Authenticated):
1176
1034
  filters=auth_filters or {},
1177
1035
  )
1178
1036
 
1179
- async with GrpcClient() as client:
1180
- response = await client.threads.Copy(request)
1037
+ client = await get_shared_client()
1038
+ response = await client.threads.Copy(request)
1181
1039
 
1182
1040
  thread = proto_to_thread(response)
1183
1041
 
@@ -16,12 +16,12 @@
16
16
  "@langchain/langgraph-checkpoint": "^1.0.0",
17
17
  "@types/json-schema": "^7.0.15",
18
18
  "@typescript/vfs": "^1.6.0",
19
- "dedent": "^1.5.3",
19
+ "dedent": "^1.7.0",
20
20
  "exit-hook": "^4.0.0",
21
- "hono": "^4.10.2",
21
+ "hono": "^4.10.4",
22
22
  "p-queue": "^8.0.1",
23
23
  "p-retry": "^6.2.0",
24
- "tsx": "^4.19.3",
24
+ "tsx": "^4.20.6",
25
25
  "typescript": "^5.5.4",
26
26
  "undici": "^6.21.2",
27
27
  "uuid": "^10.0.0",
@@ -40,8 +40,8 @@
40
40
  "@types/react-dom": "^19.0.3",
41
41
  "jose": "^6.0.10",
42
42
  "postgres": "^3.4.4",
43
- "prettier": "^3.3.3",
44
- "vitest": "^3.0.5"
43
+ "prettier": "^3.6.2",
44
+ "vitest": "^4.0.6"
45
45
  },
46
46
  "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e"
47
47
  }
@@ -62,6 +62,17 @@ export async function resolveGraph(
62
62
  return "compile" in graph && typeof graph.compile === "function";
63
63
  };
64
64
 
65
+ const isCompiledGraph = (
66
+ graph: GraphLike,
67
+ ): graph is CompiledGraph<string> => {
68
+ if (typeof graph !== "object" || graph == null) return false;
69
+ return (
70
+ "builder" in graph &&
71
+ typeof graph.builder === "object" &&
72
+ graph.builder != null
73
+ );
74
+ };
75
+
65
76
  const graph: GraphUnknown = await import(sourceFile).then(
66
77
  (module) => module[exportSymbol || "default"],
67
78
  );
@@ -73,6 +84,15 @@ export async function resolveGraph(
73
84
 
74
85
  const afterResolve = (graphLike: GraphLike): CompiledGraph<string> => {
75
86
  const graph = isGraph(graphLike) ? graphLike.compile() : graphLike;
87
+
88
+ // TODO: hack, remove once LangChain 1.x createAgent is fixed
89
+ // LangGraph API will assign its checkpointer by setting it
90
+ // via `graph.checkpointer = ...` and `graph.store = ...`, and the 1.x `createAgent`
91
+ // hides the underlying `StateGraph` instance, so we need to access it directly.
92
+ if (!isCompiledGraph(graph) && "graph" in graph) {
93
+ return (graph as { graph: CompiledGraph<string> }).graph;
94
+ }
95
+
76
96
  return graph;
77
97
  };
78
98