langgraph-executor 0.0.1a6__tar.gz → 0.0.1a8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/.gitignore +1 -0
  2. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/PKG-INFO +1 -1
  3. langgraph_executor-0.0.1a8/langgraph_executor/__init__.py +1 -0
  4. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/client/patch.py +4 -4
  5. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/client/utils.py +8 -8
  6. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/common.py +10 -14
  7. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/executor_base.py +47 -15
  8. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/extract_graph.py +3 -3
  9. langgraph_executor-0.0.1a8/langgraph_executor/serde.py +13 -0
  10. langgraph_executor-0.0.1a6/langgraph_executor/__init__.py +0 -1
  11. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/README.md +0 -0
  12. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/client/__init__.py +0 -0
  13. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/example.py +0 -0
  14. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/execute_task.py +0 -0
  15. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/executor.py +0 -0
  16. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/info_logger.py +0 -0
  17. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/__init__.py +0 -0
  18. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/executor_pb2.py +0 -0
  19. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/executor_pb2.pyi +0 -0
  20. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/executor_pb2_grpc.py +0 -0
  21. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/executor_pb2_grpc.pyi +0 -0
  22. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/graph_pb2.py +0 -0
  23. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/graph_pb2.pyi +0 -0
  24. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/graph_pb2_grpc.py +0 -0
  25. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/graph_pb2_grpc.pyi +0 -0
  26. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/runtime_pb2.py +0 -0
  27. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/runtime_pb2.pyi +0 -0
  28. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/runtime_pb2_grpc.py +0 -0
  29. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/runtime_pb2_grpc.pyi +0 -0
  30. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/types_pb2.py +0 -0
  31. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/types_pb2.pyi +0 -0
  32. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/types_pb2_grpc.py +0 -0
  33. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/pb/types_pb2_grpc.pyi +0 -0
  34. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/py.typed +0 -0
  35. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/server.py +0 -0
  36. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/setup.sh +0 -0
  37. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/langgraph_executor/stream_utils.py +0 -0
  38. {langgraph_executor-0.0.1a6 → langgraph_executor-0.0.1a8}/pyproject.toml +0 -0
--- langgraph_executor-0.0.1a6/.gitignore
+++ langgraph_executor-0.0.1a8/.gitignore
@@ -49,3 +49,4 @@ logs/
 #runtime binary
 langgraph_runtime_integration/bin/*
 *.egg-info/
+.gocache
--- langgraph_executor-0.0.1a6/PKG-INFO
+++ langgraph_executor-0.0.1a8/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-executor
-Version: 0.0.1a6
+Version: 0.0.1a8
 Summary: LangGraph python RPC server executable by the langgraph-go orchestrator.
 Requires-Python: >=3.11
 Requires-Dist: grpcio>=1.73.1
--- /dev/null
+++ langgraph_executor-0.0.1a8/langgraph_executor/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.0.1a8"
--- langgraph_executor-0.0.1a6/langgraph_executor/client/patch.py
+++ langgraph_executor-0.0.1a8/langgraph_executor/client/patch.py
@@ -9,8 +9,8 @@ from langgraph.runtime import get_runtime
 from langgraph.types import Interrupt
 from pydantic import ValidationError
 
+from langgraph_executor import serde
 from langgraph_executor.client.utils import (
-    SERDE,
     config_to_pb,
     context_to_pb,
     create_runopts_pb,
@@ -58,7 +58,7 @@ async def _ainvoke_wrapper(
     # subgraph names coerced when initializing executor
     graph_name = pregel_self.name
 
-    logger.info(f"SUBGRAPH INVOKE ENCOUNTERED: {graph_name}")
+    logger.info(f"SUBGRAPH AINVOKE ENCOUNTERED: {graph_name}")
 
     # TODO: Hacky way of retrieving runtime from runnable context
     if not context:
@@ -111,7 +111,7 @@ async def _ainvoke_wrapper(
         for interrupt in graph_interrupt.interrupts:
             interrupts.append(
                 Interrupt(
-                    value=SERDE.loads_typed(
+                    value=serde.get_serializer().loads_typed(
                         (
                             interrupt.value.base_value.method,
                             interrupt.value.base_value.value,
@@ -239,7 +239,7 @@ def _invoke_wrapper(
         for interrupt in graph_interrupt.interrupts:
             interrupts.append(
                 Interrupt(
-                    value=SERDE.loads_typed(
+                    value=serde.get_serializer().loads_typed(
                         (
                             interrupt.value.base_value.method,
                             interrupt.value.base_value.value,
--- langgraph_executor-0.0.1a6/langgraph_executor/client/utils.py
+++ langgraph_executor-0.0.1a8/langgraph_executor/client/utils.py
@@ -14,19 +14,18 @@ from langgraph._internal._constants import (
     CONFIG_KEY_CHECKPOINT_MAP,
     CONFIG_KEY_CHECKPOINT_NS,
     CONFIG_KEY_DURABILITY,
+    CONFIG_KEY_RESUME_MAP,
     CONFIG_KEY_RESUMING,
     CONFIG_KEY_TASK_ID,
     CONFIG_KEY_THREAD_ID,
 )
-from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
 from langgraph.pregel.debug import CheckpointMetadata
 from langgraph.types import StateSnapshot
 
+from langgraph_executor import serde
 from langgraph_executor.common import reconstruct_config, val_to_pb
 from langgraph_executor.pb import runtime_pb2, types_pb2
 
-SERDE = JsonPlusSerializer()
-
 
 def input_to_pb(input):
     return val_to_pb(None, input)
@@ -51,10 +50,11 @@ def maybe_update_reserved_configurable(
         reserved_configurable.checkpoint_id = str(value)
     elif key == CONFIG_KEY_CHECKPOINT_NS:
         reserved_configurable.checkpoint_ns = str(value)
-    # elif key == CONFIG_KEY_PREVIOUS:
-    #     serde = JsonPlusSerializer()
-    #     meth, ser = serde.dumps_typed(value)
-    #     reserved_configurable.previous = types_pb2.SerializedValue(method=meth, value=bytes(ser))
+    elif key == CONFIG_KEY_RESUME_MAP and value is not None:
+        resume_map = cast(dict[str, Any], value)
+        for k, v in resume_map.items():
+            pb_value = val_to_pb(None, v)
+            reserved_configurable.resume_map[k].CopyFrom(pb_value)
     elif key == CONFIG_KEY_DURABILITY:
         reserved_configurable.durability = str(value)
     else:
@@ -221,7 +221,7 @@ def _deser_vals(current_chunk):
     if not isinstance(current_chunk, dict):
         return current_chunk
     if set(current_chunk.keys()) == VAL_KEYS:
-        return SERDE.loads_typed(
+        return serde.get_serializer().loads_typed(
            (current_chunk["method"], base64.b64decode(current_chunk["value"]))
        )
     for k, v in current_chunk.items():
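
The new CONFIG_KEY_RESUME_MAP branch above serializes each resume value with val_to_pb and copies it into reserved_configurable.resume_map. A minimal round-trip sketch of that conversion, assuming the generated proto bindings are importable, that resume_map values are types_pb2.Value messages, and using an illustrative interrupt-id key:

from langgraph_executor.common import pb_to_val, val_to_pb
from langgraph_executor.pb import types_pb2

reserved = types_pb2.ReservedConfigurable()

# Outbound: serialize one resume value and store it under its interrupt id
# (the key string here is hypothetical).
pb_value = val_to_pb(None, {"approved": True})
reserved.resume_map["interrupt-1"].CopyFrom(pb_value)

# Inbound: pb_to_val recovers the original Python value through the
# process-wide serializer registered in langgraph_executor.serde.
assert pb_to_val(reserved.resume_map["interrupt-1"]) == {"approved": True}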
--- langgraph_executor-0.0.1a6/langgraph_executor/common.py
+++ langgraph_executor-0.0.1a8/langgraph_executor/common.py
@@ -1,8 +1,9 @@
 import traceback
+import uuid
 from collections.abc import Mapping, Sequence
 from collections.abc import Sequence as SequenceType
 from contextvars import ContextVar
-from typing import Any, cast
+from typing import Any, Literal, cast
 
 from google.protobuf.json_format import MessageToDict
 from langchain_core.runnables import RunnableConfig
@@ -26,7 +27,6 @@ from langgraph.checkpoint.base import (
     CheckpointTuple,
     PendingWrite,
 )
-from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
 from langgraph.errors import GraphBubbleUp, GraphInterrupt
 from langgraph.managed.base import ManagedValue, ManagedValueMapping
 from langgraph.pregel import Pregel
@@ -34,6 +34,7 @@ from langgraph.pregel._algo import PregelTaskWrites
 from langgraph.pregel._read import PregelNode
 from langgraph.types import Command, Interrupt, Send
 
+from langgraph_executor import serde
 from langgraph_executor.pb import types_pb2
 
 var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
@@ -44,8 +45,6 @@ var_child_runnable_config: ContextVar[RunnableConfig | None] = ContextVar(
 def map_reserved_configurable(
     reserved_configurable: types_pb2.ReservedConfigurable,
 ) -> dict[str, Any]:
-    # serde = JsonPlusSerializer()
-
     return {
         CONFIG_KEY_RESUMING: reserved_configurable.resuming,
         CONFIG_KEY_TASK_ID: reserved_configurable.task_id,
@@ -80,7 +79,7 @@ def reconstruct_config(pb_config: types_pb2.RunnableConfig) -> RunnableConfig:
         tags=list(pb_config.tags),
         metadata=MessageToDict(pb_config.metadata),
         run_name=pb_config.run_name,
-        run_id=pb_config.run_id,
+        run_id=cast(uuid.UUID, pb_config.run_id),
         max_concurrency=pb_config.max_concurrency,
         recursion_limit=pb_config.recursion_limit,
         configurable=configurable,
@@ -138,7 +137,6 @@ def reconstruct_checkpoint(request_checkpoint: types_pb2.Checkpoint) -> Checkpoi
 def reconstruct_task_writes(
     request_tasks: SequenceType[Any],
 ) -> SequenceType[PregelTaskWrites]:
-    # serde = JsonPlusSerializer()
     return [
         PregelTaskWrites(
             tuple(t.task_path),
@@ -187,11 +185,11 @@ def get_node(node_name: str, graph: Pregel, graph_name: str) -> PregelNode:
 
 
 def pb_to_val(value: types_pb2.Value) -> Any:
-    serde = JsonPlusSerializer()
-
     value_kind = value.WhichOneof("message")
     if value_kind == "base_value":
-        return serde.loads_typed((value.base_value.method, value.base_value.value))
+        return serde.get_serializer().loads_typed(
+            (value.base_value.method, value.base_value.value)
+        )
     if value_kind == "sends":
         sends = []
         for send in value.sends.sends:
@@ -313,9 +311,7 @@ def base_value_to_pb(value: Any) -> types_pb2.Value:
 
 
 def serialize_value(value: Any) -> types_pb2.SerializedValue:
-    serde = JsonPlusSerializer()
-
-    meth, ser_val = serde.dumps_typed(value)
+    meth, ser_val = serde.get_serializer().dumps_typed(value)
     return types_pb2.SerializedValue(method=meth, value=bytes(ser_val))
 
 
@@ -442,7 +438,7 @@ def reconstruct_checkpoint_metadata(
         return None
 
     return CheckpointMetadata(
-        source=metadata_pb.source,
+        source=cast(Literal["input", "loop", "update", "fork"], metadata_pb.source),
         step=metadata_pb.step,
-        parents=metadata_pb.parents,
+        parents=dict(metadata_pb.parents) or {},
     )
--- langgraph_executor-0.0.1a6/langgraph_executor/executor_base.py
+++ langgraph_executor-0.0.1a8/langgraph_executor/executor_base.py
@@ -91,14 +91,16 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
         get_graph: GetGraph,
         runtime_channel: grpc.Channel | None = None,
         logger: Logger | None = None,
-        on_message: Callable[
-            [
-                BaseMessageChunk,
-                dict[str, Any],
-            ],
-            None,
-        ]
-        | None = None,
+        on_message: (
+            Callable[
+                [
+                    BaseMessageChunk,
+                    dict[str, Any],
+                ],
+                None,
+            ]
+            | None
+        ) = None,
         on_custom: Callable[[Any], None] | None = None,
         get_store: Callable[[], Awaitable[BaseStore]] | None = None,
     ):
@@ -143,7 +145,9 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
     ) -> executor_pb2.GetGraphResponse:  # type: ignore[name-defined]
         """Get graph definition."""
         try:
-            self.logger.debug("GetGraph called")
+            self.logger.debug(
+                "GetGraph called", extra={"graph_name": request.graph_name}
+            )
             graph_name: str = request.graph_name
             return await self._get_graph_definition(graph_name)
 
@@ -183,7 +187,14 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
         self, request: Any, context: grpc.aio.ServicerContext
     ) -> executor_pb2.ChannelsFromCheckpointResponse:  # type: ignore[name-defined]
         try:
-            self.logger.debug("ChannelsFromCheckpoint called")
+            self.logger.debug(
+                "ChannelsFromCheckpoint called",
+                extra={
+                    "graph_name": request.graph_name,
+                    "specs": request.specs,
+                    "checkpoint_channel_values": request.checkpoint_channel_values,
+                },
+            )
             async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
                 # reconstruct specs
                 specs, _ = reconstruct_channels(
@@ -215,11 +226,16 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
         request_iterator: Iterator[executor_pb2.ExecuteTaskRequest],  # type: ignore[name-defined]
         context: grpc.aio.ServicerContext,
     ) -> AsyncIterator[executor_pb2.ExecuteTaskResponse]:  # type: ignore[name-defined]
-        self.logger.debug("ExecuteTask called")
-
-        # Right now, only handle task execution without interrupts, etc
         try:
             request = await _get_init_request(request_iterator)
+            self.logger.debug(
+                "ExecuteTask called",
+                extra={
+                    "graph_name": request.graph_name,
+                    "task": request.task,
+                    "stream_modes": request.stream_modes,
+                },
+            )
             config = reconstruct_config(request.task.config)
             store = await self.get_store() if self.get_store is not None else None
             async with self.get_graph(request.graph_name, config) as graph:
@@ -338,7 +354,15 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
         self, request: Any, context: grpc.aio.ServicerContext
     ) -> executor_pb2.ApplyWritesResponse:  # type: ignore[name-defined]
         # get graph
-        self.logger.debug("ApplyWrites called")
+        self.logger.debug(
+            "ApplyWrites called",
+            extra={
+                "graph_name": request.graph_name,
+                "tasks": request.tasks,
+                "channels": request.channels,
+                "checkpoint": request.checkpoint,
+            },
+        )
         try:
             async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
                 channels, _ = reconstruct_channels(
@@ -382,7 +406,15 @@ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
         request: executor_pb2.StateUpdateRequest,
         context: grpc.aio.ServicerContext,
     ) -> executor_pb2.TaskResult | None:
-        self.logger.debug("StateUpdate called")
+        self.logger.debug(
+            "StateUpdate called",
+            extra={
+                "graph_name": request.graph_name,
+                "node_name": request.node_name,
+                "task_id": request.task_id,
+                "values": request.values,
+            },
+        )
 
         try:
             async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
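
The executor_base.py changes above move request details out of the log message string and into the logging extra mapping, so they attach to each LogRecord as named attributes. A small stdlib-only sketch of how such a field can be surfaced; the logger name, format string, and graph name are illustrative, not taken from the package:

import logging

# With this format every record must supply graph_name via extra,
# which is why the sketch emits only one such record.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(levelname)s %(name)s %(message)s graph=%(graph_name)s",
)
logger = logging.getLogger("langgraph_executor")

# Keys passed through extra become LogRecord attributes, so plain formatters
# and structured-logging handlers can reference them by name.
logger.debug("GetGraph called", extra={"graph_name": "my_graph"})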
--- langgraph_executor-0.0.1a6/langgraph_executor/extract_graph.py
+++ langgraph_executor-0.0.1a8/langgraph_executor/extract_graph.py
@@ -1,7 +1,7 @@
 """Shared module for extracting graph information from LangGraph graphs."""
 
 from collections.abc import Sequence
-from typing import Any
+from typing import Any, cast
 
 from google.protobuf.json_format import MessageToJson
 from google.protobuf.struct_pb2 import Struct  # type: ignore[import-not-found]
@@ -55,8 +55,8 @@ def extract_config(config: RunnableConfig) -> types_pb2.RunnableConfig:
         tags=[t for t in ensured_config["tags"]],
         recursion_limit=ensured_config["recursion_limit"],
         run_name=ensured_config.get("run_name", ""),
-        max_concurrency=(
-            ensured_config.get("max_concurrency", DEFAULT_MAX_CONCURRENCY)
+        max_concurrency=cast(
+            int, ensured_config.get("max_concurrency", DEFAULT_MAX_CONCURRENCY)
         ),
         metadata=metadata_proto,
         configurable=configurable_proto,
--- /dev/null
+++ langgraph_executor-0.0.1a8/langgraph_executor/serde.py
@@ -0,0 +1,13 @@
+from langgraph.checkpoint.serde.base import SerializerProtocol
+from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer
+
+SERIALIZER: SerializerProtocol = JsonPlusSerializer()
+
+
+def set_serializer(serializer: SerializerProtocol) -> None:
+    global SERIALIZER
+    SERIALIZER = serializer
+
+
+def get_serializer() -> SerializerProtocol:
+    return SERIALIZER
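
The new serde module above replaces the module-level SERDE constants and throwaway JsonPlusSerializer() instances removed elsewhere in this diff with one process-wide registry. A usage sketch relying only on the functions defined above plus JsonPlusSerializer's dumps_typed/loads_typed API; the pickle_fallback option is shown as an example and may vary by langgraph-checkpoint version:

from langgraph.checkpoint.serde.jsonplus import JsonPlusSerializer

from langgraph_executor import serde

# Install an alternative serializer once at startup; every later call to
# serde.get_serializer() in patch.py, client/utils.py, and common.py picks it up.
serde.set_serializer(JsonPlusSerializer(pickle_fallback=True))

# Round-trip a value through whichever serializer is currently registered.
method, payload = serde.get_serializer().dumps_typed({"answer": 42})
assert serde.get_serializer().loads_typed((method, payload)) == {"answer": 42}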
--- langgraph_executor-0.0.1a6/langgraph_executor/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.0.1a6"