langgraph-executor 0.0.1a1__tar.gz → 0.0.1a3__tar.gz

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (36)
  1. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/PKG-INFO +1 -1
  2. langgraph_executor-0.0.1a3/langgraph_executor/__init__.py +1 -0
  3. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/execute_task.py +0 -9
  4. langgraph_executor-0.0.1a3/langgraph_executor/executor.py +162 -0
  5. langgraph_executor-0.0.1a3/langgraph_executor/executor_base.py +497 -0
  6. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/info_logger.py +3 -3
  7. langgraph_executor-0.0.1a3/langgraph_executor/pb/executor_pb2.py +84 -0
  8. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/executor_pb2.pyi +24 -2
  9. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/executor_pb2_grpc.py +44 -0
  10. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/executor_pb2_grpc.pyi +20 -0
  11. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/server.py +22 -25
  12. langgraph_executor-0.0.1a1/langgraph_executor/__init__.py +0 -1
  13. langgraph_executor-0.0.1a1/langgraph_executor/executor.py +0 -376
  14. langgraph_executor-0.0.1a1/langgraph_executor/pb/executor_pb2.py +0 -82
  15. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/.gitignore +0 -0
  16. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/README.md +0 -0
  17. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/common.py +0 -0
  18. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/example.py +0 -0
  19. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/extract_graph.py +0 -0
  20. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/__init__.py +0 -0
  21. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/graph_pb2.py +0 -0
  22. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/graph_pb2.pyi +0 -0
  23. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/graph_pb2_grpc.py +0 -0
  24. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/graph_pb2_grpc.pyi +0 -0
  25. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/runtime_pb2.py +0 -0
  26. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/runtime_pb2.pyi +0 -0
  27. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/runtime_pb2_grpc.py +0 -0
  28. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/runtime_pb2_grpc.pyi +0 -0
  29. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/types_pb2.py +0 -0
  30. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/types_pb2.pyi +0 -0
  31. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/types_pb2_grpc.py +0 -0
  32. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/pb/types_pb2_grpc.pyi +0 -0
  33. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/py.typed +0 -0
  34. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/setup.sh +0 -0
  35. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/stream_utils.py +0 -0
  36. {langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/pyproject.toml +0 -0

{langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langgraph-executor
- Version: 0.0.1a1
+ Version: 0.0.1a3
  Summary: LangGraph python RPC server executable by the langgraph-go orchestrator.
  Requires-Python: >=3.11
  Requires-Dist: grpcio>=1.73.1

langgraph_executor-0.0.1a3/langgraph_executor/__init__.py
@@ -0,0 +1 @@
+ __version__ = "0.0.1a3"

{langgraph_executor-0.0.1a1 → langgraph_executor-0.0.1a3}/langgraph_executor/execute_task.py
@@ -43,15 +43,6 @@ from langgraph_executor.common import (
  from langgraph_executor.pb import types_pb2


- def get_init_request(request_iterator):
-     request = next(request_iterator)
-
-     if not hasattr(request, "init"):
-         raise ValueError("First message must be init")
-
-     return request.init
-
-
  def reconstruct_task(
      request,
      graph: Pregel,

langgraph_executor-0.0.1a3/langgraph_executor/executor.py
@@ -0,0 +1,162 @@
+ import contextlib
+ import functools
+ import logging
+ from typing import Any
+
+ import grpc
+ import grpc.aio
+ from langgraph._internal._constants import NS_SEP
+ from langgraph.pregel import Pregel
+
+ from langgraph_executor.executor_base import LangGraphExecutorServicer
+ from langgraph_executor.pb.executor_pb2_grpc import (
+     add_LangGraphExecutorServicer_to_server,
+ )
+
+ # Internal helpers
+ LOGGER = logging.getLogger(__name__)
+
+
+ def create_server(graphs: dict[str, Pregel], address: str) -> grpc.aio.Server:
+     graphs, subgraph_map = _load_graphs(graphs)
+     server = grpc.aio.server(
+         # Be permissive: allow client pings without active RPCs and accept intervals
+         # as low as 50s. Our clients still default to ~5m, but this avoids penalizing
+         # other, more frequent clients.
+         options=[
+             ("grpc.keepalive_permit_without_calls", 1),
+             ("grpc.http2.min_recv_ping_interval_without_data_ms", 50000), # 50s
+             ("grpc.http2.max_ping_strikes", 2),
+         ]
+     )
+     getter = functools.partial(get_graph, graphs=graphs)
+     add_LangGraphExecutorServicer_to_server(
+         LangGraphExecutorServicer(graphs, subgraph_map=subgraph_map, get_graph=getter),
+         server,
+     )
+     server.add_insecure_port(address)
+     return server
+
+
+ @contextlib.asynccontextmanager
+ async def get_graph(graph_name: str, config: Any, *, graphs: dict[str, Pregel]):
+     yield graphs[graph_name]
+
+
+ def _load_graphs(graphs: dict[str, Pregel]) -> tuple[dict[str, Pregel], dict[str, str]]:
+     """Load graphs and their subgraphs recursively in hierarchical order.
+
+     Args:
+         graphs: Dictionary of root graphs to load
+     """
+     # First, ensure all root graphs have unique names
+     _ensure_unique_root_names(graphs)
+     subgraph_map: dict[str, str] = {}
+
+     # Then, collect all subgraphs and mappings
+     all_subgraphs: dict[str, Pregel] = {}
+     subgraph_to_parent: dict[str, str] = {}
+
+     for root_graph in graphs.values():
+         subgraphs, mappings = _collect_subgraphs(root_graph, root_graph.name)
+         all_subgraphs.update(subgraphs)
+         subgraph_to_parent.update(mappings)
+
+     subgraph_map.update(subgraph_to_parent)
+
+     # Now build self.graphs in hierarchical order (parents before children)
+     for root_name in sorted(graphs.keys()):
+         _load_graph_and_children(
+             root_name, graphs, {**graphs, **all_subgraphs}, subgraph_map
+         )
+
+     _log_supported_graphs(graphs, subgraph_map)
+     return graphs, subgraph_map
+
+
+ def _ensure_unique_root_names(graphs: dict[str, Pregel]) -> None:
+     """Ensure all root graphs have unique names"""
+     seen_names = set()
+
+     for name in graphs:
+         if name in seen_names:
+             raise ValueError(
+                 f"Root graph name conflict detected: {name}. Root graphs must have unique names"
+             )
+         seen_names.add(name)
+
+
+ def _collect_subgraphs(
+     graph: Pregel, namespace: str
+ ) -> tuple[dict[str, Pregel], dict[str, str]]:
+     """Recursively collect all subgraphs from a root graph"""
+     subgraphs = {}
+     mappings = {}
+
+     for idx, (node_name, subgraph) in enumerate(graph.get_subgraphs(recurse=False)):
+         # Generate subgraph name
+         subgraph.name = f"{namespace}{NS_SEP}{node_name}{NS_SEP}{idx}"
+
+         # Add this subgraph
+         subgraphs[subgraph.name] = subgraph
+         mappings[subgraph.name] = graph.name
+
+         # Recursively process this subgraph's children
+         nested_subgraphs, nested_mappings = _collect_subgraphs(subgraph, namespace)
+
+         subgraphs.update(nested_subgraphs)
+         mappings.update(nested_mappings)
+
+     return subgraphs, mappings
+
+
+ def _load_graph_and_children(
+     graph_name: str,
+     graphs: dict[str, Pregel],
+     all_graphs: dict[str, Pregel],
+     subgraph_map: dict[str, str],
+ ) -> None:
+     """Recursively add a graph and its children to self.graphs in order"""
+
+     # Add this graph to self.graphs (maintaining insertion order)
+     graphs[graph_name] = all_graphs[graph_name]
+
+     # Get direct children of this graph
+     children = [
+         child_name
+         for child_name, parent_name in subgraph_map.items()
+         if parent_name == graph_name
+     ]
+
+     # Add children in sorted order (for deterministic output)
+     for child_name in sorted(children):
+         _load_graph_and_children(child_name, graphs, all_graphs, subgraph_map)
+
+
+ def _log_supported_graphs(
+     graphs: dict[str, Pregel], subgraph_map: dict[str, str]
+ ) -> None:
+     """Log the complete graph hierarchy in a tree-like format."""
+     LOGGER.info("Loaded graphs:")
+
+     # Get root graphs
+     root_graphs = {name for name in graphs if name not in subgraph_map}
+
+     for root_name in sorted(root_graphs):
+         LOGGER.info(f" {root_name}")
+         _log_graph_children(root_name, subgraph_map, indent=2)
+
+
+ def _log_graph_children(
+     parent_name: str, subgraph_map: dict[str, str], *, indent: int = 0
+ ) -> None:
+     """Recursively log children of a graph with proper indentation."""
+     children = [
+         child for child, parent in subgraph_map.items() if parent == parent_name
+     ]
+
+     for child in sorted(children):
+         prefix = " " * indent + "└─ "
+         LOGGER.info(f"{prefix}{child}")
+         # Recursively log this child's children
+         _log_graph_children(child, subgraph_map, indent=indent + 1)
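
To make the new executor.py entrypoint concrete, here is a minimal usage sketch. It assumes the langgraph StateGraph builder API; the State schema, bump node, graph name, and port are illustrative, not part of the package.

import asyncio
from typing import TypedDict

from langgraph.graph import StateGraph

from langgraph_executor.executor import create_server


class State(TypedDict):
    value: int


def bump(state: State) -> dict:
    # Trivial node so compile() yields a valid Pregel instance to serve.
    return {"value": state["value"] + 1}


builder = StateGraph(State)
builder.add_node("bump", bump)
builder.set_entry_point("bump")
builder.set_finish_point("bump")
demo = builder.compile()
demo.name = "demo"  # root graph names must be unique across the graphs dict


async def main() -> None:
    # create_server registers the servicer and binds an insecure port.
    server = create_server({"demo": demo}, "[::]:50051")
    await server.start()
    await server.wait_for_termination()


if __name__ == "__main__":
    asyncio.run(main())

Note that create_server resolves subgraphs up front via _load_graphs, so ListGraphs and GetGraph can serve nested graphs by name.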

langgraph_executor-0.0.1a3/langgraph_executor/executor_base.py
@@ -0,0 +1,497 @@
+ import asyncio
+ import contextlib
+ import functools
+ import logging
+ import uuid
+ from collections.abc import AsyncIterator, Callable, Collection, Iterator, Sequence
+ from typing import Any, Protocol, cast
+
+ import grpc
+ import grpc.aio
+ from google.protobuf.struct_pb2 import Struct # type: ignore[import-untyped]
+ from langchain_core.messages import BaseMessage, BaseMessageChunk
+ from langchain_core.runnables import RunnableConfig
+ from langgraph.checkpoint.base import Checkpoint
+ from langgraph.errors import GraphBubbleUp, GraphInterrupt
+ from langgraph.pregel import Pregel
+ from langgraph.pregel._algo import apply_writes
+ from langgraph.pregel._checkpoint import channels_from_checkpoint
+ from langgraph.pregel._retry import arun_with_retry
+ from langgraph.types import PregelExecutableTask
+
+ from langgraph_executor.common import (
+     checkpoint_to_proto,
+     exception_to_pb,
+     extract_channels,
+     pb_to_val,
+     reconstruct_channels,
+     reconstruct_checkpoint,
+     reconstruct_config,
+     reconstruct_task_writes,
+     updates_to_proto,
+ )
+ from langgraph_executor.execute_task import (
+     extract_writes,
+     reconstruct_task,
+ )
+ from langgraph_executor.extract_graph import extract_graph
+ from langgraph_executor.pb import executor_pb2, executor_pb2_grpc, types_pb2
+ from langgraph_executor.stream_utils import ExecutorStreamHandler
+
+
+ class Logger(Protocol):
+     def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+     def info(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+     def warning(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+     def error(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+     def exception(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+     def critical(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
+
+
+ LOGGER = logging.getLogger(__name__)
+ SENTINEL = cast(executor_pb2.ExecuteTaskResponse, object())
+ GetGraph = Callable[
+     [str, RunnableConfig], contextlib.AbstractAsyncContextManager[Pregel]
+ ]
+
+
+ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
+     """gRPC servicer for LangGraph runtime execution operations."""
+
+     def __init__(
+         self,
+         graphs: Collection[str],
+         *,
+         subgraph_map: dict[str, str],
+         get_graph: GetGraph,
+         logger: Logger | None = None,
+         on_message: Callable[
+             [
+                 BaseMessageChunk,
+                 dict[str, Any],
+             ],
+             None,
+         ]
+         | None = None,
+         on_custom: Callable[[Any], None] | None = None,
+     ):
+         """Initialize the servicer with compiled graphs.
+
+         Args:
+             graphs: Dictionary mapping graph names to compiled graphs
+             subgraph_map: Dictionary mapping subgraph names to parent graph names
+             get_graph: Function to get a graph by name
+             logger: Optional logger
+
+         """
+         self.logger = logger or LOGGER
+         self.graphs = set(graphs)
+         self.graph_names = sorted(self.graphs)
+         self.subgraph_map = subgraph_map
+         self.get_graph = get_graph
+         _patch_base_message_with_ids()
+         self._graph_definition_cache: dict[str, executor_pb2.GetGraphResponse] = {}
+         self.on_message = on_message
+         self.on_custom = on_custom
+
+     async def ListGraphs(
+         self, request: Any, context: grpc.aio.ServicerContext
+     ) -> executor_pb2.ListGraphsResponse: # type: ignore[name-defined]
+         """List available graphs."""
+         return executor_pb2.ListGraphsResponse(
+             graph_names=self.graph_names,
+         )
+
+     async def GetGraph(
+         self, request: Any, context: grpc.aio.ServicerContext
+     ) -> executor_pb2.GetGraphResponse: # type: ignore[name-defined]
+         """Get graph definition."""
+         try:
+             self.logger.debug("GetGraph called")
+             graph_name: str = request.graph_name
+             return await self._get_graph_definition(graph_name)
+
+         except Exception as e:
+             self.logger.error(f"GetGraph Error: {e}", exc_info=True)
+             await context.abort(grpc.StatusCode.INTERNAL, str(e))
+
+     async def _get_graph_definition(self, name: str) -> executor_pb2.GetGraphResponse:
+         if (resp := self._graph_definition_cache.get(name)) is not None:
+             return resp
+         async with self.get_graph(name, RunnableConfig()) as graph:
+             graph_definition = extract_graph(graph)
+
+             resp = executor_pb2.GetGraphResponse(
+                 graph_definition=graph_definition,
+                 parent_name=self.subgraph_map.get(name, None),
+                 checkpointer=graph.checkpointer is not None,
+             )
+             self._graph_definition_cache[name] = resp
+             return resp
+
+     async def GetAllGraphs(
+         self,
+         request: executor_pb2.GetAllGraphsRequest,
+         context: grpc.aio.ServicerContext,
+     ) -> AsyncIterator[executor_pb2.GetGraphResponse]:
+         try:
+             self.logger.debug("GetAllGraphs called")
+             for name in self.graph_names:
+                 yield await self._get_graph_definition(name)
+
+         except Exception as e:
+             self.logger.error(f"GetAllGraphs Error: {e}", exc_info=True)
+             await context.abort(grpc.StatusCode.INTERNAL, str(e))
+
+     async def ChannelsFromCheckpoint(
+         self, request: Any, context: grpc.aio.ServicerContext
+     ) -> executor_pb2.ChannelsFromCheckpointResponse: # type: ignore[name-defined]
+         try:
+             self.logger.debug("ChannelsFromCheckpoint called")
+             async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
+                 # reconstruct specs
+                 specs, _ = reconstruct_channels(
+                     request.specs.channels,
+                     graph,
+                     scratchpad=None, # type: ignore[invalid-arg-type]
+                 )
+
+                 # initialize channels from specs and checkpoint channel values
+                 checkpoint_dummy = Checkpoint( # type: ignore[typeddict-item]
+                     channel_values={
+                         k: pb_to_val(v)
+                         for k, v in request.checkpoint_channel_values.items()
+                     },
+                 )
+                 channels, _ = channels_from_checkpoint(specs, checkpoint_dummy)
+
+                 # channels to pb
+                 channels = extract_channels(channels)
+
+                 return executor_pb2.ChannelsFromCheckpointResponse(channels=channels)
+
+         except Exception as e:
+             self.logger.error(f"ChannelsFromCheckpoint Error: {e}", exc_info=True)
+             await context.abort(grpc.StatusCode.INTERNAL, str(e))
+
+     async def ExecuteTask(
+         self,
+         request_iterator: Iterator[executor_pb2.ExecuteTaskRequest], # type: ignore[name-defined]
+         context: grpc.aio.ServicerContext,
+     ) -> AsyncIterator[executor_pb2.ExecuteTaskResponse]: # type: ignore[name-defined]
+         self.logger.debug("ExecuteTask called")
+
+         # Right now, only handle task execution without interrupts, etc
+         try:
+             request = await _get_init_request(request_iterator)
+             config = reconstruct_config(request.task.config)
+             async with self.get_graph(request.graph_name, config) as graph:
+                 stream_messages = "messages" in request.stream_modes
+                 stream_custom = "custom" in request.stream_modes
+
+                 stream_queue = asyncio.Queue()
+
+                 custom_stream_writer = (
+                     _create_custom_stream_writer(
+                         stream_queue, self.logger, on_custom=self.on_custom
+                     )
+                     if stream_custom
+                     else None
+                 )
+
+                 task = reconstruct_task(
+                     request, graph, custom_stream_writer=custom_stream_writer
+                 )
+                 if stream_messages:
+                     # Create and inject callback handler
+                     stream_handler = ExecutorStreamHandler(
+                         functools.partial(
+                             stream_callback,
+                             logger=self.logger,
+                             stream_queue=stream_queue,
+                             on_message=self.on_message,
+                         ),
+                         task.id,
+                     )
+
+                     # Add handler to task config callbacks
+                     if "callbacks" not in task.config:
+                         task.config["callbacks"] = []
+                     task.config["callbacks"].append(stream_handler) # type: ignore[union-attr]
+
+                 # Execute task, catching interrupts
+                 # Check cache if task has cache key - send request to Go orchestrator
+                 should_execute = True
+                 if task.cache_key:
+                     self.logger.debug(
+                         f"Task {task.id} has cache key, sending cache check request to Go",
+                     )
+
+                     # Send cache check request to Go runtime
+                     cache_check_request = executor_pb2.CacheCheckRequest(
+                         cache_namespace=list(task.cache_key.ns),
+                         cache_key=task.cache_key.key,
+                         ttl=task.cache_key.ttl,
+                     )
+
+                     yield executor_pb2.ExecuteTaskResponse(
+                         cache_check_request=cache_check_request,
+                     )
+
+                     # Wait for Go's response via the bidirectional stream
+                     try:
+                         cache_response_request = next(request_iterator)
+                         if hasattr(cache_response_request, "cache_check_response"):
+                             cache_response = cache_response_request.cache_check_response
+                             should_execute = not cache_response.cache_hit
+                             self.logger.debug(
+                                 f"Received cache response for task {task.id}: cache_hit={cache_response.cache_hit}",
+                             )
+                         else:
+                             self.logger.warning(
+                                 f"Expected cache_check_response for task {task.id}, got unexpected message type",
+                             )
+                             should_execute = (
+                                 True # Default to execution if unexpected response
+                             )
+                     except StopIteration:
+                         self.logger.warning(
+                             f"No cache response received for task {task.id}, defaulting to execution",
+                         )
+                         should_execute = True # Default to execution if no response
+
+                 # TODO patch retry policy
+                 # TODO configurable to deal with _call and the functional api
+                 exception_pb = None
+                 if should_execute:
+                     runner_task = asyncio.create_task(
+                         _run_task(task, logger=self.logger, stream_queue=stream_queue)
+                     )
+                     # Drain the queue and stream responses to client
+                     while True:
+                         item = await stream_queue.get()
+                         if item is SENTINEL:
+                             break
+                         yield item
+                     exception_pb = await runner_task
+
+                 # Ensure the final chat messages are emitted (if any)
+                 final_messages = _extract_output_messages(task.writes)
+                 if final_messages:
+                     for message in final_messages:
+                         yield executor_pb2.ExecuteTaskResponse(
+                             message_or_message_chunk=message
+                         )
+
+                 # Final task result
+                 yield executor_pb2.ExecuteTaskResponse(
+                     task_result=executor_pb2.TaskResult(
+                         error=exception_pb, writes=extract_writes(task.writes)
+                     )
+                 )
+
+         except Exception as e:
+             self.logger.exception(f"ExecuteTask error: {e}")
+             await context.abort(grpc.StatusCode.INTERNAL, str(e))
+
+     async def ApplyWrites(
+         self, request: Any, context: grpc.aio.ServicerContext
+     ) -> executor_pb2.ApplyWritesResponse: # type: ignore[name-defined]
+         # get graph
+         self.logger.debug("ApplyWrites called")
+         try:
+             async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
+                 channels, _ = reconstruct_channels(
+                     request.channels.channels,
+                     graph,
+                     # TODO: figure this out
+                     scratchpad=None, # type: ignore[invalid-arg-type]
+                 )
+                 checkpoint = reconstruct_checkpoint(request.checkpoint)
+                 tasks = reconstruct_task_writes(request.tasks)
+
+                 # apply writes
+                 updated_channel_names_set = apply_writes(
+                     checkpoint,
+                     channels,
+                     tasks,
+                     lambda *args: request.next_version,
+                     graph.trigger_to_nodes,
+                 )
+                 updated_channel_names = list(updated_channel_names_set)
+
+                 # Reconstruct protos
+                 updated_channels = extract_channels(channels)
+                 checkpoint_proto = checkpoint_to_proto(checkpoint)
+
+                 # Respond with updates
+                 return executor_pb2.ApplyWritesResponse(
+                     updates=updates_to_proto(
+                         checkpoint_proto,
+                         updated_channel_names,
+                         updated_channels,
+                     ),
+                 )
+
+         except Exception as e:
+             self.logger.exception(f"ApplyWrites error: {e}")
+             await context.abort(grpc.StatusCode.INTERNAL, str(e))
+
+     async def GenerateCacheKey(
+         self,
+         request: executor_pb2.GenerateCacheKeyRequest,
+         context: grpc.aio.ServicerContext,
+     ) -> executor_pb2.GenerateCacheKeyResponse:
+         """Generate cache key for a node execution"""
+         raise NotImplementedError("GenerateCacheKey not implemented")
+
+
+ # Helpers
+
+
+ async def _run_task(
+     task: PregelExecutableTask,
+     *,
+     logger: Logger,
+     stream_queue: asyncio.Queue[executor_pb2.ExecuteTaskResponse],
+ ) -> types_pb2.ExecutorError | None:
+     try:
+         await arun_with_retry(
+             task,
+             retry_policy=None,
+         )
+
+     except Exception as e:
+         if isinstance(e, GraphBubbleUp | GraphInterrupt):
+             logger.info(f"Interrupt in task {task.id}: {e}")
+         else:
+             logger.exception(
+                 f"Exception running task {task.id}: {e}\nTask: {task}\n\n",
+                 exc_info=True,
+             )
+         return exception_to_pb(e)
+     finally:
+         await stream_queue.put(SENTINEL)
+
+
+ def stream_callback(
+     message: BaseMessageChunk,
+     metadata: dict[str, Any],
+     *,
+     logger: Logger,
+     stream_queue: asyncio.Queue[executor_pb2.ExecuteTaskResponse],
+     on_message: Callable[[BaseMessageChunk, dict[str, Any]], None] | None = None,
+ ):
+     """Callback to capture stream chunks and queue them."""
+     try:
+         if on_message is not None:
+             on_message(message, metadata)
+         stream_queue.put_nowait(
+             executor_pb2.ExecuteTaskResponse(
+                 message_or_message_chunk=_extract_output_message(message)
+             )
+         )
+     except Exception as e:
+         logger.warning(f"Failed to create stream chunk: {e}", exc_info=True)
+
+
+ def _create_custom_stream_writer(
+     stream_queue: asyncio.Queue[Any],
+     logger: Logger,
+     *,
+     on_custom: Callable[[Any], None] | None = None,
+ ):
+     """Create a proper stream_writer function for custom mode (like langgraph does)."""
+
+     def stream_writer(content):
+         """Custom stream writer that creates CustomStreamEvent messages."""
+         try:
+             if on_custom is not None:
+                 on_custom(content)
+             # Create payload struct (like langgraph does)
+             payload = Struct()
+             if isinstance(content, str):
+                 payload.update({"content": content})
+             elif isinstance(content, dict):
+                 payload.update(content)
+             else:
+                 payload.update({"content": str(content)})
+
+             # Create CustomStreamEvent
+             custom_event = executor_pb2.CustomStreamEvent(payload=payload)
+             custom_event_response = executor_pb2.ExecuteTaskResponse(
+                 custom_stream_event=custom_event
+             )
+             stream_queue.put_nowait(custom_event_response)
+
+         except Exception as e:
+             logger.warning(f"Failed to create custom stream event: {e}", exc_info=True)
+
+     return stream_writer
+
+
+ def _extract_output_messages(writes: Sequence[Any]) -> list[types_pb2.Message]: # type: ignore[name-defined]
+     messages = []
+     for write in writes:
+         # Not sure this check is right
+         if isinstance(write[1], BaseMessage):
+             messages.append(_extract_output_message(write[1]))
+         elif isinstance(write[1], Sequence):
+             messages.extend(
+                 [
+                     _extract_output_message(w)
+                     for w in write[1]
+                     if isinstance(w, BaseMessage)
+                 ]
+             )
+
+     return messages
+
+
+ def _extract_output_message(write: Any) -> types_pb2.Message: # type: ignore[name-defined]
+     message = Struct()
+     message.update(
+         {
+             "is_streaming_chunk": False,
+             "message": {
+                 "id": getattr(write, "id", None) or uuid.uuid4().hex,
+                 "type": getattr(write, "type", None),
+                 "content": str(getattr(write, "content", "") or ""),
+                 "additional_kwargs": getattr(write, "additional_kwargs", {}),
+                 "usage_metadata": getattr(write, "usage_metadata", {}),
+                 "tool_calls": getattr(write, "tool_calls", []),
+                 "tool_call_id": getattr(write, "tool_call_id", ""),
+                 "tool_call_chunks": getattr(write, "tool_call_chunks", []),
+                 "response_metadata": getattr(write, "response_metadata", {}),
+             },
+             "metadata": {},
+         }
+     )
+     return types_pb2.Message(payload=message)
+
+
+ async def _get_init_request(request_iterator):
+     request = await anext(request_iterator)
+
+     if not hasattr(request, "init"):
+         raise ValueError("First message must be init")
+
+     return request.init
+
+
+ @functools.lru_cache(maxsize=1)
+ def _patch_base_message_with_ids() -> None:
+     """Patch the specific BaseMessage class used in your system."""
+     try:
+         from langchain_core.messages import BaseMessage
+
+         original_init = BaseMessage.__init__
+
+         def patched_init(self, content: Any, **kwargs: Any) -> None:
+             original_init(self, content, **kwargs)
+             if self.id is None:
+                 self.id = str(uuid.uuid4())
+
+         BaseMessage.__init__ = patched_init # type: ignore[method-assign]
+     except Exception as e:
+         LOGGER.warning("Failed to patch BaseMessage with IDs: %s", e)
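
The base servicer also accepts on_message and on_custom hooks that create_server does not expose. Below is a sketch of wiring it up directly, reusing _load_graphs and get_graph from executor.py above; the log_chunk callback and the "langgraph_node" metadata key are illustrative assumptions, not part of this package.

import functools
from typing import Any

import grpc.aio
from langchain_core.messages import BaseMessageChunk
from langgraph.pregel import Pregel

from langgraph_executor.executor import _load_graphs, get_graph
from langgraph_executor.executor_base import LangGraphExecutorServicer
from langgraph_executor.pb.executor_pb2_grpc import (
    add_LangGraphExecutorServicer_to_server,
)


def log_chunk(message: BaseMessageChunk, metadata: dict[str, Any]) -> None:
    # Called from stream_callback before each chunk is queued for the client;
    # the metadata key used here is an assumption about langgraph's callbacks.
    print(f"chunk from {metadata.get('langgraph_node')}: {message.content!r}")


def build_server(graphs: dict[str, Pregel], address: str) -> grpc.aio.Server:
    graphs, subgraph_map = _load_graphs(graphs)
    server = grpc.aio.server()
    servicer = LangGraphExecutorServicer(
        graphs,  # the servicer only needs the graph names plus get_graph
        subgraph_map=subgraph_map,
        get_graph=functools.partial(get_graph, graphs=graphs),
        on_message=log_chunk,  # on_custom= hooks "custom" stream events similarly
    )
    add_LangGraphExecutorServicer_to_server(servicer, server)
    server.add_insecure_port(address)
    return server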