langgraph-executor 0.0.1a0__py3-none-any.whl → 0.0.1a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,473 @@
1
+ import asyncio
2
+ import contextlib
3
+ import functools
4
+ import logging
5
+ import uuid
6
+ from collections.abc import AsyncIterator, Callable, Collection, Iterator, Sequence
7
+ from typing import Any, Protocol, cast
8
+
9
+ import grpc
10
+ import grpc.aio
11
+ from google.protobuf.struct_pb2 import Struct # type: ignore[import-untyped]
12
+ from langchain_core.messages import BaseMessage, BaseMessageChunk
13
+ from langchain_core.runnables import RunnableConfig
14
+ from langgraph.checkpoint.base import Checkpoint
15
+ from langgraph.errors import GraphBubbleUp, GraphInterrupt
16
+ from langgraph.pregel import Pregel
17
+ from langgraph.pregel._algo import apply_writes
18
+ from langgraph.pregel._checkpoint import channels_from_checkpoint
19
+ from langgraph.pregel._retry import arun_with_retry
20
+ from langgraph.types import PregelExecutableTask
21
+
22
+ from langgraph_executor.common import (
23
+ checkpoint_to_proto,
24
+ exception_to_pb,
25
+ extract_channels,
26
+ pb_to_val,
27
+ reconstruct_channels,
28
+ reconstruct_checkpoint,
29
+ reconstruct_config,
30
+ reconstruct_task_writes,
31
+ updates_to_proto,
32
+ )
33
+ from langgraph_executor.execute_task import (
34
+ extract_writes,
35
+ reconstruct_task,
36
+ )
37
+ from langgraph_executor.extract_graph import extract_graph
38
+ from langgraph_executor.pb import executor_pb2, executor_pb2_grpc, types_pb2
39
+ from langgraph_executor.stream_utils import ExecutorStreamHandler
40
+
41
+
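+ # Minimal structural logging interface: any object exposing these methods
+ # (e.g. logging.Logger) can be passed to the servicer.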
42
+ class Logger(Protocol):
43
+ def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
44
+ def info(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
45
+ def warning(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
46
+ def error(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
47
+ def exception(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
48
+ def critical(self, msg: str, *args: Any, **kwargs: Any) -> None: ...
49
+
50
+
51
+ LOGGER = logging.getLogger(__name__)
52
+ SENTINEL = cast(executor_pb2.ExecuteTaskResponse, object())
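+ # Unique sentinel object, compared by identity in ExecuteTask's drain loop to signal end-of-stream.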
53
+ GetGraph = Callable[
54
+ [str, RunnableConfig], contextlib.AbstractAsyncContextManager[Pregel]
55
+ ]
56
+
57
+
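+ # A GetGraph factory returns an async context manager that yields the compiled Pregel
+ # graph for a given name and config. A minimal sketch (illustrative only, assuming a
+ # prebuilt GRAPHS: dict[str, Pregel] registry) could look like:
+ #
+ #     @contextlib.asynccontextmanager
+ #     async def get_graph(name: str, config: RunnableConfig) -> AsyncIterator[Pregel]:
+ #         yield GRAPHS[name]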
58
+ class LangGraphExecutorServicer(executor_pb2_grpc.LangGraphExecutorServicer):
59
+ """gRPC servicer for LangGraph runtime execution operations."""
60
+
61
+ def __init__(
62
+ self,
63
+ graphs: Collection[str],
64
+ *,
65
+ subgraph_map: dict[str, str],
66
+ get_graph: GetGraph,
67
+ logger: Logger | None = None,
68
+ ):
69
+ """Initialize the servicer with compiled graphs.
70
+
71
+ Args:
72
+ graphs: Collection of available graph names
73
+ subgraph_map: Dictionary mapping subgraph names to parent graph names
74
+ get_graph: Async context manager factory that yields the compiled graph for a name and config
75
+ logger: Optional logger
76
+
77
+ """
78
+ self.logger = logger or LOGGER
79
+ self.graphs = set(graphs)
80
+ self.graph_names = sorted(self.graphs)
81
+ self.subgraph_map = subgraph_map
82
+ self.get_graph = get_graph
83
+ _patch_base_message_with_ids()
84
+ self._graph_definition_cache: dict[str, executor_pb2.GetGraphResponse] = {}
85
+
86
+ async def ListGraphs(
87
+ self, request: Any, context: grpc.aio.ServicerContext
88
+ ) -> executor_pb2.ListGraphsResponse: # type: ignore[name-defined]
89
+ """List available graphs."""
90
+ return executor_pb2.ListGraphsResponse(
91
+ graph_names=self.graph_names,
92
+ )
93
+
94
+ async def GetGraph(
95
+ self, request: Any, context: grpc.aio.ServicerContext
96
+ ) -> executor_pb2.GetGraphResponse: # type: ignore[name-defined]
97
+ """Get graph definition."""
98
+ try:
99
+ self.logger.debug("GetGraph called")
100
+ graph_name: str = request.graph_name
101
+ return await self._get_graph_definition(graph_name)
102
+
103
+ except Exception as e:
104
+ self.logger.error(f"GetGraph Error: {e}", exc_info=True)
105
+ await context.abort(grpc.StatusCode.INTERNAL, str(e))
106
+
107
+ async def _get_graph_definition(self, name: str) -> executor_pb2.GetGraphResponse:
108
+ if (resp := self._graph_definition_cache.get(name)) is not None:
109
+ return resp
110
+ async with self.get_graph(name, RunnableConfig()) as graph:
111
+ graph_definition = extract_graph(graph)
112
+
113
+ resp = executor_pb2.GetGraphResponse(
114
+ graph_definition=graph_definition,
115
+ parent_name=self.subgraph_map.get(name, None),
116
+ checkpointer=graph.checkpointer is not None,
117
+ )
118
+ self._graph_definition_cache[name] = resp
119
+ return resp
120
+
121
+ async def GetAllGraphs(
122
+ self,
123
+ request: executor_pb2.GetAllGraphsRequest,
124
+ context: grpc.aio.ServicerContext,
125
+ ) -> AsyncIterator[executor_pb2.GetGraphResponse]:
126
+ try:
127
+ self.logger.debug("GetAllGraphs called")
128
+ for name in self.graph_names:
129
+ yield await self._get_graph_definition(name)
130
+
131
+ except Exception as e:
132
+ self.logger.error(f"GetAllGraphs Error: {e}", exc_info=True)
133
+ await context.abort(grpc.StatusCode.INTERNAL, str(e))
134
+
135
+ async def ChannelsFromCheckpoint(
136
+ self, request: Any, context: grpc.aio.ServicerContext
137
+ ) -> executor_pb2.ChannelsFromCheckpointResponse: # type: ignore[name-defined]
138
+ try:
139
+ self.logger.debug("ChannelsFromCheckpoint called")
140
+ async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
141
+ # reconstruct specs
142
+ specs, _ = reconstruct_channels(
143
+ request.specs.channels,
144
+ graph,
145
+ scratchpad=None, # type: ignore[invalid-arg-type]
146
+ )
147
+
148
+ # initialize channels from specs and checkpoint channel values
149
+ checkpoint_dummy = Checkpoint( # type: ignore[typeddict-item]
150
+ channel_values={
151
+ k: pb_to_val(v)
152
+ for k, v in request.checkpoint_channel_values.items()
153
+ },
154
+ )
155
+ channels, _ = channels_from_checkpoint(specs, checkpoint_dummy)
156
+
157
+ # channels to pb
158
+ channels = extract_channels(channels)
159
+
160
+ return executor_pb2.ChannelsFromCheckpointResponse(channels=channels)
161
+
162
+ except Exception as e:
163
+ self.logger.error(f"ChannelsFromCheckpoint Error: {e}", exc_info=True)
164
+ await context.abort(grpc.StatusCode.INTERNAL, str(e))
165
+
166
+ async def ExecuteTask(
167
+ self,
168
+ request_iterator: AsyncIterator[executor_pb2.ExecuteTaskRequest], # type: ignore[name-defined]
169
+ context: grpc.aio.ServicerContext,
170
+ ) -> AsyncIterator[executor_pb2.ExecuteTaskResponse]: # type: ignore[name-defined]
171
+ self.logger.debug("ExecuteTask called")
172
+
173
+ # For now, only handle plain task execution; interrupts etc. are not fully supported here
174
+ try:
175
+ request = await _get_init_request(request_iterator)
176
+ config = reconstruct_config(request.task.config)
177
+ async with self.get_graph(request.graph_name, config) as graph:
178
+ stream_messages = "messages" in request.stream_modes
179
+ stream_custom = "custom" in request.stream_modes
180
+
181
+ stream_queue = asyncio.Queue()
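+ # Both the message callback and the custom stream writer enqueue ExecuteTaskResponse
+ # protos here; _run_task enqueues SENTINEL on completion so the drain loop below can stop.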
182
+
183
+ custom_stream_writer = (
184
+ _create_custom_stream_writer(stream_queue, self.logger)
185
+ if stream_custom
186
+ else None
187
+ )
188
+
189
+ task = reconstruct_task(
190
+ request, graph, custom_stream_writer=custom_stream_writer
191
+ )
192
+ if stream_messages:
193
+ # Create and inject callback handler
194
+ stream_handler = ExecutorStreamHandler(
195
+ functools.partial(
196
+ stream_callback,
197
+ logger=self.logger,
198
+ stream_queue=stream_queue,
199
+ ),
200
+ task.id,
201
+ )
202
+
203
+ # Add handler to task config callbacks
204
+ if "callbacks" not in task.config:
205
+ task.config["callbacks"] = []
206
+ task.config["callbacks"].append(stream_handler) # type: ignore[union-attr]
207
+
208
+ # Execute task, catching interrupts
209
+ # Check cache if task has cache key - send request to Go orchestrator
210
+ should_execute = True
211
+ if task.cache_key:
212
+ self.logger.debug(
213
+ f"Task {task.id} has cache key, sending cache check request to Go",
214
+ )
215
+
216
+ # Send cache check request to Go runtime
217
+ cache_check_request = executor_pb2.CacheCheckRequest(
218
+ cache_namespace=list(task.cache_key.ns),
219
+ cache_key=task.cache_key.key,
220
+ ttl=task.cache_key.ttl,
221
+ )
222
+
223
+ yield executor_pb2.ExecuteTaskResponse(
224
+ cache_check_request=cache_check_request,
225
+ )
226
+
227
+ # Wait for Go's response via the bidirectional stream
228
+ try:
229
+ cache_response_request = await anext(request_iterator)
230
+ if cache_response_request.HasField("cache_check_response"):
231
+ cache_response = cache_response_request.cache_check_response
232
+ should_execute = not cache_response.cache_hit
233
+ self.logger.debug(
234
+ f"Received cache response for task {task.id}: cache_hit={cache_response.cache_hit}",
235
+ )
236
+ else:
237
+ self.logger.warning(
238
+ f"Expected cache_check_response for task {task.id}, got unexpected message type",
239
+ )
240
+ should_execute = (
241
+ True # Default to execution if unexpected response
242
+ )
243
+ except StopAsyncIteration:
244
+ self.logger.warning(
245
+ f"No cache response received for task {task.id}, defaulting to execution",
246
+ )
247
+ should_execute = True # Default to execution if no response
248
+
249
+ # TODO patch retry policy
250
+ # TODO configurable to deal with _call and the functional api
251
+ exception_pb = None
252
+ if should_execute:
253
+ runner_task = asyncio.create_task(
254
+ _run_task(task, logger=self.logger, stream_queue=stream_queue)
255
+ )
256
+ # Drain the queue and stream responses to client
257
+ while True:
258
+ item = await stream_queue.get()
259
+ if item is SENTINEL:
260
+ break
261
+ yield item
262
+ exception_pb = await runner_task
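+ # _run_task enqueues SENTINEL from its finally block, so once the drain loop exits the
+ # task has already finished and awaiting it above returns immediately with any error proto.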
263
+
264
+ # Ensure the final chat messages are emitted (if any)
265
+ final_messages = _extract_output_messages(task.writes)
266
+ if final_messages:
267
+ for message in final_messages:
268
+ yield executor_pb2.ExecuteTaskResponse(
269
+ message_or_message_chunk=message
270
+ )
271
+
272
+ # Final task result
273
+ yield executor_pb2.ExecuteTaskResponse(
274
+ task_result=executor_pb2.TaskResult(
275
+ error=exception_pb, writes=extract_writes(task.writes)
276
+ )
277
+ )
278
+
279
+ except Exception as e:
280
+ self.logger.exception(f"ExecuteTask error: {e}")
281
+ await context.abort(grpc.StatusCode.INTERNAL, str(e))
282
+
283
+ async def ApplyWrites(
284
+ self, request: Any, context: grpc.aio.ServicerContext
285
+ ) -> executor_pb2.ApplyWritesResponse: # type: ignore[name-defined]
286
+ # get graph
287
+ self.logger.debug("ApplyWrites called")
288
+ try:
289
+ async with self.get_graph(request.graph_name, RunnableConfig()) as graph:
290
+ channels, _ = reconstruct_channels(
291
+ request.channels.channels,
292
+ graph,
293
+ # TODO: figure this out
294
+ scratchpad=None, # type: ignore[invalid-arg-type]
295
+ )
296
+ checkpoint = reconstruct_checkpoint(request.checkpoint)
297
+ tasks = reconstruct_task_writes(request.tasks)
298
+
299
+ # apply writes
300
+ updated_channel_names_set = apply_writes(
301
+ checkpoint,
302
+ channels,
303
+ tasks,
304
+ lambda *args: request.next_version,
305
+ graph.trigger_to_nodes,
306
+ )
307
+ updated_channel_names = list(updated_channel_names_set)
308
+
309
+ # Reconstruct protos
310
+ updated_channels = extract_channels(channels)
311
+ checkpoint_proto = checkpoint_to_proto(checkpoint)
312
+
313
+ # Respond with updates
314
+ return executor_pb2.ApplyWritesResponse(
315
+ updates=updates_to_proto(
316
+ checkpoint_proto,
317
+ updated_channel_names,
318
+ updated_channels,
319
+ ),
320
+ )
321
+
322
+ except Exception as e:
323
+ self.logger.exception(f"ApplyWrites error: {e}")
324
+ await context.abort(grpc.StatusCode.INTERNAL, str(e))
325
+
326
+ async def GenerateCacheKey(
327
+ self,
328
+ request: executor_pb2.GenerateCacheKeyRequest,
329
+ context: grpc.aio.ServicerContext,
330
+ ) -> executor_pb2.GenerateCacheKeyResponse:
331
+ """Generate cache key for a node execution"""
332
+ raise NotImplementedError("GenerateCacheKey not implemented")
333
+
334
+
335
+ # Helpers
336
+
337
+
338
+ async def _run_task(
339
+ task: PregelExecutableTask,
340
+ *,
341
+ logger: Logger,
342
+ stream_queue: asyncio.Queue[executor_pb2.ExecuteTaskResponse],
343
+ ) -> types_pb2.ExecutorError | None:
344
+ try:
345
+ await arun_with_retry(
346
+ task,
347
+ retry_policy=None,
348
+ )
349
+
350
+ except Exception as e:
351
+ if isinstance(e, GraphBubbleUp | GraphInterrupt):
352
+ logger.info(f"Interrupt in task {task.id}: {e}")
353
+ else:
354
+ logger.exception(
355
+ f"Exception running task {task.id}: {e}\nTask: {task}\n\n",
356
+ exc_info=True,
357
+ )
358
+ return exception_to_pb(e)
359
+ finally:
360
+ await stream_queue.put(SENTINEL)
361
+
362
+
363
+ def stream_callback(
364
+ message: BaseMessageChunk,
365
+ metadata: dict,
366
+ *,
367
+ logger: Logger,
368
+ stream_queue: asyncio.Queue[executor_pb2.ExecuteTaskResponse],
369
+ ):
370
+ """Callback to capture stream chunks and queue them."""
371
+ try:
372
+ stream_queue.put_nowait(
373
+ executor_pb2.ExecuteTaskResponse(
374
+ message_or_message_chunk=_extract_output_message(message)
375
+ )
376
+ )
377
+ except Exception as e:
378
+ logger.warning(f"Failed to create stream chunk: {e}", exc_info=True)
379
+
380
+
381
+ def _create_custom_stream_writer(stream_queue: asyncio.Queue[Any], logger: Logger):
382
+ """Create a proper stream_writer function for custom mode (like langgraph does)."""
383
+
384
+ def stream_writer(content):
385
+ """Custom stream writer that creates CustomStreamEvent messages."""
386
+ try:
387
+ # Create payload struct (like langgraph does)
388
+ payload = Struct()
389
+ if isinstance(content, str):
390
+ payload.update({"content": content})
391
+ elif isinstance(content, dict):
392
+ payload.update(content)
393
+ else:
394
+ payload.update({"content": str(content)})
395
+
396
+ # Create CustomStreamEvent
397
+ custom_event = executor_pb2.CustomStreamEvent(payload=payload)
398
+ custom_event_response = executor_pb2.ExecuteTaskResponse(
399
+ custom_stream_event=custom_event
400
+ )
401
+ stream_queue.put_nowait(custom_event_response)
402
+
403
+ except Exception as e:
404
+ logger.warning(f"Failed to create custom stream event: {e}", exc_info=True)
405
+
406
+ return stream_writer
407
+
408
+
409
+ def _extract_output_messages(writes: Sequence[Any]) -> list[types_pb2.Message]: # type: ignore[name-defined]
410
+ messages = []
411
+ for write in writes:
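+ # task.writes holds (channel, value) pairs; inspect each value for messages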
412
+ # Not sure this check is right
413
+ if isinstance(write[1], BaseMessage):
414
+ messages.append(_extract_output_message(write[1]))
415
+ elif isinstance(write[1], Sequence):
416
+ messages.extend(
417
+ [
418
+ _extract_output_message(w)
419
+ for w in write[1]
420
+ if isinstance(w, BaseMessage)
421
+ ]
422
+ )
423
+
424
+ return messages
425
+
426
+
427
+ def _extract_output_message(write: Any) -> types_pb2.Message: # type: ignore[name-defined]
428
+ message = Struct()
429
+ message.update(
430
+ {
431
+ "is_streaming_chunk": False,
432
+ "message": {
433
+ "id": getattr(write, "id", None) or uuid.uuid4().hex,
434
+ "type": getattr(write, "type", None),
435
+ "content": str(getattr(write, "content", "") or ""),
436
+ "additional_kwargs": getattr(write, "additional_kwargs", {}),
437
+ "usage_metadata": getattr(write, "usage_metadata", {}),
438
+ "tool_calls": getattr(write, "tool_calls", []),
439
+ "tool_call_id": getattr(write, "tool_call_id", ""),
440
+ "tool_call_chunks": getattr(write, "tool_call_chunks", []),
441
+ "response_metadata": getattr(write, "response_metadata", {}),
442
+ },
443
+ "metadata": {},
444
+ }
445
+ )
446
+ return types_pb2.Message(payload=message)
447
+
448
+
449
+ async def _get_init_request(request_iterator):
450
+ request = await anext(request_iterator)
451
+
452
+ if not hasattr(request, "init"):
453
+ raise ValueError("First message must be init")
454
+
455
+ return request.init
456
+
457
+
458
+ @functools.lru_cache(maxsize=1)
459
+ def _patch_base_message_with_ids() -> None:
460
+ """Patch the specific BaseMessage class used in your system."""
461
+ try:
462
+ from langchain_core.messages import BaseMessage
463
+
464
+ original_init = BaseMessage.__init__
465
+
466
+ def patched_init(self, content: Any, **kwargs: Any) -> None:
467
+ original_init(self, content, **kwargs)
468
+ if self.id is None:
469
+ self.id = str(uuid.uuid4())
470
+
471
+ BaseMessage.__init__ = patched_init # type: ignore[method-assign]
472
+ except Exception as e:
473
+ LOGGER.warning("Failed to patch BaseMessage with IDs: %s", e)
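+ # Typical wiring (illustrative sketch, not part of this module): register the servicer on
+ # a grpc.aio server via the generated helper, e.g.
+ #
+ #     server = grpc.aio.server()
+ #     executor_pb2_grpc.add_LangGraphExecutorServicer_to_server(
+ #         LangGraphExecutorServicer(graph_names, subgraph_map={}, get_graph=get_graph),
+ #         server,
+ #     )
+ #     server.add_insecure_port("localhost:50051")
+ #     await server.start()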
@@ -80,11 +80,8 @@ def extract_reserved_configurable(
80
80
  )
81
81
 
82
82
 
83
- def extract_nodes(nodes: dict[str, PregelNode]) -> dict[str, graph_pb2.NodeDefinition]:
84
- out = {}
85
- for k, v in nodes.items():
86
- out[k] = extract_node(k, v)
87
- return out
83
+ def extract_nodes(nodes: dict[str, PregelNode]) -> list[graph_pb2.NodeDefinition]:
84
+ return [extract_node(k, v) for k, v in nodes.items()]
88
85
 
89
86
 
90
87
  def extract_node(name: str, node: PregelNode) -> graph_pb2.NodeDefinition:
@@ -8,7 +8,7 @@ from pathlib import Path
8
8
  class ExecutorInfo:
9
9
  id: str
10
10
  pid: int
11
- port: int
11
+ address: str
12
12
  status: str
13
13
  start_time: float
14
14
  end_time: float | None = None
@@ -33,7 +33,7 @@ class ExecutorInfoLogger:
33
33
  data = {
34
34
  "id": executor_info.id,
35
35
  "pid": executor_info.pid,
36
- "port": executor_info.port,
36
+ "address": executor_info.address,
37
37
  "status": executor_info.status,
38
38
  "start_time": executor_info.start_time,
39
39
  "end_time": executor_info.end_time,
@@ -61,7 +61,7 @@ class ExecutorInfoLogger:
61
61
  return ExecutorInfo(
62
62
  id=data["id"],
63
63
  pid=data["pid"],
64
- port=data["port"],
64
+ address=data["address"],
65
65
  status=data["status"],
66
66
  start_time=data["start_time"],
67
67
  end_time=data.get("end_time"),
@@ -24,9 +24,10 @@ _sym_db = _symbol_database.Default()
24
24
 
25
25
  from . import types_pb2 as types__pb2
26
26
  from . import graph_pb2 as graph__pb2
27
+ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
27
28
 
28
29
 
29
- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x65xecutor.proto\x12\x08\x65xecutor\x1a\x0btypes.proto\x1a\x0bgraph.proto\"\x13\n\x11ListGraphsRequest\")\n\x12ListGraphsResponse\x12\x13\n\x0bgraph_names\x18\x01 \x03(\t\"%\n\x0fGetGraphRequest\x12\x12\n\ngraph_name\x18\x01 \x01(\t\"D\n\x10GetGraphResponse\x12\x30\n\x10graph_definition\x18\x01 \x01(\x0b\x32\x16.graph.GraphDefinition\"\x8a\x02\n\x1d\x43hannelsFromCheckpointRequest\x12\x12\n\ngraph_name\x18\x01 \x01(\t\x12\x1e\n\x05specs\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12g\n\x19\x63heckpoint_channel_values\x18\x03 \x03(\x0b\x32\x44.executor.ChannelsFromCheckpointRequest.CheckpointChannelValuesEntry\x1aL\n\x1c\x43heckpointChannelValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1b\n\x05value\x18\x02 \x01(\x0b\x32\x0c.types.Value:\x02\x38\x01\"C\n\x1e\x43hannelsFromCheckpointResponse\x12!\n\x08\x63hannels\x18\x01 \x01(\x0b\x32\x0f.types.Channels\"\x88\x01\n\x12\x45xecuteTaskRequest\x12)\n\x04init\x18\x01 \x01(\x0b\x32\x19.executor.ExecuteTaskInitH\x00\x12<\n\x14\x63\x61\x63he_check_response\x18\x02 \x01(\x0b\x32\x1c.executor.CacheCheckResponseH\x00\x42\t\n\x07message\"\xd6\x01\n\x0f\x45xecuteTaskInit\x12\x19\n\x04task\x18\x01 \x01(\x0b\x32\x0b.types.Task\x12!\n\x08\x63hannels\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12\x14\n\x0cstream_modes\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\x12\x15\n\rcheckpoint_ns\x18\x05 \x03(\t\x12\x13\n\x0boutput_keys\x18\x06 \x03(\t\x12\x13\n\x0bstream_keys\x18\x07 \x03(\t\x12\x12\n\ngraph_name\x18\x08 \x01(\t\x12\x0c\n\x04stop\x18\t \x01(\x05\"\xbd\x01\n\x13\x45xecuteTaskResponse\x12+\n\x0btask_result\x18\x01 \x01(\x0b\x32\x14.executor.TaskResultH\x00\x12\x32\n\x18message_or_message_chunk\x18\x02 \x01(\x0b\x32\x0e.types.MessageH\x00\x12:\n\x13\x63\x61\x63he_check_request\x18\x03 \x01(\x0b\x32\x1b.executor.CacheCheckRequestH\x00\x42\t\n\x07message\"^\n\nTaskResult\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.types.ExecutorErrorH\x00\x88\x01\x01\x12\x1c\n\x06writes\x18\x02 \x03(\x0b\x32\x0c.types.WriteB\x08\n\x06_error\"\xa4\x01\n\x12\x41pplyWritesRequest\x12%\n\ncheckpoint\x18\x01 \x01(\x0b\x32\x11.types.Checkpoint\x12!\n\x08\x63hannels\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12\x1a\n\x05tasks\x18\x03 \x03(\x0b\x32\x0b.types.Task\x12\x14\n\x0cnext_version\x18\x04 \x01(\t\x12\x12\n\ngraph_name\x18\x05 \x01(\t\"b\n\x13\x41pplyWritesResponse\x12!\n\x07updates\x18\x01 \x01(\x0b\x32\x0e.types.UpdatesH\x00\x12\x1d\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.types.ValueH\x00\x42\t\n\x07message\"\xc8\x01\n\x17GenerateCacheKeyRequest\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x16\n\x0einput_channels\x18\x02 \x03(\t\x12L\n\x0e\x63hannel_values\x18\x03 \x03(\x0b\x32\x34.executor.GenerateCacheKeyRequest.ChannelValuesEntry\x1a\x34\n\x12\x43hannelValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"-\n\x18GenerateCacheKeyResponse\x12\x11\n\tcache_key\x18\x01 \x01(\t\"Y\n\x11\x43\x61\x63heCheckRequest\x12\x17\n\x0f\x63\x61\x63he_namespace\x18\x01 \x03(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12\x10\n\x03ttl\x18\x03 \x01(\x05H\x00\x88\x01\x01\x42\x06\n\x04_ttl\"\'\n\x12\x43\x61\x63heCheckResponse\x12\x11\n\tcache_hit\x18\x01 
\x01(\x08\x32\x83\x04\n\x11LangGraphExecutor\x12G\n\nListGraphs\x12\x1b.executor.ListGraphsRequest\x1a\x1c.executor.ListGraphsResponse\x12\x41\n\x08GetGraph\x12\x19.executor.GetGraphRequest\x1a\x1a.executor.GetGraphResponse\x12k\n\x16\x43hannelsFromCheckpoint\x12\'.executor.ChannelsFromCheckpointRequest\x1a(.executor.ChannelsFromCheckpointResponse\x12N\n\x0b\x45xecuteTask\x12\x1c.executor.ExecuteTaskRequest\x1a\x1d.executor.ExecuteTaskResponse(\x01\x30\x01\x12J\n\x0b\x41pplyWrites\x12\x1c.executor.ApplyWritesRequest\x1a\x1d.executor.ApplyWritesResponse\x12Y\n\x10GenerateCacheKey\x12!.executor.GenerateCacheKeyRequest\x1a\".executor.GenerateCacheKeyResponseB1Z/github.com/langchain-ai/langgraph-go/runtime/pbb\x06proto3')
30
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x65xecutor.proto\x12\x08\x65xecutor\x1a\x0btypes.proto\x1a\x0bgraph.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x13\n\x11ListGraphsRequest\")\n\x12ListGraphsResponse\x12\x13\n\x0bgraph_names\x18\x01 \x03(\t\"%\n\x0fGetGraphRequest\x12\x12\n\ngraph_name\x18\x01 \x01(\t\"\x9a\x01\n\x10GetGraphResponse\x12\x30\n\x10graph_definition\x18\x01 \x01(\x0b\x32\x16.graph.GraphDefinition\x12\x18\n\x0bparent_name\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x19\n\x0c\x63heckpointer\x18\x03 \x01(\x08H\x01\x88\x01\x01\x42\x0e\n\x0c_parent_nameB\x0f\n\r_checkpointer\"\x15\n\x13GetAllGraphsRequest\"\x8a\x02\n\x1d\x43hannelsFromCheckpointRequest\x12\x12\n\ngraph_name\x18\x01 \x01(\t\x12\x1e\n\x05specs\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12g\n\x19\x63heckpoint_channel_values\x18\x03 \x03(\x0b\x32\x44.executor.ChannelsFromCheckpointRequest.CheckpointChannelValuesEntry\x1aL\n\x1c\x43heckpointChannelValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1b\n\x05value\x18\x02 \x01(\x0b\x32\x0c.types.Value:\x02\x38\x01\"C\n\x1e\x43hannelsFromCheckpointResponse\x12!\n\x08\x63hannels\x18\x01 \x01(\x0b\x32\x0f.types.Channels\"\x88\x01\n\x12\x45xecuteTaskRequest\x12)\n\x04init\x18\x01 \x01(\x0b\x32\x19.executor.ExecuteTaskInitH\x00\x12<\n\x14\x63\x61\x63he_check_response\x18\x02 \x01(\x0b\x32\x1c.executor.CacheCheckResponseH\x00\x42\t\n\x07message\"\xd6\x01\n\x0f\x45xecuteTaskInit\x12\x19\n\x04task\x18\x01 \x01(\x0b\x32\x0b.types.Task\x12!\n\x08\x63hannels\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12\x14\n\x0cstream_modes\x18\x03 \x03(\t\x12\x0c\n\x04step\x18\x04 \x01(\x05\x12\x15\n\rcheckpoint_ns\x18\x05 \x03(\t\x12\x13\n\x0boutput_keys\x18\x06 \x03(\t\x12\x13\n\x0bstream_keys\x18\x07 \x03(\t\x12\x12\n\ngraph_name\x18\x08 \x01(\t\x12\x0c\n\x04stop\x18\t \x01(\x05\"\xf9\x01\n\x13\x45xecuteTaskResponse\x12+\n\x0btask_result\x18\x01 \x01(\x0b\x32\x14.executor.TaskResultH\x00\x12\x32\n\x18message_or_message_chunk\x18\x02 \x01(\x0b\x32\x0e.types.MessageH\x00\x12:\n\x13\x63\x61\x63he_check_request\x18\x03 \x01(\x0b\x32\x1b.executor.CacheCheckRequestH\x00\x12:\n\x13\x63ustom_stream_event\x18\x04 \x01(\x0b\x32\x1b.executor.CustomStreamEventH\x00\x42\t\n\x07message\"^\n\nTaskResult\x12(\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x14.types.ExecutorErrorH\x00\x88\x01\x01\x12\x1c\n\x06writes\x18\x02 \x03(\x0b\x32\x0c.types.WriteB\x08\n\x06_error\"=\n\x11\x43ustomStreamEvent\x12(\n\x07payload\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xa4\x01\n\x12\x41pplyWritesRequest\x12%\n\ncheckpoint\x18\x01 \x01(\x0b\x32\x11.types.Checkpoint\x12!\n\x08\x63hannels\x18\x02 \x01(\x0b\x32\x0f.types.Channels\x12\x1a\n\x05tasks\x18\x03 \x03(\x0b\x32\x0b.types.Task\x12\x14\n\x0cnext_version\x18\x04 \x01(\t\x12\x12\n\ngraph_name\x18\x05 \x01(\t\"b\n\x13\x41pplyWritesResponse\x12!\n\x07updates\x18\x01 \x01(\x0b\x32\x0e.types.UpdatesH\x00\x12\x1d\n\x05\x65rror\x18\x02 \x01(\x0b\x32\x0c.types.ValueH\x00\x42\t\n\x07message\"\xc8\x01\n\x17GenerateCacheKeyRequest\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x16\n\x0einput_channels\x18\x02 \x03(\t\x12L\n\x0e\x63hannel_values\x18\x03 \x03(\x0b\x32\x34.executor.GenerateCacheKeyRequest.ChannelValuesEntry\x1a\x34\n\x12\x43hannelValuesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c:\x02\x38\x01\"-\n\x18GenerateCacheKeyResponse\x12\x11\n\tcache_key\x18\x01 \x01(\t\"Y\n\x11\x43\x61\x63heCheckRequest\x12\x17\n\x0f\x63\x61\x63he_namespace\x18\x01 \x03(\t\x12\x11\n\tcache_key\x18\x02 \x01(\t\x12\x10\n\x03ttl\x18\x03 
\x01(\x05H\x00\x88\x01\x01\x42\x06\n\x04_ttl\"\'\n\x12\x43\x61\x63heCheckResponse\x12\x11\n\tcache_hit\x18\x01 \x01(\x08\x32\xd0\x04\n\x11LangGraphExecutor\x12G\n\nListGraphs\x12\x1b.executor.ListGraphsRequest\x1a\x1c.executor.ListGraphsResponse\x12\x41\n\x08GetGraph\x12\x19.executor.GetGraphRequest\x1a\x1a.executor.GetGraphResponse\x12K\n\x0cGetAllGraphs\x12\x1d.executor.GetAllGraphsRequest\x1a\x1a.executor.GetGraphResponse0\x01\x12k\n\x16\x43hannelsFromCheckpoint\x12\'.executor.ChannelsFromCheckpointRequest\x1a(.executor.ChannelsFromCheckpointResponse\x12N\n\x0b\x45xecuteTask\x12\x1c.executor.ExecuteTaskRequest\x1a\x1d.executor.ExecuteTaskResponse(\x01\x30\x01\x12J\n\x0b\x41pplyWrites\x12\x1c.executor.ApplyWritesRequest\x1a\x1d.executor.ApplyWritesResponse\x12Y\n\x10GenerateCacheKey\x12!.executor.GenerateCacheKeyRequest\x1a\".executor.GenerateCacheKeyResponseB1Z/github.com/langchain-ai/langgraph-go/runtime/pbb\x06proto3')
30
31
 
31
32
  _globals = globals()
32
33
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -38,42 +39,46 @@ if not _descriptor._USE_C_DESCRIPTORS:
38
39
  _globals['_CHANNELSFROMCHECKPOINTREQUEST_CHECKPOINTCHANNELVALUESENTRY']._serialized_options = b'8\001'
39
40
  _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._loaded_options = None
40
41
  _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._serialized_options = b'8\001'
41
- _globals['_LISTGRAPHSREQUEST']._serialized_start=54
42
- _globals['_LISTGRAPHSREQUEST']._serialized_end=73
43
- _globals['_LISTGRAPHSRESPONSE']._serialized_start=75
44
- _globals['_LISTGRAPHSRESPONSE']._serialized_end=116
45
- _globals['_GETGRAPHREQUEST']._serialized_start=118
46
- _globals['_GETGRAPHREQUEST']._serialized_end=155
47
- _globals['_GETGRAPHRESPONSE']._serialized_start=157
48
- _globals['_GETGRAPHRESPONSE']._serialized_end=225
49
- _globals['_CHANNELSFROMCHECKPOINTREQUEST']._serialized_start=228
50
- _globals['_CHANNELSFROMCHECKPOINTREQUEST']._serialized_end=494
51
- _globals['_CHANNELSFROMCHECKPOINTREQUEST_CHECKPOINTCHANNELVALUESENTRY']._serialized_start=418
52
- _globals['_CHANNELSFROMCHECKPOINTREQUEST_CHECKPOINTCHANNELVALUESENTRY']._serialized_end=494
53
- _globals['_CHANNELSFROMCHECKPOINTRESPONSE']._serialized_start=496
54
- _globals['_CHANNELSFROMCHECKPOINTRESPONSE']._serialized_end=563
55
- _globals['_EXECUTETASKREQUEST']._serialized_start=566
56
- _globals['_EXECUTETASKREQUEST']._serialized_end=702
57
- _globals['_EXECUTETASKINIT']._serialized_start=705
58
- _globals['_EXECUTETASKINIT']._serialized_end=919
59
- _globals['_EXECUTETASKRESPONSE']._serialized_start=922
60
- _globals['_EXECUTETASKRESPONSE']._serialized_end=1111
61
- _globals['_TASKRESULT']._serialized_start=1113
62
- _globals['_TASKRESULT']._serialized_end=1207
63
- _globals['_APPLYWRITESREQUEST']._serialized_start=1210
64
- _globals['_APPLYWRITESREQUEST']._serialized_end=1374
65
- _globals['_APPLYWRITESRESPONSE']._serialized_start=1376
66
- _globals['_APPLYWRITESRESPONSE']._serialized_end=1474
67
- _globals['_GENERATECACHEKEYREQUEST']._serialized_start=1477
68
- _globals['_GENERATECACHEKEYREQUEST']._serialized_end=1677
69
- _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._serialized_start=1625
70
- _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._serialized_end=1677
71
- _globals['_GENERATECACHEKEYRESPONSE']._serialized_start=1679
72
- _globals['_GENERATECACHEKEYRESPONSE']._serialized_end=1724
73
- _globals['_CACHECHECKREQUEST']._serialized_start=1726
74
- _globals['_CACHECHECKREQUEST']._serialized_end=1815
75
- _globals['_CACHECHECKRESPONSE']._serialized_start=1817
76
- _globals['_CACHECHECKRESPONSE']._serialized_end=1856
77
- _globals['_LANGGRAPHEXECUTOR']._serialized_start=1859
78
- _globals['_LANGGRAPHEXECUTOR']._serialized_end=2374
42
+ _globals['_LISTGRAPHSREQUEST']._serialized_start=84
43
+ _globals['_LISTGRAPHSREQUEST']._serialized_end=103
44
+ _globals['_LISTGRAPHSRESPONSE']._serialized_start=105
45
+ _globals['_LISTGRAPHSRESPONSE']._serialized_end=146
46
+ _globals['_GETGRAPHREQUEST']._serialized_start=148
47
+ _globals['_GETGRAPHREQUEST']._serialized_end=185
48
+ _globals['_GETGRAPHRESPONSE']._serialized_start=188
49
+ _globals['_GETGRAPHRESPONSE']._serialized_end=342
50
+ _globals['_GETALLGRAPHSREQUEST']._serialized_start=344
51
+ _globals['_GETALLGRAPHSREQUEST']._serialized_end=365
52
+ _globals['_CHANNELSFROMCHECKPOINTREQUEST']._serialized_start=368
53
+ _globals['_CHANNELSFROMCHECKPOINTREQUEST']._serialized_end=634
54
+ _globals['_CHANNELSFROMCHECKPOINTREQUEST_CHECKPOINTCHANNELVALUESENTRY']._serialized_start=558
55
+ _globals['_CHANNELSFROMCHECKPOINTREQUEST_CHECKPOINTCHANNELVALUESENTRY']._serialized_end=634
56
+ _globals['_CHANNELSFROMCHECKPOINTRESPONSE']._serialized_start=636
57
+ _globals['_CHANNELSFROMCHECKPOINTRESPONSE']._serialized_end=703
58
+ _globals['_EXECUTETASKREQUEST']._serialized_start=706
59
+ _globals['_EXECUTETASKREQUEST']._serialized_end=842
60
+ _globals['_EXECUTETASKINIT']._serialized_start=845
61
+ _globals['_EXECUTETASKINIT']._serialized_end=1059
62
+ _globals['_EXECUTETASKRESPONSE']._serialized_start=1062
63
+ _globals['_EXECUTETASKRESPONSE']._serialized_end=1311
64
+ _globals['_TASKRESULT']._serialized_start=1313
65
+ _globals['_TASKRESULT']._serialized_end=1407
66
+ _globals['_CUSTOMSTREAMEVENT']._serialized_start=1409
67
+ _globals['_CUSTOMSTREAMEVENT']._serialized_end=1470
68
+ _globals['_APPLYWRITESREQUEST']._serialized_start=1473
69
+ _globals['_APPLYWRITESREQUEST']._serialized_end=1637
70
+ _globals['_APPLYWRITESRESPONSE']._serialized_start=1639
71
+ _globals['_APPLYWRITESRESPONSE']._serialized_end=1737
72
+ _globals['_GENERATECACHEKEYREQUEST']._serialized_start=1740
73
+ _globals['_GENERATECACHEKEYREQUEST']._serialized_end=1940
74
+ _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._serialized_start=1888
75
+ _globals['_GENERATECACHEKEYREQUEST_CHANNELVALUESENTRY']._serialized_end=1940
76
+ _globals['_GENERATECACHEKEYRESPONSE']._serialized_start=1942
77
+ _globals['_GENERATECACHEKEYRESPONSE']._serialized_end=1987
78
+ _globals['_CACHECHECKREQUEST']._serialized_start=1989
79
+ _globals['_CACHECHECKREQUEST']._serialized_end=2078
80
+ _globals['_CACHECHECKRESPONSE']._serialized_start=2080
81
+ _globals['_CACHECHECKRESPONSE']._serialized_end=2119
82
+ _globals['_LANGGRAPHEXECUTOR']._serialized_start=2122
83
+ _globals['_LANGGRAPHEXECUTOR']._serialized_end=2714
79
84
  # @@protoc_insertion_point(module_scope)