langgraph-executor 0.0.1a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langgraph_executor/__init__.py +1 -0
- langgraph_executor/common.py +395 -0
- langgraph_executor/example.py +29 -0
- langgraph_executor/execute_task.py +239 -0
- langgraph_executor/executor.py +341 -0
- langgraph_executor/extract_graph.py +178 -0
- langgraph_executor/info_logger.py +111 -0
- langgraph_executor/pb/__init__.py +0 -0
- langgraph_executor/pb/executor_pb2.py +79 -0
- langgraph_executor/pb/executor_pb2.pyi +415 -0
- langgraph_executor/pb/executor_pb2_grpc.py +321 -0
- langgraph_executor/pb/executor_pb2_grpc.pyi +150 -0
- langgraph_executor/pb/graph_pb2.py +55 -0
- langgraph_executor/pb/graph_pb2.pyi +230 -0
- langgraph_executor/pb/graph_pb2_grpc.py +24 -0
- langgraph_executor/pb/graph_pb2_grpc.pyi +17 -0
- langgraph_executor/pb/runtime_pb2.py +68 -0
- langgraph_executor/pb/runtime_pb2.pyi +364 -0
- langgraph_executor/pb/runtime_pb2_grpc.py +322 -0
- langgraph_executor/pb/runtime_pb2_grpc.pyi +151 -0
- langgraph_executor/pb/types_pb2.py +144 -0
- langgraph_executor/pb/types_pb2.pyi +1044 -0
- langgraph_executor/pb/types_pb2_grpc.py +24 -0
- langgraph_executor/pb/types_pb2_grpc.pyi +17 -0
- langgraph_executor/py.typed +0 -0
- langgraph_executor/server.py +186 -0
- langgraph_executor/setup.sh +29 -0
- langgraph_executor/stream_utils.py +96 -0
- langgraph_executor-0.0.1a0.dist-info/METADATA +14 -0
- langgraph_executor-0.0.1a0.dist-info/RECORD +31 -0
- langgraph_executor-0.0.1a0.dist-info/WHEEL +4 -0
@@ -0,0 +1,24 @@
|
|
1
|
+
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import warnings


# Version of grpcio-tools that emitted this module; the installed grpcio
# runtime must be at least this new for the generated code to work.
GRPC_GENERATED_VERSION = '1.74.0'
GRPC_VERSION = grpc.__version__
_version_not_supported = False

try:
    # first_version_is_lower exists only in modern grpcio; its absence
    # itself implies the runtime is too old for this generated code.
    from grpc._utilities import first_version_is_lower
    _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
except ImportError:
    _version_not_supported = True

# Fail fast at import time with an actionable message instead of crashing
# later with an obscure attribute error deep inside a service call.
if _version_not_supported:
    raise RuntimeError(
        f'The grpc package installed is at version {GRPC_VERSION},'
        + f' but the generated code in types_pb2_grpc.py depends on'
        + f' grpcio>={GRPC_GENERATED_VERSION}.'
        + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
        + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
    )
|
@@ -0,0 +1,17 @@
|
|
1
|
+
"""
|
2
|
+
@generated by mypy-protobuf. Do not edit manually!
|
3
|
+
isort:skip_file
|
4
|
+
"""
|
5
|
+
|
6
|
+
import abc
|
7
|
+
import collections.abc
|
8
|
+
import grpc
|
9
|
+
import grpc.aio
|
10
|
+
import typing
|
11
|
+
|
12
|
+
_T = typing.TypeVar("_T")
|
13
|
+
|
14
|
+
class _MaybeAsyncIterator(collections.abc.AsyncIterator[_T], collections.abc.Iterator[_T], metaclass=abc.ABCMeta): ...
|
15
|
+
|
16
|
+
class _ServicerContext(grpc.ServicerContext, grpc.aio.ServicerContext): # type: ignore[misc, type-arg]
|
17
|
+
...
|
File without changes
|
@@ -0,0 +1,186 @@
|
|
1
|
+
import argparse
|
2
|
+
import logging
|
3
|
+
import os
|
4
|
+
import pathlib
|
5
|
+
import signal
|
6
|
+
import sys
|
7
|
+
import time
|
8
|
+
import uuid
|
9
|
+
from concurrent import futures
|
10
|
+
|
11
|
+
import grpc
|
12
|
+
from langgraph.pregel import Pregel
|
13
|
+
|
14
|
+
from langgraph_executor.executor import LangGraphExecutorServicer
|
15
|
+
from langgraph_executor.info_logger import ExecutorInfo, ExecutorInfoLogger
|
16
|
+
from langgraph_executor.pb.executor_pb2_grpc import (
|
17
|
+
add_LangGraphExecutorServicer_to_server,
|
18
|
+
)
|
19
|
+
|
20
|
+
EXECUTOR_DEFAULT_PORT = 50052
|
21
|
+
|
22
|
+
EXECUTOR_INFO_FILE_NAME = "info.json"
|
23
|
+
|
24
|
+
YELLOW = "\033[93m"
|
25
|
+
RESET = "\033[0m"
|
26
|
+
|
27
|
+
|
28
|
+
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps each rendered log record in an ANSI color code."""

    def __init__(self, color=YELLOW, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ANSI escape sequence applied to the whole formatted line
        self.color = color

    def format(self, record):
        # Let the base class render the record, then bracket it with the
        # color code and a reset so the color never bleeds into later output.
        rendered = super().format(record)
        return self.color + rendered + RESET
|
36
|
+
|
37
|
+
|
38
|
+
def setup_server_logging(component_id: str, debug=False, color=YELLOW):
    """Configure root logging so every record is prefixed with *component_id*.

    Args:
        component_id: Label prepended to each log line (e.g. "EXECUTOR <id>").
        debug: When True, log at DEBUG level instead of INFO.
        color: ANSI escape sequence used to colorize the output.
    """
    fmt = f"[{component_id}] %(asctime)s [%(levelname)s] %(name)s: %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"
    level = logging.DEBUG if debug else logging.INFO

    # Drop any pre-existing handlers so repeated setup calls do not
    # produce duplicate log lines.
    root = logging.getLogger()
    while root.handlers:
        root.removeHandler(root.handlers[0])

    logging.basicConfig(
        level=level,
        format=fmt,
        datefmt=datefmt,
        force=True,
    )

    # Swap in the colorizing formatter on every handler basicConfig installed.
    for handler in logging.root.handlers:
        handler.setFormatter(ColoredFormatter(color=color, fmt=fmt, datefmt=datefmt))
|
64
|
+
|
65
|
+
|
66
|
+
def create_executor_info(port: int, id: str):
    """Build a fresh ExecutorInfo record for this process in the "starting" state."""
    started_at = time.time()
    return ExecutorInfo(
        id=id,
        pid=os.getpid(),
        port=port,
        start_time=started_at,
        status="starting",
        # Not finished and no failure yet at creation time.
        error_message=None,
        end_time=None,
    )
|
76
|
+
|
77
|
+
|
78
|
+
def signum_to_name(signum):
    """Return the symbolic name of a signal number, or a placeholder if unknown."""
    try:
        name = signal.Signals(signum).name
    except ValueError:
        # Not a signal known to this platform — keep the raw number visible.
        name = f"UNKNOWN_SIGNAL_{signum}"
    return name
|
83
|
+
|
84
|
+
|
85
|
+
def serve(
    graphs: dict[str, Pregel],
    port: int,
    debug: bool,
    id: str | None = None,
    log_dir: pathlib.Path | None = None,
):
    """Start the gRPC executor server and block until termination.

    Args:
        graphs: Dictionary mapping graph names to compiled graphs
        port: Port to listen on
        debug: Enable DEBUG-level logging when True
        id: Optional executor ID; a random UUID is generated when omitted
        log_dir: Directory for the executor info file; defaults to
            ``<package parent>/logs``

    Raises:
        Exception: re-raised after recording an "error" status if the
            server terminates abnormally.
    """
    id_ = id
    if id_ is None:
        id_ = str(uuid.uuid4())

    setup_server_logging(f"EXECUTOR {id_}", debug=debug)
    logger = logging.getLogger(__name__)

    info_logger = ExecutorInfoLogger(
        log_dir or pathlib.Path(__file__).resolve().parent.parent / "logs",
    )

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_LangGraphExecutorServicer_to_server(LangGraphExecutorServicer(graphs), server)
    # add_insecure_port returns the port actually bound (relevant when 0 was
    # requested), so rebind `port` to the real value before recording it.
    port = server.add_insecure_port(f"[::]:{port}")

    server.start()

    # Create and persist the executor info BEFORE installing signal handlers:
    # the handlers close over `executor_info`, so registering them first left
    # a window where an early SIGINT/SIGTERM raised NameError in the handler
    # and tried to update a record that had never been written.
    executor_info = create_executor_info(port, id_)
    info_logger.write_executor_info(executor_info)

    # Signal handler for graceful shutdown
    def signal_handler(signum, frame):
        logger.info(f"Received signal {signum_to_name(signum)}. Shutting down...")
        server.stop(5)  # Give 5 seconds for graceful shutdown
        info_logger.update_executor_info(
            executor_info.id,
            status="stopped",
            error_message=f"Shutdown via signal {signum_to_name(signum)}",
            end_time=time.time(),
        )
        logger.info("Shutdown complete")
        sys.exit(0)

    # Register signal handlers
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        info_logger.update_executor_info(executor_info.id, status="running")
        logger.info("Listening...")
        server.wait_for_termination()
    except Exception as e:
        logger.exception("Unexpected error in executor")
        server.stop(0)
        info_logger.update_executor_info(
            executor_info.id,
            status="error",
            error_message=str(e),
            end_time=time.time(),
        )
        raise
|
150
|
+
|
151
|
+
|
152
|
+
def main():
    """Start a LangGraph executor server from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=EXECUTOR_DEFAULT_PORT)
    parser.add_argument("--debug", action="store_true")
    parser.add_argument(
        "--log-dir",
        type=pathlib.Path,
        default=pathlib.Path(__file__).resolve().parents[2] / "logs",
    )

    args = parser.parse_args()

    id_ = str(uuid.uuid4())

    setup_server_logging(f"EXECUTOR {id_}", debug=args.debug)
    logger = logging.getLogger(__name__)

    # load graphs. This is a cyclic dep just for demo purposes
    try:
        from langgraph_runtime_integration.graphs import (  # type: ignore[import-not-found]
            GRAPHS,  # type: ignore[import-untyped]
        )
    except ImportError:
        # Handle case where langgraph_runtime_integration is not available —
        # log it instead of silently serving nothing.
        logger.warning(
            "langgraph_runtime_integration not available; serving no graphs"
        )
        GRAPHS = {}

    # Pass id_ through so serve() reuses this executor ID. Previously it was
    # omitted and serve() generated a second UUID, so the log prefix and the
    # persisted executor info disagreed about the executor's identity.
    serve(GRAPHS, args.port, args.debug, id=id_, log_dir=args.log_dir)
|
183
|
+
|
184
|
+
|
185
|
+
if __name__ == "__main__":
|
186
|
+
main()
|
@@ -0,0 +1,29 @@
|
|
1
|
+
#!/bin/bash
# Set up the Python environment for the executor: create a virtualenv
# (if missing) and install the project's dependencies into it.

# Abort on the first failing command; previously the script kept going after
# a failed venv creation or pip install and still reported "setup complete"
# for a broken environment.
set -e

echo "Setting up Python environment..."

# Create virtual environment if it doesn't exist
if [ ! -d ".venv" ]; then
    echo "Creating virtual environment..."
    python -m venv .venv
fi

# Activate virtual environment
source .venv/bin/activate

# Install dependencies
echo "Installing dependencies..."
python -m pip install -r requirements.txt

# Deactivate environment
deactivate

# Generate protobuf files
# echo "Generating Python protobuf files..."
# source .venv/bin/activate
# ./generate_proto.sh

echo "Python setup complete!"
echo ""
echo "To activate the environment:"
echo "  source .venv/bin/activate"
|
@@ -0,0 +1,96 @@
|
|
1
|
+
from collections.abc import AsyncIterator, Iterator
|
2
|
+
from uuid import UUID
|
3
|
+
|
4
|
+
from langchain_core.callbacks import BaseCallbackHandler
|
5
|
+
from langchain_core.outputs import ChatGenerationChunk, LLMResult
|
6
|
+
|
7
|
+
try:
|
8
|
+
from langchain_core.tracers._streaming import _StreamingCallbackHandler
|
9
|
+
except ImportError:
|
10
|
+
_StreamingCallbackHandler = object # type: ignore
|
11
|
+
|
12
|
+
from typing import TypeVar
|
13
|
+
|
14
|
+
T = TypeVar("T")
|
15
|
+
|
16
|
+
|
17
|
+
class ExecutorStreamHandler(BaseCallbackHandler, _StreamingCallbackHandler):
    """Callback handler that captures LLM streaming tokens for Go executor."""

    def __init__(self, stream_callback, task_id: str):
        """Initialize handler with callback to send stream chunks.

        Args:
            stream_callback: Function to call with stream chunks
            task_id: Task ID for metadata
        """
        self.stream_callback = stream_callback
        self.task_id = task_id
        # Per-run metadata, keyed by run_id; populated in on_llm_start and
        # cleared when the run ends or errors.
        self.metadata = {}

    def on_llm_start(
        self,
        serialized: dict,
        prompts: list[str],
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        metadata: dict | None = None,
        **kwargs,
    ) -> None:
        """Store metadata for this LLM run."""
        if not metadata:
            return
        # Keep only what streaming needs: the task ID plus the run metadata.
        self.metadata[run_id] = {"task_id": self.task_id, "metadata": metadata}

    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: ChatGenerationChunk | None = None,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        tags: list[str] | None = None,
        **kwargs,
    ) -> None:
        """Stream LLM token chunks immediately to Go."""
        if isinstance(chunk, ChatGenerationChunk):
            entry = self.metadata.get(run_id)
            if entry is not None:
                # Forward the message chunk right away with its run metadata.
                self.stream_callback(chunk.message, entry["metadata"])

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs,
    ) -> None:
        """Handle final LLM result (will be handled by deduplication in Go)."""
        # Drop this run's metadata; the final message travels through the
        # normal task-completion flow and Go-side dedup avoids duplicates.
        self.metadata.pop(run_id, None)

    def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs,
    ) -> None:
        """Clean up on LLM error."""
        self.metadata.pop(run_id, None)

    def tap_output_aiter(
        self, run_id: UUID, output: AsyncIterator[T]
    ) -> AsyncIterator[T]:
        # Pass-through: no tapping of the async output stream is needed here.
        return output

    def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
        # Pass-through: no tapping of the sync output stream is needed here.
        return output
|
@@ -0,0 +1,14 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: langgraph-executor
|
3
|
+
Version: 0.0.1a0
|
4
|
+
Summary: LangGraph python RPC server executable by the langgraph-go orchestrator.
|
5
|
+
Requires-Python: >=3.11
|
6
|
+
Requires-Dist: grpcio>=1.73.1
|
7
|
+
Requires-Dist: langgraph>=0.6.5
|
8
|
+
Requires-Dist: protobuf>=4.25.0
|
9
|
+
Requires-Dist: psutil>=7.0.0
|
10
|
+
Description-Content-Type: text/markdown
|
11
|
+
|
12
|
+
# Python Executor
|
13
|
+
|
14
|
+
This is the Python gRPC server that implements the LangGraph executor protocol.
|
@@ -0,0 +1,31 @@
|
|
1
|
+
langgraph_executor/__init__.py,sha256=508eDu4nPCyqYyg_NQBRjRZmK_68ggYywDmqDMXRF1I,24
|
2
|
+
langgraph_executor/common.py,sha256=w75Bqbrj5LOtiWoOOdIi45yVB05xYXDAnokwg7MgDQE,13688
|
3
|
+
langgraph_executor/example.py,sha256=TcfxgC9VfpZFliWnuVdJMllCDa8ji7vrSCRWnmdsUA8,900
|
4
|
+
langgraph_executor/execute_task.py,sha256=DmsLHhkfhBYhZeHFq2eWHCkJuK9WfcDSkUQ8a0nwCB0,6932
|
5
|
+
langgraph_executor/executor.py,sha256=9YI_-meJHz9ei9azWQjZGLzhgHDpa7Xj6Sb9HO01s_w,13242
|
6
|
+
langgraph_executor/extract_graph.py,sha256=VlbQ6KausNd7ZPtivItMP1g4bYDwhZtttOHtbuFy4Hc,6407
|
7
|
+
langgraph_executor/info_logger.py,sha256=BUcrg_nui_dN4JBUVyO7jurmCnt8gQ0TOubUNbyvYBk,3095
|
8
|
+
langgraph_executor/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
9
|
+
langgraph_executor/server.py,sha256=avtZzStLiIDvNCKyytFU3E2Li1_TBgz5oyWUBRbPzFk,5200
|
10
|
+
langgraph_executor/setup.sh,sha256=QI505EIya8sjEIohom6GDfFckFqOMF8bIEX-hSWcLUI,627
|
11
|
+
langgraph_executor/stream_utils.py,sha256=snxlEoZe0d4ae3h_6Ct3zNzLV_ugvPANcr4VECYofwk,2974
|
12
|
+
langgraph_executor/pb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
13
|
+
langgraph_executor/pb/executor_pb2.py,sha256=G9BI0TLB39yACBo2QX3eT0jDbeNFJyEY0IlGuOAeEH8,7783
|
14
|
+
langgraph_executor/pb/executor_pb2.pyi,sha256=bW6TYrLG8SySVqx1ie-5igw2PEdvHiQLIvnyb59GATc,16575
|
15
|
+
langgraph_executor/pb/executor_pb2_grpc.py,sha256=IvKDVBWxrWU7Rcntall4KAvpQ8CdLkbljKaCEloimQE,12224
|
16
|
+
langgraph_executor/pb/executor_pb2_grpc.pyi,sha256=iGHLqEpl7FFrBvBmBfbacvl0YgtZf8dH5pBkAaB0d64,5270
|
17
|
+
langgraph_executor/pb/graph_pb2.py,sha256=FyhQlHv1lIivUL_HoKVxB6x1oXxgdugV2bsKJd55A8o,4430
|
18
|
+
langgraph_executor/pb/graph_pb2.pyi,sha256=inoFjgLD42hxkG1ORIU3Lrrk57ubdmZZtRw53JZUsGI,10161
|
19
|
+
langgraph_executor/pb/graph_pb2_grpc.py,sha256=8j8j0GTUo21GL7RO-_UgVPN27DRnAfus0lhTWKI49no,886
|
20
|
+
langgraph_executor/pb/graph_pb2_grpc.pyi,sha256=Dl8kkjhqb6F1Kt24mcFg7ppish4iKVfjRiiBxEjsMMA,413
|
21
|
+
langgraph_executor/pb/runtime_pb2.py,sha256=tYVvki3PixSiXnrBh53izR9IynGJxtynKtkZs6Aoeu4,6445
|
22
|
+
langgraph_executor/pb/runtime_pb2.pyi,sha256=QNkLw4OV3IJQjpX3j-t_s6_zkcxMyuYnJCwVPGaBcJ0,14398
|
23
|
+
langgraph_executor/pb/runtime_pb2_grpc.py,sha256=HKn8fgGT9zTDAS-ZGCk-1AO2QForvCZH_BVnufTS0sM,11791
|
24
|
+
langgraph_executor/pb/runtime_pb2_grpc.pyi,sha256=TeEHQYvJDdypawBFA6JP4OiQ9NFa_RNVKzGwHBSlFNs,4614
|
25
|
+
langgraph_executor/pb/types_pb2.py,sha256=rUboWBthlLM-iJOmZZta-HSekwTNFuXfWLaYHes1918,15570
|
26
|
+
langgraph_executor/pb/types_pb2.pyi,sha256=tBX4LzmfWZ-IYscIeAEBTDC02GWWKEAQVB2JPNHChDc,40957
|
27
|
+
langgraph_executor/pb/types_pb2_grpc.py,sha256=EPv87wCc-6BNJ2xTNcb9d3ictDerK5cBt7qhd7EmJiQ,886
|
28
|
+
langgraph_executor/pb/types_pb2_grpc.pyi,sha256=Dl8kkjhqb6F1Kt24mcFg7ppish4iKVfjRiiBxEjsMMA,413
|
29
|
+
langgraph_executor-0.0.1a0.dist-info/METADATA,sha256=0v94S42MrwYAAitdIL1UfZGCHb5BVwtcDeFev_I8Ptk,433
|
30
|
+
langgraph_executor-0.0.1a0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
31
|
+
langgraph_executor-0.0.1a0.dist-info/RECORD,,
|