langgraph-api 0.2.26__py3-none-any.whl → 0.2.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langgraph-api might be problematic.
- langgraph_api/__init__.py +1 -1
- langgraph_api/api/assistants.py +4 -4
- langgraph_api/api/store.py +10 -6
- langgraph_api/asgi_transport.py +171 -0
- langgraph_api/asyncio.py +17 -0
- langgraph_api/config.py +1 -0
- langgraph_api/graph.py +28 -5
- langgraph_api/js/remote.py +16 -11
- langgraph_api/metadata.py +28 -16
- langgraph_api/store.py +127 -0
- langgraph_api/stream.py +17 -7
- langgraph_api/worker.py +1 -1
- {langgraph_api-0.2.26.dist-info → langgraph_api-0.2.28.dist-info}/METADATA +24 -30
- {langgraph_api-0.2.26.dist-info → langgraph_api-0.2.28.dist-info}/RECORD +42 -64
- {langgraph_api-0.2.26.dist-info → langgraph_api-0.2.28.dist-info}/WHEEL +1 -1
- langgraph_api-0.2.28.dist-info/entry_points.txt +2 -0
- langgraph_api/js/tests/api.test.mts +0 -2194
- langgraph_api/js/tests/auth.test.mts +0 -648
- langgraph_api/js/tests/compose-postgres.auth.yml +0 -59
- langgraph_api/js/tests/compose-postgres.yml +0 -59
- langgraph_api/js/tests/graphs/.gitignore +0 -1
- langgraph_api/js/tests/graphs/agent.css +0 -1
- langgraph_api/js/tests/graphs/agent.mts +0 -187
- langgraph_api/js/tests/graphs/agent.ui.tsx +0 -10
- langgraph_api/js/tests/graphs/agent_simple.mts +0 -105
- langgraph_api/js/tests/graphs/auth.mts +0 -106
- langgraph_api/js/tests/graphs/command.mts +0 -48
- langgraph_api/js/tests/graphs/delay.mts +0 -30
- langgraph_api/js/tests/graphs/dynamic.mts +0 -24
- langgraph_api/js/tests/graphs/error.mts +0 -17
- langgraph_api/js/tests/graphs/http.mts +0 -76
- langgraph_api/js/tests/graphs/langgraph.json +0 -11
- langgraph_api/js/tests/graphs/nested.mts +0 -44
- langgraph_api/js/tests/graphs/package.json +0 -13
- langgraph_api/js/tests/graphs/weather.mts +0 -57
- langgraph_api/js/tests/graphs/yarn.lock +0 -242
- langgraph_api/js/tests/utils.mts +0 -17
- langgraph_api-0.2.26.dist-info/LICENSE +0 -93
- langgraph_api-0.2.26.dist-info/entry_points.txt +0 -3
- logging.json +0 -22
- openapi.json +0 -4562
- /LICENSE → /langgraph_api-0.2.28.dist-info/licenses/LICENSE +0 -0
langgraph_api/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.2.26"
+__version__ = "0.2.28"
langgraph_api/api/assistants.py
CHANGED
@@ -9,6 +9,7 @@ from starlette.exceptions import HTTPException
 from starlette.responses import Response
 from starlette.routing import BaseRoute
 
+from langgraph_api import store as api_store
 from langgraph_api.graph import get_assistant_id, get_graph
 from langgraph_api.js.base import BaseRemotePregel
 from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
@@ -25,7 +26,6 @@ from langgraph_runtime.checkpoint import Checkpointer
 from langgraph_runtime.database import connect
 from langgraph_runtime.ops import Assistants
 from langgraph_runtime.retry import retry_db
-from langgraph_runtime.store import Store
 
 logger = structlog.stdlib.get_logger(__name__)
 
@@ -194,7 +194,7 @@ async def get_assistant_graph(
         assistant["graph_id"],
         config,
         checkpointer=Checkpointer(conn),
-        store=
+        store=(await api_store.get_store()),
     ) as graph:
         xray: bool | int = False
         xray_query = request.query_params.get("xray")
@@ -240,7 +240,7 @@ async def get_assistant_subgraphs(
         assistant["graph_id"],
         config,
         checkpointer=Checkpointer(conn),
-        store=
+        store=(await api_store.get_store()),
     ) as graph:
         namespace = request.path_params.get("namespace")
 
@@ -286,7 +286,7 @@ async def get_assistant_schemas(
         assistant["graph_id"],
         config,
         checkpointer=Checkpointer(conn),
-        store=
+        store=(await api_store.get_store()),
     ) as graph:
         if isinstance(graph, BaseRemotePregel):
             schemas = await graph.fetch_state_schema()
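Across these handlers the synchronous `langgraph_runtime.store.Store` is replaced by awaiting `api_store.get_store()`, provided by the new `langgraph_api/store.py` module (+127 lines, not shown in this extract). The sketch below only illustrates the contract the call sites assume, an awaitable accessor that returns a shared `BaseStore`; the real module will differ.

```python
# Hypothetical sketch only: langgraph_api/store.py is not shown in this diff, so the
# real get_store() almost certainly differs. This just illustrates the calling
# convention the handlers above rely on: an awaitable accessor returning a shared
# BaseStore instance, created lazily on first use.
import asyncio

from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore  # stand-in backend for the sketch

_store: BaseStore | None = None
_lock = asyncio.Lock()


async def get_store() -> BaseStore:
    """Return the process-wide store, creating it on first use."""
    global _store
    if _store is None:
        async with _lock:
            if _store is None:
                _store = InMemoryStore()
    return _store
```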
langgraph_api/api/store.py
CHANGED
@@ -6,6 +6,7 @@ from starlette.routing import BaseRoute
 
 from langgraph_api.auth.custom import handle_event as _handle_event
 from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
+from langgraph_api.store import get_store
 from langgraph_api.utils import get_auth_ctx
 from langgraph_api.validation import (
     StoreDeleteRequest,
@@ -14,7 +15,6 @@ from langgraph_api.validation import (
     StoreSearchRequest,
 )
 from langgraph_runtime.retry import retry_db
-from langgraph_runtime.store import Store
 
 
 def _validate_namespace(namespace: tuple[str, ...]) -> Response | None:
@@ -57,7 +57,9 @@ async def put_item(request: ApiRequest):
         "value": payload["value"],
     }
     await handle_event("put", handler_payload)
-    await
+    await (await get_store()).aput(
+        namespace, handler_payload["key"], handler_payload["value"]
+    )
     return Response(status_code=204)
 
 
@@ -75,7 +77,7 @@ async def get_item(request: ApiRequest):
         "key": key,
     }
     await handle_event("get", handler_payload)
-    result = await
+    result = await (await get_store()).aget(namespace, key)
     return ApiResponse(result.dict() if result is not None else None)
 
 
@@ -91,7 +93,9 @@ async def delete_item(request: ApiRequest):
         "key": payload["key"],
     }
     await handle_event("delete", handler_payload)
-    await
+    await (await get_store()).adelete(
+        handler_payload["namespace"], handler_payload["key"]
+    )
     return Response(status_code=204)
 
 
@@ -114,7 +118,7 @@ async def search_items(request: ApiRequest):
         "query": query,
     }
     await handle_event("search", handler_payload)
-    items = await
+    items = await (await get_store()).asearch(
         handler_payload["namespace"],
         filter=handler_payload["filter"],
         limit=handler_payload["limit"],
@@ -145,7 +149,7 @@ async def list_namespaces(request: ApiRequest):
         "offset": offset,
     }
     await handle_event("list_namespaces", handler_payload)
-    result = await
+    result = await (await get_store()).alist_namespaces(
         prefix=handler_payload["namespace"],
         suffix=handler_payload["suffix"],
         max_depth=handler_payload["max_depth"],
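Every store endpoint now follows the same two-step pattern: await `get_store()` to obtain the store, then await the store method itself (`aput`, `aget`, `adelete`, `asearch`, `alist_namespaces`). A minimal standalone example of that double-await shape against langgraph's in-memory store; the namespace, key, and value are invented for illustration.

```python
# Minimal sketch of the double-await pattern used by the handlers above, exercised
# against langgraph's InMemoryStore. The data here is made up; the service resolves
# namespace/key/value from the incoming request instead.
import asyncio

from langgraph.store.memory import InMemoryStore

_STORE = InMemoryStore()


async def get_store() -> InMemoryStore:
    # Stand-in for langgraph_api.store.get_store(); async so call sites can await it.
    return _STORE


async def main() -> None:
    namespace = ("users", "123")
    await (await get_store()).aput(namespace, "prefs", {"theme": "dark"})
    item = await (await get_store()).aget(namespace, "prefs")
    print(item.value if item is not None else None)  # {'theme': 'dark'}
    results = await (await get_store()).asearch(("users",), limit=10)
    print([r.key for r in results])  # ['prefs']


asyncio.run(main())
```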
langgraph_api/asgi_transport.py
ADDED
@@ -0,0 +1,171 @@
+"""ASGI transport that lets you schedule to the main loop.
+
+Adapted from: https://github.com/encode/httpx/blob/6c7af967734bafd011164f2a1653abc87905a62b/httpx/_transports/asgi.py#L1
+"""
+
+from __future__ import annotations
+
+import typing
+
+from httpx import ASGITransport as ASGITransportBase
+from httpx import AsyncByteStream, Request, Response
+
+if typing.TYPE_CHECKING:  # pragma: no cover
+    import asyncio
+
+    import trio
+
+    Event = asyncio.Event | trio.Event
+
+__all__ = ["ASGITransport"]
+
+
+def is_running_trio() -> bool:
+    try:
+        # sniffio is a dependency of trio.
+
+        # See https://github.com/python-trio/trio/issues/2802
+        import sniffio
+
+        if sniffio.current_async_library() == "trio":
+            return True
+    except ImportError:  # pragma: nocover
+        pass
+
+    return False
+
+
+def create_event() -> Event:
+    if is_running_trio():
+        import trio
+
+        return trio.Event()
+
+    import asyncio
+
+    return asyncio.Event()
+
+
+class ASGIResponseStream(AsyncByteStream):
+    def __init__(self, body: list[bytes]) -> None:
+        self._body = body
+
+    async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+        yield b"".join(self._body)
+
+
+class ASGITransport(ASGITransportBase):
+    """
+    A custom AsyncTransport that handles sending requests directly to an ASGI app.
+
+    ```python
+    transport = httpx.ASGITransport(
+        app=app,
+        root_path="/submount",
+        client=("1.2.3.4", 123)
+    )
+    client = httpx.AsyncClient(transport=transport)
+    ```
+
+    Arguments:
+
+    * `app` - The ASGI application.
+    * `raise_app_exceptions` - Boolean indicating if exceptions in the application
+      should be raised. Default to `True`. Can be set to `False` for use cases
+      such as testing the content of a client 500 response.
+    * `root_path` - The root path on which the ASGI application should be mounted.
+    * `client` - A two-tuple indicating the client IP and port of incoming requests.
+    ```
+    """
+
+    async def handle_async_request(
+        self,
+        request: Request,
+    ) -> Response:
+        from langgraph_api.asyncio import call_soon_in_main_loop
+
+        assert isinstance(request.stream, AsyncByteStream)
+
+        # ASGI scope.
+        scope = {
+            "type": "http",
+            "asgi": {"version": "3.0"},
+            "http_version": "1.1",
+            "method": request.method,
+            "headers": [(k.lower(), v) for (k, v) in request.headers.raw],
+            "scheme": request.url.scheme,
+            "path": request.url.path,
+            "raw_path": request.url.raw_path.split(b"?")[0],
+            "query_string": request.url.query,
+            "server": (request.url.host, request.url.port),
+            "client": self.client,
+            "root_path": self.root_path,
+        }
+
+        # Request.
+        request_body_chunks = request.stream.__aiter__()
+        request_complete = False
+
+        # Response.
+        status_code = None
+        response_headers = None
+        body_parts = []
+        response_started = False
+        response_complete = create_event()
+
+        # ASGI callables.
+
+        async def receive() -> dict[str, typing.Any]:
+            nonlocal request_complete
+
+            if request_complete:
+                await response_complete.wait()
+                return {"type": "http.disconnect"}
+
+            try:
+                body = await request_body_chunks.__anext__()
+            except StopAsyncIteration:
+                request_complete = True
+                return {"type": "http.request", "body": b"", "more_body": False}
+            return {"type": "http.request", "body": body, "more_body": True}
+
+        async def send(message: typing.MutableMapping[str, typing.Any]) -> None:
+            nonlocal status_code, response_headers, response_started
+
+            if message["type"] == "http.response.start":
+                assert not response_started
+
+                status_code = message["status"]
+                response_headers = message.get("headers", [])
+                response_started = True
+
+            elif message["type"] == "http.response.body":
+                assert not response_complete.is_set()
+                body = message.get("body", b"")
+                more_body = message.get("more_body", False)
+
+                if body and request.method != "HEAD":
+                    body_parts.append(body)
+
+                if not more_body:
+                    response_complete.set()
+
+        try:
+            await call_soon_in_main_loop(self.app(scope, receive, send))
+        except Exception:  # noqa: PIE-786
+            if self.raise_app_exceptions:
+                raise
+
+            response_complete.set()
+            if status_code is None:
+                status_code = 500
+            if response_headers is None:
+                response_headers = {}
+
+        assert response_complete.is_set()
+        assert status_code is not None
+        assert response_headers is not None
+
+        stream = ASGIResponseStream(body_parts)
+
+        return Response(status_code, headers=response_headers, stream=stream)
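The stock httpx ASGITransport awaits the app on whatever loop the request is made from; this subclass instead hands `self.app(scope, receive, send)` to `call_soon_in_main_loop` (added in langgraph_api/asyncio.py below), so in-process requests issued from other loops or threads still execute on the server's main event loop. A hedged sketch of how such a transport might be wired into a client; `my_asgi_app` is a placeholder app, not something defined in this package.

```python
# Hedged usage sketch: building an in-process client on the custom transport above.
# `my_asgi_app` is a placeholder Starlette/FastAPI app.
import httpx

from langgraph_api.asgi_transport import ASGITransport


def make_inprocess_client(my_asgi_app) -> httpx.AsyncClient:
    # Requests sent through this client never touch the network; the transport
    # invokes the ASGI app directly, scheduling it onto the main event loop.
    transport = ASGITransport(app=my_asgi_app, client=("127.0.0.1", 0))
    return httpx.AsyncClient(transport=transport, base_url="http://testserver")
```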
langgraph_api/asyncio.py
CHANGED
@@ -6,6 +6,7 @@ from functools import partial
 from typing import Any, Generic, TypeVar
 
 import structlog
+from langgraph.utils.future import chain_future
 
 T = TypeVar("T")
 
@@ -19,6 +20,12 @@ def set_event_loop(loop: asyncio.AbstractEventLoop) -> None:
     _MAIN_LOOP = loop
 
 
+def get_event_loop() -> asyncio.AbstractEventLoop:
+    if _MAIN_LOOP is None:
+        raise RuntimeError("No event loop set")
+    return _MAIN_LOOP
+
+
 async def sleep_if_not_done(delay: float, done: asyncio.Event) -> None:
     try:
         await asyncio.wait_for(done.wait(), delay)
@@ -118,6 +125,16 @@ def run_coroutine_threadsafe(
     return future
 
 
+def call_soon_in_main_loop(coro: Coroutine[Any, Any, T]) -> asyncio.Future[T]:
+    """Run a coroutine in the main event loop."""
+    if _MAIN_LOOP is None:
+        raise RuntimeError("No event loop set")
+    main_loop_fut = asyncio.ensure_future(coro, loop=_MAIN_LOOP)
+    this_loop_fut = asyncio.get_running_loop().create_future()
+    _MAIN_LOOP.call_soon_threadsafe(chain_future, main_loop_fut, this_loop_fut)
+    return this_loop_fut
+
+
 class SimpleTaskGroup(AbstractAsyncContextManager["SimpleTaskGroup"]):
     """An async task group that can be configured to wait and/or cancel tasks on exit.
 
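`call_soon_in_main_loop` lets code running on a secondary event loop (for example, inside a worker thread) start a coroutine on the process's main loop and await the outcome locally; `chain_future` links the two futures so the result or exception crosses loops. The sketch below reimplements that linkage by hand purely for illustration; it is not langgraph's `chain_future`.

```python
# Self-contained illustration of the cross-loop scheduling idea behind
# call_soon_in_main_loop(). The result-copying callback is a stand-in for
# langgraph.utils.future.chain_future, written out so the example runs on its own.
import asyncio
import threading

MAIN_LOOP: asyncio.AbstractEventLoop | None = None


def call_soon_in_main_loop(coro):
    """Schedule `coro` on MAIN_LOOP; return a future awaitable on the current loop."""
    assert MAIN_LOOP is not None
    this_loop = asyncio.get_running_loop()
    this_fut = this_loop.create_future()

    def schedule() -> None:
        # Runs on the main loop: start the coroutine there ...
        main_fut = asyncio.ensure_future(coro)

        def copy_result(f: asyncio.Future) -> None:
            # ... and mirror its outcome back onto the caller's loop.
            if f.cancelled():
                this_loop.call_soon_threadsafe(this_fut.cancel)
            elif f.exception() is not None:
                this_loop.call_soon_threadsafe(this_fut.set_exception, f.exception())
            else:
                this_loop.call_soon_threadsafe(this_fut.set_result, f.result())

        main_fut.add_done_callback(copy_result)

    MAIN_LOOP.call_soon_threadsafe(schedule)
    return this_fut


async def on_main_loop() -> str:
    return f"ran on {threading.current_thread().name}"


async def main() -> None:
    global MAIN_LOOP
    MAIN_LOOP = asyncio.get_running_loop()

    async def worker_inner() -> None:
        print(await call_soon_in_main_loop(on_main_loop()))  # "ran on MainThread"

    # A second event loop in a worker thread awaits work done on the main loop.
    thread = threading.Thread(target=lambda: asyncio.run(worker_inner()), name="worker")
    thread.start()
    # Join in a thread pool so the main loop stays free to serve the worker.
    await asyncio.to_thread(thread.join)


asyncio.run(main())
```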
langgraph_api/config.py
CHANGED
langgraph_api/graph.py
CHANGED
@@ -3,7 +3,6 @@ import functools
 import glob
 import importlib.util
 import inspect
-import json
 import os
 import sys
 import warnings
@@ -14,6 +13,7 @@ from random import choice
 from typing import TYPE_CHECKING, Any, NamedTuple
 from uuid import UUID, uuid5
 
+import orjson
 import structlog
 from langchain_core.runnables.config import run_in_executor, var_child_runnable_config
 from langgraph.checkpoint.base import BaseCheckpointSaver
@@ -60,6 +60,12 @@ async def register_graph(
     if callable(graph):
         FACTORY_ACCEPTS_CONFIG[graph_id] = len(inspect.signature(graph).parameters) > 0
     async with connect() as conn:
+        graph_name = getattr(graph, "name", None) if isinstance(graph, Pregel) else None
+        assistant_name = (
+            graph_name
+            if graph_name is not None and graph_name != "LangGraph"
+            else graph_id
+        )
         await Assistants.put(
             conn,
             str(uuid5(NAMESPACE_GRAPH, graph_id)),
@@ -67,7 +73,7 @@ async def register_graph(
             metadata={"created_by": "system"},
             config=config or {},
             if_exists="do_nothing",
-            name=
+            name=assistant_name,
             description=description,
         )
 
@@ -200,10 +206,19 @@ def _load_graph_config_from_env() -> dict | None:
     config_str = os.getenv("LANGGRAPH_CONFIG")
     if not config_str:
         return None
+    try:
+        config_per_id = orjson.loads(config_str)
+    except orjson.JSONDecodeError as e:
+        raise ValueError(
+            "Provided environment variable LANGGRAPH_CONFIG must be a valid JSON object"
+            f"\nFound: {config_str}"
+        ) from e
 
-    config_per_id = json.loads(config_str)
     if not isinstance(config_per_id, dict):
-        raise ValueError(
+        raise ValueError(
+            "Provided environment variable LANGGRAPH_CONFIG must be a JSON object"
+            f"\nFound: {config_str}"
+        )
 
     return config_per_id
 
@@ -218,7 +233,15 @@ async def collect_graphs_from_env(register: bool = False) -> None:
     specs = []
     # graphs-config can be either a mapping from graph id to path where the graph
     # is defined or graph id to a dictionary containing information about the graph.
-
+    try:
+        graphs_config = orjson.loads(paths_str)
+    except orjson.JSONDecodeError as e:
+        raise ValueError(
+            "LANGSERVE_GRAPHS must be a valid JSON object."
+            f"\nFound: {paths_str}"
+            "\n The LANGSERVE_GRAPHS environment variable is typically set"
+            'from the "graphs" field in your configuration (langgraph.json) file.'
+        ) from e
 
     for key, value in graphs_config.items():
         if isinstance(value, dict) and "path" in value:
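Both environment variables are now parsed with orjson, and a malformed value raises a ValueError that echoes the offending string (for LANGSERVE_GRAPHS, with a pointer to the "graphs" field of langgraph.json). A toy illustration of the parse path; the mapping value shown is an assumed `path.py:variable` style entry for illustration, not taken from this diff.

```python
# Illustration of the env-var parsing the new code guards. Only the JSON handling
# mirrors the diff; the example graph path is a made-up "graphs" entry.
import os

import orjson

os.environ["LANGSERVE_GRAPHS"] = '{"agent": "./graphs/agent.py:graph"}'

paths_str = os.environ["LANGSERVE_GRAPHS"]
try:
    graphs_config = orjson.loads(paths_str)
except orjson.JSONDecodeError as e:
    raise ValueError(
        f"LANGSERVE_GRAPHS must be a valid JSON object.\nFound: {paths_str}"
    ) from e

print(graphs_config)  # {'agent': './graphs/agent.py:graph'}
```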
langgraph_api/js/remote.py
CHANGED
@@ -39,6 +39,7 @@ from starlette.exceptions import HTTPException
 from starlette.requests import HTTPConnection, Request
 from starlette.routing import Route
 
+from langgraph_api import store as api_store
 from langgraph_api.auth.custom import DotDict, ProxyUser
 from langgraph_api.config import LANGGRAPH_AUTH_TYPE
 from langgraph_api.js.base import BaseRemotePregel
@@ -70,6 +71,12 @@ _client = httpx.AsyncClient(
 )
 
 
+def _snapshot_defaults():
+    if not hasattr(StateSnapshot, "interrupts"):
+        return {}
+    return {"interrupts": tuple()}
+
+
 def default_command(obj):
     if isinstance(obj, Send):
         return {"node": obj.node, "args": obj.arg}
@@ -251,7 +258,7 @@ class RemotePregel(BaseRemotePregel):
             item.get("parentConfig"),
             _convert_tasks(item.get("tasks", [])),
             # TODO: add handling of interrupts when multiple resumes land in JS
-
+            **_snapshot_defaults(),
         )
 
     async def aget_state(
@@ -473,10 +480,8 @@ def _get_passthrough_checkpointer(conn: AsyncConnectionProto):
     return checkpointer
 
 
-def _get_passthrough_store():
-
-
-    return Store()
+async def _get_passthrough_store():
+    return await api_store.get_store()
 
 
 # Setup a HTTP server on top of CHECKPOINTER_SOCKET unix socket
@@ -574,7 +579,7 @@ async def run_remote_checkpointer():
         else:
             raise ValueError(f"Unknown operation type: {op}")
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         results = await store.abatch(processed_operations)
 
         # Handle potentially undefined or non-dict results
@@ -613,7 +618,7 @@
 
         namespaces = namespaces_str.split(".")
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         result = await store.aget(namespaces, key)
 
         return result
@@ -626,7 +631,7 @@
         value = payload["value"]
         index = payload.get("index")
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         await store.aput(namespace, key, value, index=index)
 
         return {"success": True}
@@ -639,7 +644,7 @@
         offset = payload.get("offset", 0)
         query = payload.get("query")
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         result = await store.asearch(
             namespace_prefix, filter=filter, limit=limit, offset=offset, query=query
         )
@@ -652,7 +657,7 @@
         namespace = tuple(payload["namespace"])
         key = payload["key"]
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         await store.adelete(namespace, key)
 
         return {"success": True}
@@ -665,7 +670,7 @@
         limit = payload.get("limit", 100)
         offset = payload.get("offset", 0)
 
-        store = _get_passthrough_store()
+        store = await _get_passthrough_store()
         result = await store.alist_namespaces(
             prefix=prefix,
             suffix=suffix,
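`_snapshot_defaults` is a small compatibility shim: it only supplies a default `interrupts` value when the installed langgraph's StateSnapshot actually declares that field, so the JS remote wrapper keeps working across langgraph versions. Below is a generic sketch of the same hasattr-based feature detection, using a stand-in Snapshot type rather than langgraph's own.

```python
# Generic sketch of the feature-detection pattern used by _snapshot_defaults():
# supply a keyword only if the target type actually declares that field.
# `Snapshot` is a stand-in for langgraph's StateSnapshot, not the real class.
from typing import Any, NamedTuple


class Snapshot(NamedTuple):
    values: dict
    interrupts: tuple = ()


def snapshot_defaults(cls: type) -> dict[str, Any]:
    # Older versions of the type may lack the field entirely.
    return {"interrupts": ()} if hasattr(cls, "interrupts") else {}


snap = Snapshot(values={"x": 1}, **snapshot_defaults(Snapshot))
print(snap)
```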
langgraph_api/metadata.py
CHANGED
@@ -1,5 +1,6 @@
 import asyncio
 import os
+from collections import defaultdict
 from datetime import UTC, datetime
 
 import langgraph.version
@@ -36,8 +37,8 @@ PLAN = "enterprise" if plus_features_enabled() else "developer"
 USER_API_URL = os.getenv("LANGGRAPH_API_URL", None)
 
 LOGS: list[dict] = []
-RUN_COUNTER =
-NODE_COUNTER =
+RUN_COUNTER = defaultdict(int)
+NODE_COUNTER = defaultdict(int)
 FROM_TIMESTAMP = datetime.now(UTC).isoformat()
 
 if (
@@ -49,14 +50,12 @@ else:
     METADATA_ENDPOINT = "https://api.smith.langchain.com/v1/metadata/submit"
 
 
-def incr_runs(*, incr: int = 1) -> None:
-
-    RUN_COUNTER += incr
+def incr_runs(*, graph_id: str | None = None, incr: int = 1) -> None:
+    RUN_COUNTER[graph_id] += incr
 
 
-def incr_nodes(_,
-
-    NODE_COUNTER += incr
+def incr_nodes(*_, graph_id: str | None = None, incr: int = 1) -> None:
+    NODE_COUNTER[graph_id] += incr
 
 
 def append_log(log: dict) -> None:
@@ -89,13 +88,23 @@ async def metadata_loop() -> None:
         # we don't need a lock as long as there's no awaits in this block
         from_timestamp = FROM_TIMESTAMP
         to_timestamp = datetime.now(UTC).isoformat()
-        nodes = NODE_COUNTER
-        runs = RUN_COUNTER
+        nodes = NODE_COUNTER.copy()
+        runs = RUN_COUNTER.copy()
         logs = LOGS.copy()
         LOGS.clear()
-        RUN_COUNTER
-        NODE_COUNTER
+        RUN_COUNTER.clear()
+        NODE_COUNTER.clear()
         FROM_TIMESTAMP = to_timestamp
+        graph_measures = {
+            f"langgraph.platform.graph_runs.{graph_id}": runs.get(graph_id, 0)
+            for graph_id in runs
+        }
+        graph_measures.update(
+            {
+                f"langgraph.platform.graph_nodes.{graph_id}": nodes.get(graph_id, 0)
+                for graph_id in nodes
+            }
+        )
 
         payload = {
             "license_key": LANGGRAPH_CLOUD_LICENSE_KEY,
@@ -120,8 +129,9 @@
                 "user_app.uses_store_ttl": str(USES_STORE_TTL),
             },
             "measures": {
-                "langgraph.platform.runs": runs,
-                "langgraph.platform.nodes": nodes,
+                "langgraph.platform.runs": sum(runs.values()),
+                "langgraph.platform.nodes": sum(nodes.values()),
+                **graph_measures,
             },
             "logs": logs,
         }
@@ -134,8 +144,10 @@
             )
         except Exception as e:
             # retry on next iteration
-
-
+            for graph_id, incr in runs.items():
+                incr_runs(graph_id=graph_id, incr=incr)
+            for graph_id, incr in nodes.items():
+                incr_nodes(graph_id=graph_id, incr=incr)
             FROM_TIMESTAMP = from_timestamp
             await logger.ainfo("Metadata submission skipped.", error=str(e))
         await asyncio.sleep(INTERVAL)
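The run and node counters change from single integers to per-graph defaultdict(int) maps, which lets the metadata payload keep the aggregate langgraph.platform.runs / nodes measures while adding per-graph langgraph.platform.graph_runs.<graph_id> and graph_nodes.<graph_id> entries, and lets a failed submission re-credit the counts per graph. A self-contained sketch of that bookkeeping (the submit loop itself is omitted).

```python
# Stand-alone sketch of the per-graph counter bookkeeping shown above.
# Only the counter/measure derivation mirrors the diff; the submit loop is omitted.
from collections import defaultdict

RUN_COUNTER: defaultdict[str | None, int] = defaultdict(int)
NODE_COUNTER: defaultdict[str | None, int] = defaultdict(int)


def incr_runs(*, graph_id: str | None = None, incr: int = 1) -> None:
    RUN_COUNTER[graph_id] += incr


def incr_nodes(*_, graph_id: str | None = None, incr: int = 1) -> None:
    NODE_COUNTER[graph_id] += incr


incr_runs(graph_id="agent")
incr_nodes(graph_id="agent", incr=3)

runs, nodes = RUN_COUNTER.copy(), NODE_COUNTER.copy()
measures = {
    "langgraph.platform.runs": sum(runs.values()),
    "langgraph.platform.nodes": sum(nodes.values()),
    **{f"langgraph.platform.graph_runs.{g}": n for g, n in runs.items()},
    **{f"langgraph.platform.graph_nodes.{g}": n for g, n in nodes.items()},
}
print(measures)
# {'langgraph.platform.runs': 1, 'langgraph.platform.nodes': 3,
#  'langgraph.platform.graph_runs.agent': 1, 'langgraph.platform.graph_nodes.agent': 3}
```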