langgraph-api 0.0.30__py3-none-any.whl → 0.0.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langgraph-api might be problematic.
- langgraph_api/api/assistants.py +9 -2
- langgraph_api/api/runs.py +1 -1
- langgraph_api/api/threads.py +10 -1
- langgraph_api/cli.py +10 -43
- langgraph_api/command.py +29 -0
- langgraph_api/config.py +83 -2
- langgraph_api/js/remote.py +72 -51
- langgraph_api/js/tests/api.test.mts +1 -1
- langgraph_api/lifespan.py +3 -0
- langgraph_api/stream.py +3 -29
- langgraph_api/validation.py +13 -1
- {langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/METADATA +4 -4
- {langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/RECORD +19 -18
- langgraph_storage/ops.py +75 -1
- langgraph_storage/store.py +4 -0
- openapi.json +102 -0
- {langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/LICENSE +0 -0
- {langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/WHEEL +0 -0
- {langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/entry_points.txt +0 -0
langgraph_api/api/assistants.py
CHANGED

@@ -87,7 +87,13 @@ def _graph_schemas(graph: Pregel) -> dict:
             f"Failed to get output schema for graph {graph.name} with error: `{str(e)}`"
         )
         output_schema = None
-
+    try:
+        state_schema = _state_jsonschema(graph)
+    except Exception as e:
+        logger.warning(
+            f"Failed to get state schema for graph {graph.name} with error: `{str(e)}`"
+        )
+        state_schema = None
     try:
         config_schema = _get_configurable_jsonschema(graph)
     except Exception as e:

@@ -184,7 +190,8 @@ async def get_assistant_graph(
        return ApiResponse(drawable_graph.to_json())

    try:
-
+        drawable_graph = await graph.aget_graph(xray=xray)
+        return ApiResponse(drawable_graph.to_json())
    except NotImplementedError:
        raise HTTPException(
            422, detail="The graph does not support visualization"
langgraph_api/api/runs.py
CHANGED

@@ -354,7 +354,7 @@ async def join_run_stream_endpoint(request: ApiRequest):
    cancel_on_disconnect = cancel_on_disconnect_str.lower() in {"true", "yes", "1"}
    validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
    validate_uuid(run_id, "Invalid run ID: must be a UUID")
-    stream_mode = request.query_params.get("stream_mode")
+    stream_mode = request.query_params.get("stream_mode") or None
    return EventSourceResponse(
        Runs.Stream.join(
            run_id,
langgraph_api/api/threads.py
CHANGED

@@ -28,12 +28,21 @@ async def create_thread(
    if thread_id := payload.get("thread_id"):
        validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
    async with connect() as conn:
+        thread_id = thread_id or str(uuid4())
        iter = await Threads.put(
            conn,
-            thread_id
+            thread_id,
            metadata=payload.get("metadata"),
            if_exists=payload.get("if_exists") or "raise",
        )
+
+        if supersteps := payload.get("supersteps"):
+            await Threads.State.bulk(
+                conn,
+                config={"configurable": {"thread_id": thread_id}},
+                supersteps=supersteps,
+            )
+
        return ApiResponse(await fetchone(iter, not_found_code=409))

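To make the new supersteps hook concrete, here is a sketch of a thread-creation payload that seeds state at creation time. The field names follow the ThreadCreate and ThreadSuperstepUpdate schemas added to openapi.json further below; the UUID, node name, and state values are placeholder assumptions, not taken from the package.

# Sketch only: a create-thread request body using the new "supersteps" field.
# Each update applies "values" (or a "command") as if "as_node" had just run.
payload = {
    "thread_id": "3f8f6d3e-0f6b-4d1c-9c5e-7a2d5b1e4c9a",  # optional; the server now generates one if omitted
    "if_exists": "raise",
    "supersteps": [
        {
            "updates": [
                {"values": {"messages": ["seed message"]}, "as_node": "__start__"},
            ]
        }
    ],
}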
langgraph_api/cli.py
CHANGED

@@ -10,7 +10,7 @@ from collections.abc import Mapping, Sequence
 from typing_extensions import TypedDict

 if typing.TYPE_CHECKING:
-    from langgraph_api.config import HttpConfig
+    from langgraph_api.config import HttpConfig, StoreConfig

 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

@@ -75,40 +75,6 @@ def patch_environment(**kwargs):
        os.environ[key] = value


-class IndexConfig(TypedDict, total=False):
-    """Configuration for indexing documents for semantic search in the store."""
-
-    dims: int
-    """Number of dimensions in the embedding vectors.
-
-    Common embedding models have the following dimensions:
-        - OpenAI text-embedding-3-large: 256, 1024, or 3072
-        - OpenAI text-embedding-3-small: 512 or 1536
-        - OpenAI text-embedding-ada-002: 1536
-        - Cohere embed-english-v3.0: 1024
-        - Cohere embed-english-light-v3.0: 384
-        - Cohere embed-multilingual-v3.0: 1024
-        - Cohere embed-multilingual-light-v3.0: 384
-    """
-
-    embed: str
-    """Either a path to an embedding model (./path/to/file.py:embedding_model)
-    or a name of an embedding model (openai:text-embedding-3-small)
-
-    Note: LangChain is required to use the model format specification.
-    """
-
-    fields: list[str] | None
-    """Fields to extract text from for embedding generation.
-
-    Defaults to the root ["$"], which embeds the json object as a whole.
-    """
-
-
-class StoreConfig(TypedDict, total=False):
-    index: IndexConfig
-
-
 class SecurityConfig(TypedDict, total=False):
     securitySchemes: dict
     security: list

@@ -160,9 +126,10 @@ def run_server(
    env: str | pathlib.Path | Mapping[str, str] | None = None,
    reload_includes: Sequence[str] | None = None,
    reload_excludes: Sequence[str] | None = None,
-    store: StoreConfig
+    store: typing.Optional["StoreConfig"] = None,
    auth: AuthConfig | None = None,
    http: typing.Optional["HttpConfig"] = None,
+    studio_url: str | None = None,
    **kwargs: typing.Any,
 ):
    """Run the LangGraph API server."""

@@ -226,11 +193,11 @@ def run_server(
        ALLOW_PRIVATE_NETWORK="true",
        **(env_vars or {}),
    ):
-        studio_origin = _get_ls_origin() or "https://smith.langchain.com"
-
+        studio_origin = studio_url or _get_ls_origin() or "https://smith.langchain.com"
+        full_studio_url = f"{studio_origin}/studio/?baseUrl={local_url}"

        def _open_browser():
-            nonlocal studio_origin,
+            nonlocal studio_origin, full_studio_url
            import time
            import urllib.request
            import webbrowser

@@ -252,7 +219,7 @@ def run_server(
                    try:
                        org_id = org_id_future.result(timeout=3.0)
                        if org_id:
-
+                            full_studio_url = f"{studio_origin}/studio/?baseUrl={local_url}&organizationId={org_id}"
                    except TimeoutError as e:
                        thread_logger.debug(
                            f"Failed to get organization ID: {str(e)}"

@@ -264,8 +231,8 @@ def run_server(
                    thread_logger.info(
                        "🎨 Opening Studio in your browser..."
                    )
-                    thread_logger.info("URL: " +
-                    webbrowser.open(
+                    thread_logger.info("URL: " + full_studio_url)
+                    webbrowser.open(full_studio_url)
                    return
                except urllib.error.URLError:
                    pass

@@ -280,7 +247,7 @@ def run_server(
 ╩═╝┴ ┴┘└┘└─┘╚═╝┴└─┴ ┴┴ ┴ ┴

 - 🚀 API: \033[36m{local_url}\033[0m
-- 🎨 Studio UI: \033[36m{
+- 🎨 Studio UI: \033[36m{full_studio_url}\033[0m
 - 📚 API Docs: \033[36m{local_url}/docs\033[0m

 This in-memory server is designed for development and testing.
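The Studio-related change above boils down to a small precedence rule: the new studio_url parameter wins over the origin reported by LangSmith, with smith.langchain.com as the final fallback. A minimal sketch follows; the helper name resolve_studio_origin is illustrative and not part of the package.

# Illustrative helper mirroring the precedence introduced in run_server.
def resolve_studio_origin(studio_url: str | None, ls_origin: str | None) -> str:
    # explicit override -> LangSmith-reported origin -> public default
    return studio_url or ls_origin or "https://smith.langchain.com"

assert resolve_studio_origin(None, None) == "https://smith.langchain.com"
assert resolve_studio_origin("https://eu.smith.langchain.com", None) == "https://eu.smith.langchain.com"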
langgraph_api/command.py
ADDED

@@ -0,0 +1,29 @@
+from langgraph.types import Command, Send
+
+from langgraph_api.schema import RunCommand
+
+
+def map_cmd(cmd: RunCommand) -> Command:
+    goto = cmd.get("goto")
+    if goto is not None and not isinstance(goto, list):
+        goto = [cmd.get("goto")]
+
+    update = cmd.get("update")
+    if isinstance(update, tuple | list) and all(
+        isinstance(t, tuple | list) and len(t) == 2 and isinstance(t[0], str)
+        for t in update
+    ):
+        update = [tuple(t) for t in update]
+
+    return Command(
+        update=update,
+        goto=(
+            [
+                it if isinstance(it, str) else Send(it["node"], it["input"])
+                for it in goto
+            ]
+            if goto
+            else None
+        ),
+        resume=cmd.get("resume"),
+    )
langgraph_api/config.py
CHANGED

@@ -1,5 +1,5 @@
 from os import environ, getenv
-from typing import TypedDict
+from typing import Literal, TypedDict

 import orjson
 from starlette.config import Config, undefined

@@ -34,6 +34,66 @@ class HttpConfig(TypedDict, total=False):
    cors: CorsConfig | None


+class IndexConfig(TypedDict, total=False):
+    """Configuration for indexing documents for semantic search in the store."""
+
+    dims: int
+    """Number of dimensions in the embedding vectors.
+
+    Common embedding models have the following dimensions:
+        - OpenAI text-embedding-3-large: 256, 1024, or 3072
+        - OpenAI text-embedding-3-small: 512 or 1536
+        - OpenAI text-embedding-ada-002: 1536
+        - Cohere embed-english-v3.0: 1024
+        - Cohere embed-english-light-v3.0: 384
+        - Cohere embed-multilingual-v3.0: 1024
+        - Cohere embed-multilingual-light-v3.0: 384
+    """
+
+    embed: str
+    """Either a path to an embedding model (./path/to/file.py:embedding_model)
+    or a name of an embedding model (openai:text-embedding-3-small)
+
+    Note: LangChain is required to use the model format specification.
+    """
+
+    fields: list[str] | None
+    """Fields to extract text from for embedding generation.
+
+    Defaults to the root ["$"], which embeds the json object as a whole.
+    """
+
+
+class TTLConfig(TypedDict, total=False):
+    """Configuration for TTL (time-to-live) behavior in the store."""
+
+    refresh_on_read: bool
+    """Default behavior for refreshing TTLs on read operations (GET and SEARCH).
+
+    If True, TTLs will be refreshed on read operations (get/search) by default.
+    This can be overridden per-operation by explicitly setting refresh_ttl.
+    Defaults to True if not configured.
+    """
+    default_ttl: float | None
+    """Default TTL (time-to-live) in minutes for new items.
+
+    If provided, new items will expire after this many minutes after their last access.
+    The expiration timer refreshes on both read and write operations.
+    Defaults to None (no expiration).
+    """
+    sweep_interval_minutes: int | None
+    """Interval in minutes between TTL sweep operations.
+
+    If provided, the store will periodically delete expired items based on TTL.
+    Defaults to None (no sweeping).
+    """
+
+
+class StoreConfig(TypedDict, total=False):
+    index: IndexConfig
+    ttl: TTLConfig
+
+
 # env

 env = Config()

@@ -55,6 +115,18 @@ STATS_INTERVAL_SECS = env("STATS_INTERVAL_SECS", cast=int, default=60)
 DATABASE_URI = env("DATABASE_URI", cast=str, default=getenv("POSTGRES_URI", undefined))
 MIGRATIONS_PATH = env("MIGRATIONS_PATH", cast=str, default="/storage/migrations")

+
+def _get_encryption_key(key_str: str | None):
+    if not key_str:
+        return None
+    key = key_str.encode(encoding="utf-8")
+    if len(key) not in (16, 24, 32):
+        raise ValueError("LANGGRAPH_AES_KEY must be 16, 24, or 32 bytes long.")
+    return key
+
+
+LANGGRAPH_AES_KEY = env("LANGGRAPH_AES_KEY", default=None, cast=_get_encryption_key)
+
 # redis
 REDIS_URI = env("REDIS_URI", cast=str)
 REDIS_CLUSTER = env("REDIS_CLUSTER", cast=bool, default=False)

@@ -69,7 +141,9 @@ See https://developer.chrome.com/blog/private-network-access-update-2024-03
 """

 HTTP_CONFIG: HttpConfig | None = env("LANGGRAPH_HTTP", cast=_parse_json, default=None)
-STORE_CONFIG:
+STORE_CONFIG: StoreConfig | None = env(
+    "LANGGRAPH_STORE", cast=_parse_json, default=None
+)
 CORS_ALLOW_ORIGINS = env("CORS_ALLOW_ORIGINS", cast=CommaSeparatedStrings, default="*")
 if HTTP_CONFIG and HTTP_CONFIG.get("cors"):
    CORS_CONFIG = HTTP_CONFIG["cors"]

@@ -132,6 +206,13 @@ FF_CRONS_ENABLED = env("FF_CRONS_ENABLED", cast=bool, default=True)
 # auth

 LANGGRAPH_AUTH_TYPE = env("LANGGRAPH_AUTH_TYPE", cast=str, default="noop")
+LANGGRAPH_POSTGRES_EXTENSIONS: Literal["standard", "lite"] = env(
+    "LANGGRAPH_POSTGRES_EXTENSIONS", cast=str, default="standard"
+)
+if LANGGRAPH_POSTGRES_EXTENSIONS not in ("standard", "lite"):
+    raise ValueError(
+        f"Unknown LANGGRAPH_POSTGRES_EXTENSIONS value: {LANGGRAPH_POSTGRES_EXTENSIONS}"
+    )
 LANGGRAPH_AUTH = env("LANGGRAPH_AUTH", cast=_parse_json, default=None)
 LANGSMITH_TENANT_ID = env("LANGSMITH_TENANT_ID", cast=str, default=None)
 LANGSMITH_AUTH_VERIFY_TENANT_ID = env(
langgraph_api/js/remote.py
CHANGED

@@ -1,8 +1,10 @@
 import asyncio
+import logging
 import os
 import shutil
 import ssl
 from collections.abc import AsyncIterator
+from contextlib import AbstractContextManager
 from typing import Any, Literal

 import certifi

@@ -605,60 +607,79 @@ async def run_remote_checkpointer():
    await server.serve()


+class DisableHttpxLoggingContextManager(AbstractContextManager):
+    """
+    Disable HTTP/1.1 200 OK logs spamming stdout.
+    """
+
+    filter: logging.Filter
+
+    def filter(self, record: logging.LogRecord) -> bool:
+        return "200 OK" not in record.getMessage()
+
+    def __enter__(self):
+        logging.getLogger("httpx").addFilter(self.filter)
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        logging.getLogger("httpx").removeFilter(self.filter)
+
+
 async def wait_until_js_ready():
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    with DisableHttpxLoggingContextManager():
+        async with (
+            httpx.AsyncClient(
+                base_url=f"http://localhost:{GRAPH_PORT}",
+                limits=httpx.Limits(max_connections=1),
+                transport=httpx.AsyncHTTPTransport(verify=SSL),
+            ) as graph_client,
+            httpx.AsyncClient(
+                base_url=f"http://localhost:{REMOTE_PORT}",
+                limits=httpx.Limits(max_connections=1),
+                transport=httpx.AsyncHTTPTransport(verify=SSL),
+            ) as checkpointer_client,
+        ):
+            attempt = 0
+            while not asyncio.current_task().cancelled():
+                try:
+                    res = await graph_client.get("/ok")
+                    res.raise_for_status()
+                    res = await checkpointer_client.get("/ok")
+                    res.raise_for_status()
+                    return
+                except httpx.HTTPError:
+                    if attempt > 240:
+                        raise
+                    else:
+                        attempt += 1
+                        await asyncio.sleep(0.5)
+
+
+async def js_healthcheck():
+    with DisableHttpxLoggingContextManager():
+        async with (
+            httpx.AsyncClient(
+                base_url=f"http://localhost:{GRAPH_PORT}",
+                limits=httpx.Limits(max_connections=1),
+                transport=httpx.AsyncHTTPTransport(verify=SSL),
+            ) as graph_client,
+            httpx.AsyncClient(
+                base_url=f"http://localhost:{REMOTE_PORT}",
+                limits=httpx.Limits(max_connections=1),
+                transport=httpx.AsyncHTTPTransport(verify=SSL),
+            ) as checkpointer_client,
+        ):
            try:
                res = await graph_client.get("/ok")
                res.raise_for_status()
                res = await checkpointer_client.get("/ok")
                res.raise_for_status()
-                return
-            except httpx.HTTPError:
-
-
-
-
-
-
-
-
-    async with (
-        httpx.AsyncClient(
-            base_url=f"http://localhost:{GRAPH_PORT}",
-            limits=httpx.Limits(max_connections=1),
-            transport=httpx.AsyncHTTPTransport(verify=SSL),
-        ) as graph_client,
-        httpx.AsyncClient(
-            base_url=f"http://localhost:{REMOTE_PORT}",
-            limits=httpx.Limits(max_connections=1),
-            transport=httpx.AsyncHTTPTransport(verify=SSL),
-        ) as checkpointer_client,
-    ):
-        try:
-            res = await graph_client.get("/ok")
-            res.raise_for_status()
-            res = await checkpointer_client.get("/ok")
-            res.raise_for_status()
-            return True
-        except httpx.HTTPError as exc:
-            logger.warning(
-                "JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
-                error=exc,
-            )
-            raise HTTPException(
-                status_code=500,
-                detail="JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
-            ) from exc
+                return True
+            except httpx.HTTPError as exc:
+                logger.warning(
+                    "JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
+                    error=exc,
+                )
+                raise HTTPException(
+                    status_code=500,
+                    detail="JS healthcheck failed. Either the JS server is not running or the event loop is blocked by a CPU-intensive task.",
+                ) from exc
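The context manager above is a standard use of logging filters. The standalone sketch below shows the same technique outside the server; the DropOkRecords name is illustrative and not from the package.

# Sketch of the underlying technique: attach a logging.Filter to the "httpx"
# logger so that successful "200 OK" request lines are suppressed, then remove it.
import logging

class DropOkRecords(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        return "200 OK" not in record.getMessage()

httpx_logger = logging.getLogger("httpx")
ok_filter = DropOkRecords()
httpx_logger.addFilter(ok_filter)
try:
    httpx_logger.info('HTTP Request: GET http://localhost/ok "HTTP/1.1 200 OK"')            # dropped
    httpx_logger.info('HTTP Request: GET http://localhost/missing "HTTP/1.1 404 Not Found"')  # kept
finally:
    httpx_logger.removeFilter(ok_filter)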
langgraph_api/js/tests/api.test.mts
CHANGED

@@ -765,7 +765,7 @@ describe("runs", () => {

  it.concurrent(
    "human in the loop - no modification",
-    { retry:
+    { retry: 3 },
    async () => {
      const assistant = await client.assistants.create({ graphId: "agent" });
      const thread = await client.threads.create();
langgraph_api/lifespan.py
CHANGED

@@ -12,6 +12,7 @@ from langgraph_api.metadata import metadata_loop
 from langgraph_license.validation import get_license_status, plus_features_enabled
 from langgraph_storage.database import start_pool, stop_pool
 from langgraph_storage.queue import queue
+from langgraph_storage.store import Store


 @asynccontextmanager

@@ -44,6 +45,8 @@ async def lifespan(
            and plus_features_enabled()
        ):
            tg.create_task(cron_scheduler())
+        if config.STORE_CONFIG and config.STORE_CONFIG.get("ttl"):
+            tg.create_task(Store().start_ttl_sweeper())
        yield
    finally:
        await stop_remote_graphs()
langgraph_api/stream.py
CHANGED

@@ -19,15 +19,15 @@ from langgraph.errors import (
    InvalidUpdateError,
 )
 from langgraph.pregel.debug import CheckpointPayload, TaskResultPayload
-from langgraph.types import Command, Send
 from pydantic import ValidationError
 from pydantic.v1 import ValidationError as ValidationErrorLegacy

 from langgraph_api.asyncio import ValueEvent, wait_if_not_done
+from langgraph_api.command import map_cmd
 from langgraph_api.graph import get_graph
 from langgraph_api.js.base import BaseRemotePregel
 from langgraph_api.metadata import HOST, PLAN, incr_nodes
-from langgraph_api.schema import Run,
+from langgraph_api.schema import Run, StreamMode
 from langgraph_api.serde import json_dumpb
 from langgraph_api.utils import AsyncConnectionProto
 from langgraph_storage.checkpoint import Checkpointer

@@ -70,32 +70,6 @@ def _preprocess_debug_checkpoint(payload: CheckpointPayload | None) -> dict[str,
    return payload


-def _map_cmd(cmd: RunCommand) -> Command:
-    goto = cmd.get("goto")
-    if goto is not None and not isinstance(goto, list):
-        goto = [cmd.get("goto")]
-
-    update = cmd.get("update")
-    if isinstance(update, tuple | list) and all(
-        isinstance(t, tuple | list) and len(t) == 2 and isinstance(t[0], str)
-        for t in update
-    ):
-        update = [tuple(t) for t in update]
-
-    return Command(
-        update=update,
-        goto=(
-            [
-                it if isinstance(it, str) else Send(it["node"], it["input"])
-                for it in goto
-            ]
-            if goto
-            else None
-        ),
-        resume=cmd.get("resume"),
-    )
-
-
 async def astream_state(
    stack: AsyncExitStack,
    conn: AsyncConnectionProto,

@@ -125,7 +99,7 @@ async def astream_state(
    )
    input = kwargs.pop("input")
    if cmd := kwargs.pop("command"):
-        input =
+        input = map_cmd(cmd)
    stream_mode: list[StreamMode] = kwargs.pop("stream_mode")
    feedback_keys = kwargs.pop("feedback_keys", None)
    stream_modes_set: set[StreamMode] = set(stream_mode) - {"events"}
langgraph_api/validation.py
CHANGED

@@ -27,7 +27,18 @@ AssistantVersionChange = jsonschema_rs.validator_for(
    openapi["components"]["schemas"]["AssistantVersionChange"]
 )
 ThreadCreate = jsonschema_rs.validator_for(
-
+    {
+        **openapi["components"]["schemas"]["ThreadCreate"],
+        "components": {
+            "schemas": {
+                "ThreadSuperstepUpdate": openapi["components"]["schemas"][
+                    "ThreadSuperstepUpdate"
+                ],
+                "Command": openapi["components"]["schemas"]["Command"],
+                "Send": openapi["components"]["schemas"]["Send"],
+            }
+        },
+    }
 )
 ThreadPatch = jsonschema_rs.validator_for(
    openapi["components"]["schemas"]["ThreadPatch"]

@@ -42,6 +53,7 @@ ThreadStateUpdate = jsonschema_rs.validator_for(
        },
    }
 )
+
 ThreadStateCheckpointRequest = jsonschema_rs.validator_for(
    {
        **openapi["components"]["schemas"]["ThreadStateCheckpointRequest"],
{langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langgraph-api
-Version: 0.0.30
+Version: 0.0.32
 Summary:
 License: Elastic-2.0
 Author: Nuno Campos

@@ -12,11 +12,11 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: cryptography (>=43.0.3,<44.0.0)
 Requires-Dist: httpx (>=0.25.0)
-Requires-Dist: jsonschema-rs (>=0.20.0,<0.
+Requires-Dist: jsonschema-rs (>=0.20.0,<0.30)
 Requires-Dist: langchain-core (>=0.2.38,<0.4.0)
 Requires-Dist: langgraph (>=0.2.56,<0.4.0)
-Requires-Dist: langgraph-checkpoint (>=2.0.
-Requires-Dist: langgraph-sdk (>=0.1.
+Requires-Dist: langgraph-checkpoint (>=2.0.21,<3.0)
+Requires-Dist: langgraph-sdk (>=0.1.58,<0.2.0)
 Requires-Dist: langsmith (>=0.1.63,<0.4.0)
 Requires-Dist: orjson (>=3.9.7)
 Requires-Dist: pyjwt (>=2.9.0,<3.0.0)
{langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
 LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
 langgraph_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langgraph_api/api/__init__.py,sha256=B2kHOBhKjBRdNiWPljYuJFxP__wYPCvgtgTaWYKZGTQ,5129
-langgraph_api/api/assistants.py,sha256=
+langgraph_api/api/assistants.py,sha256=nU6tnbgdr_6Utlq0A9nw2a6xxpUM_DNuCFI42_Kcs_o,14233
 langgraph_api/api/meta.py,sha256=ifJ_Ki0Qf2DYbmY6OKlqKhLGxbt55gm0lEqH1A0cJbw,2790
 langgraph_api/api/openapi.py,sha256=f9gfmWN2AMKNUpLCpSgZuw_aeOF9jCXPdOtFT5PaTWM,10960
-langgraph_api/api/runs.py,sha256=
+langgraph_api/api/runs.py,sha256=_RWKtmjD89ALnTk56dwo2rJwEi2oghk2Tqp0l1aCcZg,16677
 langgraph_api/api/store.py,sha256=VzAJVOwO0IxosBB7km5TTf2rhlWGyPkVz_LpvbxetVY,5437
-langgraph_api/api/threads.py,sha256=
+langgraph_api/api/threads.py,sha256=meaDGF3R2bYkx0KRa_le4Ka5nOSeqlMDWtLdnEhVYSY,8930
 langgraph_api/api/ui.py,sha256=LiOZVewKOPbKEykCm30hCEaOA7vuS_Ti5hB32EEy4vw,2082
 langgraph_api/asyncio.py,sha256=ipxOGL0CuKZeHw8895ojtfoBU2fj0iJOp48uhiLAmss,7786
 langgraph_api/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -17,8 +17,9 @@ langgraph_api/auth/langsmith/client.py,sha256=eKchvAom7hdkUXauD8vHNceBDDUijrFgdT
 langgraph_api/auth/middleware.py,sha256=jU8aDSIZHdzCGdifejRF7ndHkSjBtqIHcBwFIuUdHEA,1875
 langgraph_api/auth/noop.py,sha256=Bk6Nf3p8D_iMVy_OyfPlyiJp_aEwzL-sHrbxoXpCbac,586
 langgraph_api/auth/studio_user.py,sha256=FzFQRROKDlA9JjtBuwyZvk6Mbwno5M9RVYjDO6FU3F8,186
-langgraph_api/cli.py,sha256=
-langgraph_api/
+langgraph_api/cli.py,sha256=zzlGrMgiRXITuvztQ12y26ljOgEXk93Vq2quKYhbaD8,11750
+langgraph_api/command.py,sha256=3O9v3i0OPa96ARyJ_oJbLXkfO8rPgDhLCswgO9koTFA,768
+langgraph_api/config.py,sha256=XMJEVfPe1LrmVKGM5sisQs1eZOUDyU3JwscwJOm0q_k,8572
 langgraph_api/cron_scheduler.py,sha256=9yzbbGxzNgJdIg4ZT7yu2oTwT_wRuPxD1c2sbbd52xs,2630
 langgraph_api/errors.py,sha256=Bu_i5drgNTyJcLiyrwVE_6-XrSU50BHf9TDpttki9wQ,1690
 langgraph_api/graph.py,sha256=viFyQa8-BRRNRZqNNUzLB31cB4IypEMkMucQEJQYLWY,16556

@@ -30,7 +31,7 @@ langgraph_api/js/client.mts,sha256=2dptAX8fMowV9OC4DU4khjpZUgALBLVBTu3jTQbeUJY,2
 langgraph_api/js/errors.py,sha256=Cm1TKWlUCwZReDC5AQ6SgNIVGD27Qov2xcgHyf8-GXo,361
 langgraph_api/js/global.d.ts,sha256=cLJRZfYVGmgQ6o_xFevVNNTIi918ZUdxVRnpLVSjiAY,133
 langgraph_api/js/package.json,sha256=j6DMoVgwRqWqTwdd7R1f-kvmiTUAbO3HaUhM8K64lbE,1224
-langgraph_api/js/remote.py,sha256=
+langgraph_api/js/remote.py,sha256=g0H2x3W7kejxswhgZfFaljzl4Y7agMCnO5BdNVj1rDY,23474
 langgraph_api/js/schema.py,sha256=7idnv7URlYUdSNMBXQcw7E4SxaPxCq_Oxwnlml8q5ik,408
 langgraph_api/js/src/graph.mts,sha256=mRyMUp03Fwd5DlmNIFl3RiUCQuJ5XwmFp1AfAeKDfVc,3169
 langgraph_api/js/src/hooks.mjs,sha256=XtktgmIHlls_DsknAuwib-z7TqCm0haRoTXvnkgzMuo,601

@@ -42,7 +43,7 @@ langgraph_api/js/src/utils/importMap.mts,sha256=pX4TGOyUpuuWF82kXcxcv3-8mgusRezO
 langgraph_api/js/src/utils/pythonSchemas.mts,sha256=98IW7Z_VP7L_CHNRMb3_MsiV3BgLE2JsWQY_PQcRR3o,685
 langgraph_api/js/src/utils/serde.mts,sha256=OuyyO9btvwWd55rU_H4x91dFEJiaPxL-lL9O6Zgo908,742
 langgraph_api/js/sse.py,sha256=lsfp4nyJyA1COmlKG9e2gJnTttf_HGCB5wyH8OZBER8,4105
-langgraph_api/js/tests/api.test.mts,sha256=
+langgraph_api/js/tests/api.test.mts,sha256=RTJb9ZMX_IhqUPcShK61LZG9v_-xzdIZ0oEmOIjbJ4c,57709
 langgraph_api/js/tests/compose-postgres.yml,sha256=wV1Kws7WwUWVIudPkB--v58MOPL9hOcV0MUK-cvNrpA,1738
 langgraph_api/js/tests/graphs/.gitignore,sha256=26J8MarZNXh7snXD5eTpV3CPFTht5Znv8dtHYCLNfkw,12
 langgraph_api/js/tests/graphs/agent.css,sha256=QgcOC0W7IBsrg4pSqqpull-WTgtULZfx_lF_5ZxLdag,23

@@ -58,7 +59,7 @@ langgraph_api/js/tests/graphs/yarn.lock,sha256=i2AAIgXA3XBLM8-oU45wgUefCSG-Tne4g
 langgraph_api/js/tests/parser.test.mts,sha256=dEC8KTqKygeb1u39ZvpPqCT4HtfPD947nLmITt2buxA,27883
 langgraph_api/js/tests/utils.mts,sha256=2kTybJ3O7Yfe1q3ehDouqV54ibXkNzsPZ_wBZLJvY-4,421
 langgraph_api/js/yarn.lock,sha256=W89dVYZMThcec08lJMcYnvEEnQK7VM5cPglvwpIdRv0,82773
-langgraph_api/lifespan.py,sha256=
+langgraph_api/lifespan.py,sha256=u5Fv9I4JqfLk30p5cNqkcEqeB2vTRMGSM5WlFHlE0mU,2219
 langgraph_api/logging.py,sha256=KB1ciduIWcMFfG0q9c5_SZemGrscht1RZXLZfeJSS00,3619
 langgraph_api/metadata.py,sha256=jPLNIRxHi7taZ0g60UdOEXenkvDwoYdI11tsmHenb28,3443
 langgraph_api/middleware/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -74,9 +75,9 @@ langgraph_api/serde.py,sha256=VoJ7Z1IuqrQGXFzEP1qijAITtWCrmjtVqlCRuScjXJI,3533
 langgraph_api/server.py,sha256=CiNK327zTsEpoVGeJK1JOtZHvOBYRoz0CnBTZUmsC7c,4567
 langgraph_api/sse.py,sha256=2wNodCOP2eg7a9mpSu0S3FQ0CHk2BBV_vv0UtIgJIcc,4034
 langgraph_api/state.py,sha256=8jx4IoTCOjTJuwzuXJKKFwo1VseHjNnw_CCq4x1SW14,2284
-langgraph_api/stream.py,sha256=
+langgraph_api/stream.py,sha256=AgDjVxE8Tf3_TAB5td36mVy7_-9OwKcZJpHRWJdVCSM,11281
 langgraph_api/utils.py,sha256=92mSti9GfGdMRRWyESKQW5yV-75Z9icGHnIrBYvdypU,3619
-langgraph_api/validation.py,sha256=
+langgraph_api/validation.py,sha256=LnEdfgID2Z0HP75R_hqWxCn5d5ULGcDJ3r1xaDVv6-w,4845
 langgraph_api/webhook.py,sha256=1ncwO0rIZcj-Df9sxSnFEzd1gP1bfS4okeZQS8NSRoE,1382
 langgraph_api/worker.py,sha256=7yQfZBANt1kgJDOEs5B5c3Xy65lzNMmngVbBqLs-r5s,9802
 langgraph_license/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -86,15 +87,15 @@ langgraph_storage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 langgraph_storage/checkpoint.py,sha256=V4t2GwYEJdPCHbhq_4Udhlv0TWKDzlMu_rlNPdTDc50,3589
 langgraph_storage/database.py,sha256=I0AgFeJ-NSTT34vxKxQBUf1z2syFP0S8QpKCqTixrzY,5652
 langgraph_storage/inmem_stream.py,sha256=8bxkILIuFpr7P7RQ37SQAxrpRKvmbHdRB_nbfFiomlk,3263
-langgraph_storage/ops.py,sha256=
+langgraph_storage/ops.py,sha256=0DhtDQllBnV495DPrwhbR_bQUg7ZrGBe0FxSZcicz5g,72469
 langgraph_storage/queue.py,sha256=UDgsUTtUMfBSRDrQ8Onis-FJO4n7KTsX6sdpbY8Hs0A,5055
 langgraph_storage/retry.py,sha256=XmldOP4e_H5s264CagJRVnQMDFcEJR_dldVR1Hm5XvM,763
-langgraph_storage/store.py,sha256=
+langgraph_storage/store.py,sha256=33-J5-Xvobb9ArSa-GezP5KtfXgzWkHUHPyjRYmdw-E,2985
 langgraph_storage/ttl_dict.py,sha256=FlpEY8EANeXWKo_G5nmIotPquABZGyIJyk6HD9u6vqY,1533
 logging.json,sha256=3RNjSADZmDq38eHePMm1CbP6qZ71AmpBtLwCmKU9Zgo,379
-openapi.json,sha256=
-langgraph_api-0.0.
-langgraph_api-0.0.
-langgraph_api-0.0.
-langgraph_api-0.0.
-langgraph_api-0.0.
+openapi.json,sha256=6okfuwPAKB2Hi1tup-EFza49pmN_iD2Yt5JktMALcq4,127984
+langgraph_api-0.0.32.dist-info/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
+langgraph_api-0.0.32.dist-info/METADATA,sha256=LrWNwNTUVge4rKtDDFwJyFKdE5WRTPCLPj8MjSHsDvc,4027
+langgraph_api-0.0.32.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+langgraph_api-0.0.32.dist-info/entry_points.txt,sha256=3EYLgj89DfzqJHHYGxPH4A_fEtClvlRbWRUHaXO7hj4,77
+langgraph_api-0.0.32.dist-info/RECORD,,
langgraph_storage/ops.py
CHANGED

@@ -22,6 +22,7 @@ from starlette.exceptions import HTTPException

 from langgraph_api.asyncio import SimpleTaskGroup, ValueEvent, create_task
 from langgraph_api.auth.custom import handle_event
+from langgraph_api.command import map_cmd
 from langgraph_api.errors import UserInterrupt, UserRollback
 from langgraph_api.graph import get_graph
 from langgraph_api.schema import (

@@ -1081,6 +1082,78 @@ class Threads(Authenticated):
        else:
            raise HTTPException(status_code=400, detail="Thread has no graph ID.")

+    @staticmethod
+    async def bulk(
+        conn: InMemConnectionProto,
+        *,
+        config: Config,
+        supersteps: Sequence[dict],
+        ctx: Auth.types.BaseAuthContext | None = None,
+    ) -> ThreadUpdateResponse:
+        """Update a thread with a batch of state updates."""
+
+        from langgraph.pregel.types import StateUpdate
+
+        thread_id = _ensure_uuid(config["configurable"]["thread_id"])
+        filters = await Threads.handle_event(
+            ctx,
+            "update",
+            Auth.types.ThreadsUpdate(thread_id=thread_id),
+        )
+
+        thread_iter = await Threads.get(conn, thread_id, ctx=ctx)
+        thread = await fetchone(
+            thread_iter, not_found_detail=f"Thread {thread_id} not found."
+        )
+
+        thread_config = thread["config"]
+        metadata = thread["metadata"]
+
+        if not thread:
+            raise HTTPException(status_code=404, detail="Thread not found")
+
+        if not _check_filter_match(metadata, filters):
+            raise HTTPException(status_code=403, detail="Forbidden")
+
+        if graph_id := metadata.get("graph_id"):
+            config["configurable"].setdefault("graph_id", graph_id)
+            config["configurable"].setdefault("checkpoint_ns", "")
+
+            async with get_graph(
+                graph_id, thread_config, checkpointer=Checkpointer(conn)
+            ) as graph:
+                next_config = await graph.abulk_update_state(
+                    config,
+                    [
+                        [
+                            StateUpdate(
+                                map_cmd(update.get("command"))
+                                if update.get("command")
+                                else update.get("values"),
+                                update.get("as_node"),
+                            )
+                            for update in superstep.get("updates", [])
+                        ]
+                        for superstep in supersteps
+                    ],
+                )
+
+                state = await Threads.State.get(
+                    conn, config, subgraphs=False, ctx=ctx
+                )
+
+                # update thread values
+                for thread in conn.store["threads"]:
+                    if thread["thread_id"] == thread_id:
+                        thread["values"] = state.values
+                        break
+
+                return ThreadUpdateResponse(
+                    checkpoint=next_config["configurable"],
+                )
+        else:
+            raise HTTPException(status_code=400, detail="Thread has no graph ID")
+
    @staticmethod
    async def list(
        conn: InMemConnectionProto,

@@ -1905,7 +1978,8 @@ class Crons:
        conn: InMemConnectionProto,
        ctx: Auth.types.BaseAuthContext | None = None,
    ) -> AsyncIterator[Cron]:
-
+        yield
+        raise NotImplementedError("The in-mem server does not implement Crons.")

    @staticmethod
    async def set_next_run_date(
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import asyncio
|
|
1
2
|
import os
|
|
2
3
|
import threading
|
|
3
4
|
from collections import defaultdict
|
|
@@ -42,6 +43,9 @@ class DiskBackedInMemStore(InMemoryStore):
|
|
|
42
43
|
f"Unexpected error loading store {which} from {container.filename}: {str(e)}"
|
|
43
44
|
) from e
|
|
44
45
|
|
|
46
|
+
async def start_ttl_sweeper(self) -> asyncio.Task[None]:
|
|
47
|
+
return asyncio.create_task(asyncio.sleep(0))
|
|
48
|
+
|
|
45
49
|
def close(self) -> None:
|
|
46
50
|
self._data.close()
|
|
47
51
|
self._vectors.close()
|
openapi.json
CHANGED

@@ -805,6 +805,58 @@
        }
      }
    },
+    "/threads/state/bulk": {
+      "post": {
+        "tags": [
+          "Threads"
+        ],
+        "summary": "Bulk Update Thread State",
+        "description": "Create a new thread from a batch of state updates.",
+        "operationId": "bulk_update_thread_state_post",
+        "requestBody": {
+          "content": {
+            "application/json": {
+              "schema": {
+                "$ref": "#/components/schemas/ThreadStateBulkUpdate"
+              }
+            }
+          },
+          "required": true
+        },
+        "responses": {
+          "200": {
+            "description": "Success",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/Thread"
+                }
+              }
+            }
+          },
+          "409": {
+            "description": "Conflict",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/ErrorResponse"
+                }
+              }
+            }
+          },
+          "422": {
+            "description": "Validation Error",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/ErrorResponse"
+                }
+              }
+            }
+          }
+        }
+      }
+    },
    "/threads/{thread_id}/state": {
      "get": {
        "tags": [

@@ -3906,6 +3958,19 @@
          "title": "If Exists",
          "description": "How to handle duplicate creation. Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing thread).",
          "default": "raise"
+        },
+        "supersteps": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "properties": {
+              "updates": {
+                "type": "array",
+                "items": { "$ref": "#/components/schemas/ThreadSuperstepUpdate" }
+              }
+            },
+            "required": ["updates"]
+          }
        }
      },
      "type": "object",

@@ -4094,6 +4159,43 @@
      "title": "ThreadStateUpdate",
      "description": "Payload for updating the state of a thread."
    },
+    "ThreadSuperstepUpdate": {
+      "properties": {
+        "values": {
+          "anyOf": [
+            {
+              "type": "array",
+              "items": {
+                "type": "object"
+              }
+            },
+            {
+              "type": "object"
+            },
+            {
+              "type": "null"
+            }
+          ]
+        },
+        "command": {
+          "anyOf": [
+            {
+              "$ref": "#/components/schemas/Command"
+            },
+            {
+              "type": "null"
+            }
+          ],
+          "description": "The command associated with the update."
+        },
+        "as_node": {
+          "type": "string",
+          "description": "Update the state as if this node had just executed."
+        }
+      },
+      "required": ["as_node"],
+      "type": "object"
+    },
    "ThreadStateUpdateResponse": {
      "properties": {
        "checkpoint": {
{langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/LICENSE
File without changes

{langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/WHEEL
File without changes

{langgraph_api-0.0.30.dist-info → langgraph_api-0.0.32.dist-info}/entry_points.txt
File without changes