langgraph-api 0.4.40__py3-none-any.whl → 0.5.6__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Potentially problematic release.
- langgraph_api/__init__.py +1 -1
- langgraph_api/api/assistants.py +65 -61
- langgraph_api/api/meta.py +6 -0
- langgraph_api/api/threads.py +11 -7
- langgraph_api/auth/custom.py +29 -24
- langgraph_api/cli.py +2 -49
- langgraph_api/config.py +131 -16
- langgraph_api/graph.py +1 -1
- langgraph_api/grpc/client.py +183 -0
- langgraph_api/grpc/config_conversion.py +225 -0
- langgraph_api/grpc/generated/core_api_pb2.py +275 -0
- langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2.pyi +35 -40
- langgraph_api/grpc/generated/engine_common_pb2.py +190 -0
- langgraph_api/grpc/generated/engine_common_pb2.pyi +634 -0
- langgraph_api/grpc/generated/engine_common_pb2_grpc.py +24 -0
- langgraph_api/grpc/ops.py +1045 -0
- langgraph_api/js/build.mts +1 -1
- langgraph_api/js/client.http.mts +1 -1
- langgraph_api/js/client.mts +1 -1
- langgraph_api/js/package.json +12 -12
- langgraph_api/js/src/graph.mts +20 -0
- langgraph_api/js/yarn.lock +176 -234
- langgraph_api/metadata.py +29 -21
- langgraph_api/queue_entrypoint.py +2 -2
- langgraph_api/route.py +14 -4
- langgraph_api/schema.py +2 -2
- langgraph_api/self_hosted_metrics.py +48 -2
- langgraph_api/serde.py +58 -14
- langgraph_api/server.py +16 -2
- langgraph_api/worker.py +1 -1
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/METADATA +6 -6
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/RECORD +38 -34
- langgraph_api/grpc_ops/client.py +0 -80
- langgraph_api/grpc_ops/generated/core_api_pb2.py +0 -274
- langgraph_api/grpc_ops/ops.py +0 -610
- /langgraph_api/{grpc_ops → grpc}/__init__.py +0 -0
- /langgraph_api/{grpc_ops → grpc}/generated/__init__.py +0 -0
- /langgraph_api/{grpc_ops → grpc}/generated/core_api_pb2_grpc.py +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/WHEEL +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/entry_points.txt +0 -0
- {langgraph_api-0.4.40.dist-info → langgraph_api-0.5.6.dist-info}/licenses/LICENSE +0 -0
langgraph_api/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.4.40"
+__version__ = "0.5.6"
langgraph_api/api/assistants.py
CHANGED
@@ -1,3 +1,4 @@
+from functools import partial
 from typing import Any
 from uuid import uuid4
 
@@ -15,7 +16,7 @@ from starlette.routing import BaseRoute
 from langgraph_api import store as api_store
 from langgraph_api.feature_flags import FF_USE_CORE_API, USE_RUNTIME_CONTEXT_API
 from langgraph_api.graph import get_assistant_id, get_graph
-from langgraph_api.
+from langgraph_api.grpc.ops import Assistants as GrpcAssistants
 from langgraph_api.js.base import BaseRemotePregel
 from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
 from langgraph_api.schema import ASSISTANT_FIELDS
@@ -37,7 +38,7 @@ from langgraph_api.validation import (
     ConfigValidator,
 )
 from langgraph_runtime.checkpoint import Checkpointer
-from langgraph_runtime.database import connect
+from langgraph_runtime.database import connect as base_connect
 from langgraph_runtime.ops import Assistants
 from langgraph_runtime.retry import retry_db
 
@@ -45,6 +46,8 @@ logger = structlog.stdlib.get_logger(__name__)
 
 CrudAssistants = GrpcAssistants if FF_USE_CORE_API else Assistants
 
+connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)
+
 EXCLUDED_CONFIG_SCHEMA = (
     "__pregel_checkpointer",
     "__pregel_store",
@@ -255,7 +258,7 @@ async def get_assistant_graph(
     assistant_id = get_assistant_id(str(request.path_params["assistant_id"]))
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
         config = json_loads(assistant["config"])
         configurable = config.setdefault("configurable", {})
@@ -312,43 +315,44 @@ async def get_assistant_subgraphs(
     assistant_id = request.path_params["assistant_id"]
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
-
-
-
-
-
-
-
-
-        )
-
-
-
-
+
+    config = json_loads(assistant["config"])
+    configurable = config.setdefault("configurable", {})
+    configurable.update(get_configurable_headers(request.headers))
+    async with get_graph(
+        assistant["graph_id"],
+        config,
+        checkpointer=Checkpointer(),
+        store=(await api_store.get_store()),
+    ) as graph:
+        namespace = request.path_params.get("namespace")
+
+        if isinstance(graph, BaseRemotePregel):
+            return ApiResponse(
+                await graph.fetch_subgraphs(
+                    namespace=namespace,
+                    recurse=request.query_params.get("recurse", "False")
+                    in ("true", "True"),
+                )
+            )
+
+        try:
+            return ApiResponse(
+                {
+                    ns: _graph_schemas(subgraph)
+                    async for ns, subgraph in graph.aget_subgraphs(
                         namespace=namespace,
                         recurse=request.query_params.get("recurse", "False")
                         in ("true", "True"),
                     )
-
-
-
-
-
-
-                    async for ns, subgraph in graph.aget_subgraphs(
-                        namespace=namespace,
-                        recurse=request.query_params.get("recurse", "False")
-                        in ("true", "True"),
-                    )
-                }
-            )
-        except NotImplementedError:
-            raise HTTPException(
-                422, detail="The graph does not support visualization"
-            ) from None
+                }
+            )
+        except NotImplementedError:
+            raise HTTPException(
+                422, detail="The graph does not support visualization"
+            ) from None
 
 
 @retry_db
@@ -359,40 +363,40 @@ async def get_assistant_schemas(
     assistant_id = request.path_params["assistant_id"]
     validate_uuid(assistant_id, "Invalid assistant ID: must be a UUID")
     async with connect() as conn:
-        assistant_ = await
-        # TODO Implementa cache so we can de-dent and release this connection.
+        assistant_ = await CrudAssistants.get(conn, assistant_id)
         assistant = await fetchone(assistant_)
-        config = json_loads(assistant["config"])
-        configurable = config.setdefault("configurable", {})
-        configurable.update(get_configurable_headers(request.headers))
-        async with get_graph(
-            assistant["graph_id"],
-            config,
-            checkpointer=Checkpointer(),
-            store=(await api_store.get_store()),
-        ) as graph:
-            if isinstance(graph, BaseRemotePregel):
-                schemas = await graph.fetch_state_schema()
-                return ApiResponse(
-                    {
-                        "graph_id": assistant["graph_id"],
-                        "input_schema": schemas.get("input"),
-                        "output_schema": schemas.get("output"),
-                        "state_schema": schemas.get("state"),
-                        "config_schema": schemas.get("config"),
-                        "context_schema": schemas.get("context"),
-                    }
-                )
-
-            schemas = _graph_schemas(graph)
 
+    config = json_loads(assistant["config"])
+    configurable = config.setdefault("configurable", {})
+    configurable.update(get_configurable_headers(request.headers))
+    async with get_graph(
+        assistant["graph_id"],
+        config,
+        checkpointer=Checkpointer(),
+        store=(await api_store.get_store()),
+    ) as graph:
+        if isinstance(graph, BaseRemotePregel):
+            schemas = await graph.fetch_state_schema()
             return ApiResponse(
                 {
                     "graph_id": assistant["graph_id"],
-
+                    "input_schema": schemas.get("input"),
+                    "output_schema": schemas.get("output"),
+                    "state_schema": schemas.get("state"),
+                    "config_schema": schemas.get("config"),
+                    "context_schema": schemas.get("context"),
                 }
             )
+
+        schemas = _graph_schemas(graph)
+
+        return ApiResponse(
+            {
+                "graph_id": assistant["graph_id"],
+                **schemas,
+            }
+        )
+
 
 @retry_db
 async def patch_assistant(
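The pattern repeated across assistants.py and threads.py in this release is a module-level switch: the CRUD alias (CrudAssistants, CrudThreads) points at either the gRPC-backed ops from langgraph_api.grpc.ops or the database-backed ops from langgraph_runtime.ops, chosen once at import time by FF_USE_CORE_API, and connect is pre-bound with the same flag. A minimal, self-contained sketch of that dispatch pattern (the classes and the supports_core_api keyword below are stand-ins, not the actual langgraph implementations):

import asyncio
import os
from contextlib import asynccontextmanager
from functools import partial


class RuntimeAssistants:
    # Stand-in for langgraph_runtime.ops.Assistants (database-backed).
    @staticmethod
    async def get(conn, assistant_id):
        return {"assistant_id": assistant_id, "backend": "runtime"}


class GrpcAssistants:
    # Stand-in for langgraph_api.grpc.ops.Assistants (core-API / gRPC-backed).
    @staticmethod
    async def get(conn, assistant_id):
        return {"assistant_id": assistant_id, "backend": "grpc"}


@asynccontextmanager
async def base_connect(*, supports_core_api: bool = False):
    # Stand-in for langgraph_runtime.database.connect.
    yield {"supports_core_api": supports_core_api}


# Chosen once at import time, mirroring the module-level aliases in the diff.
FF_USE_CORE_API = os.getenv("FF_USE_CORE_API", "false").lower() == "true"
CrudAssistants = GrpcAssistants if FF_USE_CORE_API else RuntimeAssistants
connect = partial(base_connect, supports_core_api=FF_USE_CORE_API)


async def get_assistant(assistant_id: str):
    # Handlers are written once against the alias and stay backend-agnostic.
    async with connect() as conn:
        return await CrudAssistants.get(conn, assistant_id)


print(asyncio.run(get_assistant("a1b2")))

Because the alias is resolved at import time, flipping the backend requires no changes in the request handlers themselves.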
langgraph_api/api/meta.py
CHANGED
@@ -86,6 +86,12 @@ async def meta_metrics(request: ApiRequest):
                 "# HELP lg_api_num_running_runs The number of runs currently running.",
                 "# TYPE lg_api_num_running_runs gauge",
                 f'lg_api_num_running_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_running"]}',
+                "# HELP lg_api_pending_runs_wait_time_max The maximum time a run has been pending, in seconds.",
+                "# TYPE lg_api_pending_runs_wait_time_max gauge",
+                f'lg_api_pending_runs_wait_time_max{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats.get("pending_runs_wait_time_max_secs") or 0}',
+                "# HELP lg_api_pending_runs_wait_time_med The median pending wait time across runs, in seconds.",
+                "# TYPE lg_api_pending_runs_wait_time_med gauge",
+                f'lg_api_pending_runs_wait_time_med{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats.get("pending_runs_wait_time_med_secs") or 0}',
             ]
         )
     except Exception as e:
langgraph_api/api/threads.py
CHANGED
@@ -5,6 +5,8 @@ from starlette.exceptions import HTTPException
 from starlette.responses import Response
 from starlette.routing import BaseRoute
 
+from langgraph_api.feature_flags import FF_USE_CORE_API
+from langgraph_api.grpc.ops import Threads as GrpcThreads
 from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
 from langgraph_api.schema import THREAD_FIELDS, ThreadStreamMode
 from langgraph_api.sse import EventSourceResponse
@@ -30,6 +32,8 @@ from langgraph_runtime.database import connect
 from langgraph_runtime.ops import Threads
 from langgraph_runtime.retry import retry_db
 
+CrudThreads = GrpcThreads if FF_USE_CORE_API else Threads
+
 
 @retry_db
 async def create_thread(
@@ -41,7 +45,7 @@ async def create_thread(
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
         thread_id = thread_id or str(uuid4())
-        iter = await
+        iter = await CrudThreads.put(
             conn,
             thread_id,
             metadata=payload.get("metadata"),
@@ -78,7 +82,7 @@ async def search_threads(
     limit = int(payload.get("limit") or 10)
     offset = int(payload.get("offset") or 0)
     async with connect() as conn:
-        threads_iter, next_offset = await
+        threads_iter, next_offset = await CrudThreads.search(
             conn,
             status=payload.get("status"),
             values=payload.get("values"),
@@ -103,7 +107,7 @@ async def count_threads(
     """Count threads."""
     payload = await request.json(ThreadCountRequest)
     async with connect() as conn:
-        count = await
+        count = await CrudThreads.count(
             conn,
             status=payload.get("status"),
             values=payload.get("values"),
@@ -277,7 +281,7 @@ async def get_thread(
     thread_id = request.path_params["thread_id"]
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
-        thread = await
+        thread = await CrudThreads.get(conn, thread_id)
         return ApiResponse(await fetchone(thread))
 
 
@@ -290,7 +294,7 @@ async def patch_thread(
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     payload = await request.json(ThreadPatch)
     async with connect() as conn:
-        thread = await
+        thread = await CrudThreads.patch(
             conn,
             thread_id,
             metadata=payload.get("metadata", {}),
@@ -305,7 +309,7 @@ async def delete_thread(request: ApiRequest):
     thread_id = request.path_params["thread_id"]
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
-        tid = await
+        tid = await CrudThreads.delete(conn, thread_id)
         await fetchone(tid)
     return Response(status_code=204)
 
@@ -314,7 +318,7 @@ async def delete_thread(request: ApiRequest):
 async def copy_thread(request: ApiRequest):
     thread_id = request.path_params["thread_id"]
     async with connect() as conn:
-        iter = await
+        iter = await CrudThreads.copy(conn, thread_id)
         return ApiResponse(await fetchone(iter, not_found_code=409))
 
 
langgraph_api/auth/custom.py
CHANGED
@@ -355,34 +355,39 @@ def _solve_fastapi_dependencies(
     }
 
     async def decorator(scope: dict, request: Request):
-        async with AsyncExitStack() as
-
-
-
-
-
-
-
+        async with AsyncExitStack() as request_stack:
+            scope["fastapi_inner_astack"] = request_stack
+            async with AsyncExitStack() as stack:
+                scope["fastapi_function_astack"] = stack
+                all_solved = await asyncio.gather(
+                    *(
+                        solve_dependencies(
+                            request=request,
+                            dependant=dependent,
+                            async_exit_stack=stack,
+                            embed_body_fields=False,
+                        )
+                        for dependent in dependents.values()
                     )
-            for dependent in dependents.values()
                 )
-
-
-
-
-
-
+                all_injected = await asyncio.gather(
+                    *(
+                        _run_async(dependent.call, solved.values, is_async)
+                        for dependent, solved in zip(
+                            dependents.values(), all_solved, strict=False
+                        )
                     )
                 )
-
-
-
-
-
-
-
-
-
+                kwargs = {
+                    name: value
+                    for name, value in zip(
+                        dependents.keys(), all_injected, strict=False
+                    )
+                }
+                other_params = _extract_arguments_from_scope(
+                    scope, _param_names, request=request
+                )
+                return await fn(**(kwargs | other_params))
 
     return decorator
 
langgraph_api/cli.py
CHANGED
@@ -8,12 +8,10 @@ import typing
 from collections.abc import Mapping, Sequence
 from typing import Literal
 
-from typing_extensions import TypedDict
-
 if typing.TYPE_CHECKING:
     from packaging.version import Version
 
-    from langgraph_api.config import HttpConfig, StoreConfig
+    from langgraph_api.config import AuthConfig, HttpConfig, StoreConfig
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -81,51 +79,6 @@ def patch_environment(**kwargs):
         os.environ[key] = value
 
 
-class SecurityConfig(TypedDict, total=False):
-    securitySchemes: dict
-    security: list
-    # path => {method => security}
-    paths: dict[str, dict[str, list]]
-
-
-class CacheConfig(TypedDict, total=False):
-    cache_keys: list[str]
-    ttl_seconds: int
-    max_size: int
-
-
-class AuthConfig(TypedDict, total=False):
-    path: str
-    """Path to the authentication function in a Python file."""
-    disable_studio_auth: bool
-    """Whether to disable auth when connecting from the LangSmith Studio."""
-    openapi: SecurityConfig
-    """The schema to use for updating the openapi spec.
-
-    Example:
-        {
-            "securitySchemes": {
-                "OAuth2": {
-                    "type": "oauth2",
-                    "flows": {
-                        "password": {
-                            "tokenUrl": "/token",
-                            "scopes": {
-                                "me": "Read information about the current user",
-                                "items": "Access to create and manage items"
-                            }
-                        }
-                    }
-                }
-            },
-            "security": [
-                {"OAuth2": ["me"]}  # Default security requirement for all endpoints
-            ]
-        }
-    """
-    cache: CacheConfig | None
-
-
 def run_server(
     host: str = "127.0.0.1",
     port: int = 2024,
@@ -141,7 +94,7 @@ def run_server(
     reload_includes: Sequence[str] | None = None,
     reload_excludes: Sequence[str] | None = None,
     store: typing.Optional["StoreConfig"] = None,
-    auth: AuthConfig
+    auth: typing.Optional["AuthConfig"] = None,
     http: typing.Optional["HttpConfig"] = None,
     ui: dict | None = None,
     ui_config: dict | None = None,
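With the TypedDicts relocated, run_server now takes auth as an optional AuthConfig imported from langgraph_api.config under TYPE_CHECKING. For reference, a plain dict matching that shape (field names come from the TypedDict shown in the removed block above; the path and scope values are illustrative only):

# Illustrative AuthConfig-shaped dict; "path" and the scope values are hypothetical.
auth_config = {
    "path": "./src/security/auth.py:auth",
    "disable_studio_auth": False,
    "openapi": {
        "securitySchemes": {
            "OAuth2": {
                "type": "oauth2",
                "flows": {
                    "password": {
                        "tokenUrl": "/token",
                        "scopes": {"me": "Read information about the current user"},
                    }
                },
            }
        },
        "security": [{"OAuth2": ["me"]}],
    },
}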
langgraph_api/config.py
CHANGED
@@ -1,8 +1,10 @@
 import os
+from collections.abc import Callable
 from os import environ, getenv
-from typing import Literal
+from typing import Literal, TypeVar, cast
 
 import orjson
+from pydantic import TypeAdapter
 from starlette.config import Config, undefined
 from starlette.datastructures import CommaSeparatedStrings
 from typing_extensions import TypedDict
@@ -22,11 +24,14 @@ class CorsConfig(TypedDict, total=False):
     max_age: int
 
 
-class ConfigurableHeaders(TypedDict):
+class ConfigurableHeaders(TypedDict, total=False):
     includes: list[str] | None
     excludes: list[str] | None
 
 
+MiddlewareOrders = Literal["auth_first", "middleware_first"]
+
+
 class HttpConfig(TypedDict, total=False):
     app: str
     """Import path for a custom Starlette/FastAPI app to mount"""
@@ -52,6 +57,8 @@ class HttpConfig(TypedDict, total=False):
     """Prefix for mounted routes. E.g., "/my-deployment/api"."""
     configurable_headers: ConfigurableHeaders | None
     logging_headers: ConfigurableHeaders | None
+    enable_custom_route_auth: bool
+    middleware_order: MiddlewareOrders | None
 
 
 class ThreadTTLConfig(TypedDict, total=False):
@@ -121,6 +128,45 @@ class StoreConfig(TypedDict, total=False):
     ttl: TTLConfig
 
 
+class SerdeConfig(TypedDict, total=False):
+    """Configuration for the built-in serde, which handles checkpointing of state.
+
+    If omitted, no serde is set up (the object store will still be present, however)."""
+
+    allowed_json_modules: list[list[str]] | Literal[True] | None
+    """Optional. List of allowed python modules to de-serialize custom objects from.
+
+    If provided, only the specified modules will be allowed to be deserialized.
+    If omitted, no modules are allowed, and the object returned will simply be a json object OR
+    a deserialized langchain object.
+
+    Example:
+        {...
+            "serde": {
+                "allowed_json_modules": [
+                    ["my_agent", "my_file", "SomeType"],
+                ]
+            }
+        }
+
+    If you set this to True, any module will be allowed to be deserialized.
+
+    Example:
+        {...
+            "serde": {
+                "allowed_json_modules": true
+            }
+        }
+
+    """
+    pickle_fallback: bool
+    """Optional. Whether to allow pickling as a fallback for deserialization.
+
+    If True, pickling will be allowed as a fallback for deserialization.
+    If False, pickling will not be allowed as a fallback for deserialization.
+    Defaults to True if not configured."""
+
+
 class CheckpointerConfig(TypedDict, total=False):
     """Configuration for the built-in checkpointer, which handles checkpointing of state.
 
@@ -133,6 +179,53 @@ class CheckpointerConfig(TypedDict, total=False):
     If provided, the checkpointer will apply TTL settings according to the configuration.
     If omitted, no TTL behavior is configured.
     """
+    serde: SerdeConfig | None
+    """Optional. Defines the configuration for how checkpoints are serialized."""
+
+
+class SecurityConfig(TypedDict, total=False):
+    securitySchemes: dict
+    security: list
+    # path => {method => security}
+    paths: dict[str, dict[str, list]]
+
+
+class CacheConfig(TypedDict, total=False):
+    cache_keys: list[str]
+    ttl_seconds: int
+    max_size: int
+
+
+class AuthConfig(TypedDict, total=False):
+    path: str
+    """Path to the authentication function in a Python file."""
+    disable_studio_auth: bool
+    """Whether to disable auth when connecting from the LangSmith Studio."""
+    openapi: SecurityConfig
+    """The schema to use for updating the openapi spec.
+
+    Example:
+        {
+            "securitySchemes": {
+                "OAuth2": {
+                    "type": "oauth2",
+                    "flows": {
+                        "password": {
+                            "tokenUrl": "/token",
+                            "scopes": {
+                                "me": "Read information about the current user",
+                                "items": "Access to create and manage items"
+                            }
+                        }
+                    }
+                }
+            },
+            "security": [
+                {"OAuth2": ["me"]}  # Default security requirement for all endpoints
+            ]
+        }
+    """
+    cache: CacheConfig | None
 
 
 # env
@@ -140,13 +233,22 @@ class CheckpointerConfig(TypedDict, total=False):
 env = Config()
 
 
-
+TD = TypeVar("TD")
+
+
+def _parse_json(json: str | None, schema: TypeAdapter | None = None) -> dict | None:
     if not json:
         return None
-    parsed = orjson.loads(json)
-
-
-
+    parsed = schema.validate_json(json) if schema else orjson.loads(json)
+    return parsed or None
+
+
+def _parse_schema(schema: type[TD]) -> Callable[[str | None], TD | None]:
+    def composed(json: str | None) -> TD | None:
+        return cast(TD | None, _parse_json(json, schema=TypeAdapter(schema)))
+
+    composed.__name__ = schema.__name__  # This just gives a nicer error message if the user provides an incompatible value
+    return composed
 
 
 STATS_INTERVAL_SECS = env("STATS_INTERVAL_SECS", cast=int, default=60)
@@ -179,6 +281,9 @@ REDIS_URI = env("REDIS_URI", cast=str)
 REDIS_CLUSTER = env("REDIS_CLUSTER", cast=bool, default=False)
 REDIS_MAX_CONNECTIONS = env("REDIS_MAX_CONNECTIONS", cast=int, default=2000)
 REDIS_CONNECT_TIMEOUT = env("REDIS_CONNECT_TIMEOUT", cast=float, default=10.0)
+REDIS_HEALTH_CHECK_INTERVAL = env(
+    "REDIS_HEALTH_CHECK_INTERVAL", cast=float, default=10.0
+)
 REDIS_KEY_PREFIX = env("REDIS_KEY_PREFIX", cast=str, default="")
 RUN_STATS_CACHE_SECONDS = env("RUN_STATS_CACHE_SECONDS", cast=int, default=60)
 
@@ -189,17 +294,22 @@ ALLOW_PRIVATE_NETWORK = env("ALLOW_PRIVATE_NETWORK", cast=bool, default=False)
 See https://developer.chrome.com/blog/private-network-access-update-2024-03
 """
 
-
-
-
+# gRPC client pool size for persistence server.
+GRPC_CLIENT_POOL_SIZE = env("GRPC_CLIENT_POOL_SIZE", cast=int, default=5)
+
+# Minimum payload size to use the dedicated thread pool for JSON parsing.
+# (Otherwise, the payload is parsed directly in the event loop.)
+JSON_THREAD_POOL_MINIMUM_SIZE_BYTES = 100 * 1024  # 100 KB
+
+HTTP_CONFIG = env("LANGGRAPH_HTTP", cast=_parse_schema(HttpConfig), default=None)
+STORE_CONFIG = env("LANGGRAPH_STORE", cast=_parse_schema(StoreConfig), default=None)
 
 MOUNT_PREFIX: str | None = env("MOUNT_PREFIX", cast=str, default=None) or (
     HTTP_CONFIG.get("mount_prefix") if HTTP_CONFIG else None
 )
 
 CORS_ALLOW_ORIGINS = env("CORS_ALLOW_ORIGINS", cast=CommaSeparatedStrings, default="*")
-CORS_CONFIG
+CORS_CONFIG = env("CORS_CONFIG", cast=_parse_schema(CorsConfig), default=None) or (
     HTTP_CONFIG.get("cors") if HTTP_CONFIG else None
 )
 """
@@ -277,8 +387,13 @@ def _parse_thread_ttl(value: str | None) -> ThreadTTLConfig | None:
     }
 
 
-CHECKPOINTER_CONFIG
-    "LANGGRAPH_CHECKPOINTER", cast=
+CHECKPOINTER_CONFIG = env(
+    "LANGGRAPH_CHECKPOINTER", cast=_parse_schema(CheckpointerConfig), default=None
+)
+SERDE: SerdeConfig | None = (
+    CHECKPOINTER_CONFIG["serde"]
+    if CHECKPOINTER_CONFIG and "serde" in CHECKPOINTER_CONFIG
+    else None
 )
 THREAD_TTL: ThreadTTLConfig | None = env(
     "LANGGRAPH_THREAD_TTL", cast=_parse_thread_ttl, default=None
@@ -290,8 +405,8 @@ N_JOBS_PER_WORKER = env("N_JOBS_PER_WORKER", cast=int, default=10)
 BG_JOB_TIMEOUT_SECS = env("BG_JOB_TIMEOUT_SECS", cast=float, default=3600)
 
 FF_CRONS_ENABLED = env("FF_CRONS_ENABLED", cast=bool, default=True)
-FF_RICH_THREADS = env("FF_RICH_THREADS", cast=bool, default=True)
 FF_LOG_DROPPED_EVENTS = env("FF_LOG_DROPPED_EVENTS", cast=bool, default=False)
+FF_LOG_QUERY_AND_PARAMS = env("FF_LOG_QUERY_AND_PARAMS", cast=bool, default=False)
 
 # auth
 
@@ -303,7 +418,7 @@ if LANGGRAPH_POSTGRES_EXTENSIONS not in ("standard", "lite"):
     raise ValueError(
         f"Unknown LANGGRAPH_POSTGRES_EXTENSIONS value: {LANGGRAPH_POSTGRES_EXTENSIONS}"
     )
-LANGGRAPH_AUTH = env("LANGGRAPH_AUTH", cast=
+LANGGRAPH_AUTH = env("LANGGRAPH_AUTH", cast=_parse_schema(AuthConfig), default=None)
 LANGSMITH_TENANT_ID = env("LANGSMITH_TENANT_ID", cast=str, default=None)
 LANGSMITH_AUTH_VERIFY_TENANT_ID = env(
     "LANGSMITH_AUTH_VERIFY_TENANT_ID",
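The new _parse_schema helper replaces bare orjson.loads parsing of config env vars: it builds a cast= callable around pydantic's TypeAdapter, so values such as LANGGRAPH_HTTP, LANGGRAPH_CHECKPOINTER, and LANGGRAPH_AUTH are validated against their TypedDicts on load. A self-contained sketch of the same idea, using a reduced CorsConfig whose fields are illustrative:

from collections.abc import Callable
from typing import TypeVar, cast

from pydantic import TypeAdapter
from typing_extensions import TypedDict

TD = TypeVar("TD")


class CorsConfig(TypedDict, total=False):
    # Reduced example; not the full CorsConfig from the package.
    allow_origins: list[str]
    max_age: int


def _parse_json(json: str | None, schema: TypeAdapter | None = None):
    # Simplified: validate against the adapter when one is supplied.
    if not json:
        return None
    return schema.validate_json(json) if schema else None


def parse_schema(schema: type[TD]) -> Callable[[str | None], TD | None]:
    # Build a cast= callable that validates a JSON env var against a TypedDict.
    def composed(json: str | None) -> TD | None:
        return cast(TD | None, _parse_json(json, schema=TypeAdapter(schema)))

    composed.__name__ = schema.__name__  # nicer error message on bad input
    return composed


# Hypothetical usage: validate a CORS config passed through an env var.
cors = parse_schema(CorsConfig)('{"allow_origins": ["*"], "max_age": 600}')
print(cors)  # {'allow_origins': ['*'], 'max_age': 600}

Invalid JSON or fields of the wrong type raise a pydantic ValidationError at startup instead of failing later at request time.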
langgraph_api/graph.py
CHANGED
@@ -51,7 +51,7 @@ async def register_graph(
     description: str | None = None,
 ) -> None:
     """Register a graph."""
-    from langgraph_api.
+    from langgraph_api.grpc.ops import Assistants as AssistantsGrpc
     from langgraph_runtime.database import connect
     from langgraph_runtime.ops import Assistants as AssistantsRuntime
 