langgraph-api 0.3.0__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langgraph-api might be problematic; see the registry's advisory page for more details.

langgraph_api/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.3.0"
1
+ __version__ = "0.3.4"
langgraph_api/api/meta.py CHANGED
@@ -54,10 +54,16 @@ async def meta_metrics(request: ApiRequest):
54
54
  metadata.PROJECT_ID, metadata.HOST_REVISION_ID, metrics_format
55
55
  )
56
56
 
57
+ pg_redis_stats = pool_stats(
58
+ project_id=metadata.PROJECT_ID,
59
+ revision_id=metadata.HOST_REVISION_ID,
60
+ format=metrics_format,
61
+ )
62
+
57
63
  if metrics_format == "json":
58
64
  async with connect() as conn:
59
65
  resp = {
60
- **pool_stats(),
66
+ **pg_redis_stats,
61
67
  "queue": await Runs.stats(conn),
62
68
  **http_metrics,
63
69
  }
@@ -93,6 +99,7 @@ async def meta_metrics(request: ApiRequest):
93
99
  )
94
100
 
95
101
  metrics.extend(http_metrics)
102
+ metrics.extend(pg_redis_stats)
96
103
 
97
104
  metrics_response = "\n".join(metrics)
98
105
  return PlainTextResponse(metrics_response)
langgraph_api/config.py CHANGED
@@ -180,6 +180,7 @@ REDIS_CLUSTER = env("REDIS_CLUSTER", cast=bool, default=False)
180
180
  REDIS_MAX_CONNECTIONS = env("REDIS_MAX_CONNECTIONS", cast=int, default=2000)
181
181
  REDIS_CONNECT_TIMEOUT = env("REDIS_CONNECT_TIMEOUT", cast=float, default=10.0)
182
182
  REDIS_MAX_IDLE_TIME = env("REDIS_MAX_IDLE_TIME", cast=float, default=120.0)
183
+ REDIS_STREAM_TIMEOUT = env("REDIS_STREAM_TIMEOUT", cast=float, default=30.0)
183
184
  REDIS_KEY_PREFIX = env("REDIS_KEY_PREFIX", cast=str, default="")
184
185
  RUN_STATS_CACHE_SECONDS = env("RUN_STATS_CACHE_SECONDS", cast=int, default=60)
185
186
 
@@ -374,6 +375,7 @@ API_VARIANT = env("LANGSMITH_LANGGRAPH_API_VARIANT", cast=str, default="")
374
375
  # UI
375
376
  UI_USE_BUNDLER = env("LANGGRAPH_UI_BUNDLER", cast=bool, default=False)
376
377
  IS_QUEUE_ENTRYPOINT = False
378
+ IS_EXECUTOR_ENTRYPOINT = False
377
379
  ref_sha = None
378
380
  if not os.getenv("LANGCHAIN_REVISION_ID") and (
379
381
  ref_sha := os.getenv("LANGSMITH_LANGGRAPH_GIT_REF_SHA")
@@ -20,4 +20,7 @@ if __name__ == "__main__":
20
20
  uvloop.install()
21
21
  except ImportError:
22
22
  pass
23
+ from langgraph_api import config
24
+
25
+ config.IS_EXECUTOR_ENTRYPOINT = True
23
26
  asyncio.run(main(grpc_port=args.grpc_port, entrypoint_name="python-executor"))
@@ -11,10 +11,8 @@ if not (
11
11
 
12
12
  import asyncio
13
13
  import contextlib
14
- import http.server
15
14
  import json
16
15
  import logging.config
17
- import os
18
16
  import pathlib
19
17
  import signal
20
18
  from contextlib import asynccontextmanager
@@ -22,6 +20,7 @@ from typing import cast
22
20
 
23
21
  import structlog
24
22
 
23
+ from langgraph_runtime.database import pool_stats
25
24
  from langgraph_runtime.lifespan import lifespan
26
25
  from langgraph_runtime.metrics import get_metrics
27
26
 
@@ -29,69 +28,68 @@ logger = structlog.stdlib.get_logger(__name__)
29
28
 
30
29
 
31
30
  async def health_and_metrics_server():
31
+ import uvicorn
32
+ from starlette.applications import Starlette
33
+ from starlette.responses import JSONResponse, PlainTextResponse
34
+ from starlette.routing import Route
35
+
32
36
  port = int(os.getenv("PORT", "8080"))
33
- ok = json.dumps({"status": "ok"}).encode()
34
- ok_len = str(len(ok))
35
-
36
- class HealthAndMetricsHandler(http.server.SimpleHTTPRequestHandler):
37
- def log_message(self, format, *args):
38
- # Skip logging for /ok and /metrics endpoints
39
- if getattr(self, "path", None) in ["/ok", "/metrics"]:
40
- return
41
- # Log other requests normally
42
- super().log_message(format, *args)
43
-
44
- def do_GET(self):
45
- path = getattr(self, "path", None)
46
- if path == "/ok":
47
- self.send_response(200)
48
- self.send_header("Content-Type", "application/json")
49
- self.send_header("Content-Length", ok_len)
50
- self.end_headers()
51
- self.wfile.write(ok)
52
- elif path == "/metrics":
53
- metrics = get_metrics()
54
- worker_metrics = cast(dict[str, int], metrics["workers"])
55
- workers_max = worker_metrics["max"]
56
- workers_active = worker_metrics["active"]
57
- workers_available = worker_metrics["available"]
58
-
59
- project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
60
- revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
61
-
62
- metrics = [
63
- "# HELP lg_api_workers_max The maximum number of workers available.",
64
- "# TYPE lg_api_workers_max gauge",
65
- f'lg_api_workers_max{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_max}',
66
- "# HELP lg_api_workers_active The number of currently active workers.",
67
- "# TYPE lg_api_workers_active gauge",
68
- f'lg_api_workers_active{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_active}',
69
- "# HELP lg_api_workers_available The number of available (idle) workers.",
70
- "# TYPE lg_api_workers_available gauge",
71
- f'lg_api_workers_available{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_available}',
72
- ]
73
-
74
- metrics_response = "\n".join(metrics).encode()
75
- metrics_len = str(len(metrics_response))
76
-
77
- self.send_response(200)
78
- self.send_header(
79
- "Content-Type", "text/plain; version=0.0.4; charset=utf-8"
80
- )
81
- self.send_header("Content-Length", metrics_len)
82
- self.end_headers()
83
- self.wfile.write(metrics_response)
84
- else:
85
- self.send_error(http.HTTPStatus.NOT_FOUND)
86
-
87
- with http.server.ThreadingHTTPServer(
88
- ("0.0.0.0", port), HealthAndMetricsHandler
89
- ) as httpd:
90
- logger.info(f"Health and metrics server started at http://0.0.0.0:{port}")
91
- try:
92
- await asyncio.to_thread(httpd.serve_forever)
93
- finally:
94
- httpd.shutdown()
37
+
38
+ async def health_endpoint(request):
39
+ return JSONResponse({"status": "ok"})
40
+
41
+ async def metrics_endpoint(request):
42
+ metrics = get_metrics()
43
+ worker_metrics = cast(dict[str, int], metrics["workers"])
44
+ workers_max = worker_metrics["max"]
45
+ workers_active = worker_metrics["active"]
46
+ workers_available = worker_metrics["available"]
47
+
48
+ project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
49
+ revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
50
+
51
+ metrics_lines = [
52
+ "# HELP lg_api_workers_max The maximum number of workers available.",
53
+ "# TYPE lg_api_workers_max gauge",
54
+ f'lg_api_workers_max{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_max}',
55
+ "# HELP lg_api_workers_active The number of currently active workers.",
56
+ "# TYPE lg_api_workers_active gauge",
57
+ f'lg_api_workers_active{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_active}',
58
+ "# HELP lg_api_workers_available The number of available (idle) workers.",
59
+ "# TYPE lg_api_workers_available gauge",
60
+ f'lg_api_workers_available{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_available}',
61
+ ]
62
+
63
+ metrics_lines.extend(
64
+ pool_stats(
65
+ project_id=project_id,
66
+ revision_id=revision_id,
67
+ )
68
+ )
69
+
70
+ return PlainTextResponse(
71
+ "\n".join(metrics_lines),
72
+ media_type="text/plain; version=0.0.4; charset=utf-8",
73
+ )
74
+
75
+ app = Starlette(
76
+ routes=[
77
+ Route("/ok", health_endpoint),
78
+ Route("/metrics", metrics_endpoint),
79
+ ]
80
+ )
81
+
82
+ config = uvicorn.Config(
83
+ app,
84
+ host="0.0.0.0",
85
+ port=port,
86
+ log_level="error",
87
+ access_log=False,
88
+ )
89
+ server = uvicorn.Server(config)
90
+
91
+ logger.info(f"Health and metrics server started at http://0.0.0.0:{port}")
92
+ await server.serve()
95
93
 
96
94
 
97
95
  async def entrypoint(
langgraph_api/stream.py CHANGED
@@ -2,7 +2,7 @@ import uuid
2
2
  from collections.abc import AsyncIterator, Callable
3
3
  from contextlib import AsyncExitStack, aclosing, asynccontextmanager
4
4
  from functools import lru_cache
5
- from typing import Any, cast
5
+ from typing import Any, Literal, cast
6
6
 
7
7
  import langgraph.version
8
8
  import langsmith
@@ -423,7 +423,7 @@ async def consume(
423
423
  stream: AnyStream,
424
424
  run_id: str | uuid.UUID,
425
425
  resumable: bool = False,
426
- stream_modes: set[StreamMode] | None = None,
426
+ stream_modes: set[StreamMode | Literal["metadata"]] | None = None,
427
427
  ) -> None:
428
428
  stream_modes = stream_modes or set()
429
429
  if "messages-tuple" in stream_modes:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langgraph-api
3
- Version: 0.3.0
3
+ Version: 0.3.4
4
4
  Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
5
5
  License: Elastic-2.0
6
6
  License-File: LICENSE
@@ -1,12 +1,12 @@
1
- langgraph_api/__init__.py,sha256=VrXpHDu3erkzwl_WXrqINBm9xWkcyUy53IQOj042dOs,22
1
+ langgraph_api/__init__.py,sha256=oYLGMpySamd16KLiaBTfRyrAS7_oyp-TOEHmzmeumwg,22
2
2
  langgraph_api/asgi_transport.py,sha256=XtiLOu4WWsd-xizagBLzT5xUkxc9ZG9YqwvETBPjBFE,5161
3
3
  langgraph_api/asyncio.py,sha256=mZ7G32JjrGxrlH4OMy7AKlBQo5bZt4Sm2rlrBcU-Vj8,9483
4
4
  langgraph_api/cli.py,sha256=-ruIeKi1imvS6GriOfRDZY-waV4SbWiJ0BEFAciPVYI,16330
5
5
  langgraph_api/command.py,sha256=3O9v3i0OPa96ARyJ_oJbLXkfO8rPgDhLCswgO9koTFA,768
6
- langgraph_api/config.py,sha256=9wXEcxvVfPQjiR7jugomwTJ3vs5d-YoYYfgLDjRc3EQ,12133
6
+ langgraph_api/config.py,sha256=r9mmbyZlhBuJLpnTkaOLcNH6ufFNqm_2eCiuOmhqRl0,12241
7
7
  langgraph_api/cron_scheduler.py,sha256=25wYzEQrhPEivZrAPYOmzLPDOQa-aFogU37mTXc9TJk,2566
8
8
  langgraph_api/errors.py,sha256=zlnl3xXIwVG0oGNKKpXf1an9Rn_SBDHSyhe53hU6aLw,1858
9
- langgraph_api/executor_entrypoint.py,sha256=ClMyM9TB9oPisQzHqixA77Lnj_QGUg55MtQx-xku4o8,671
9
+ langgraph_api/executor_entrypoint.py,sha256=CaX813ygtf9CpOaBkfkQXJAHjFtmlScCkrOvTDmu4Aw,750
10
10
  langgraph_api/feature_flags.py,sha256=GjwmNjfg0Jhs3OzR2VbK2WgrRy3o5l8ibIYiUtQkDPA,363
11
11
  langgraph_api/graph.py,sha256=HTjJNQadrdi1tzJYNJ_iPIR6-zqC4-hj6YTD6zGQHYA,25072
12
12
  langgraph_api/http.py,sha256=fyK-H-0UfNy_BzuVW3aWWGvhRavmGAVMkDwDArryJ_4,5659
@@ -14,7 +14,7 @@ langgraph_api/http_metrics.py,sha256=MU9ccXt7aBb0AJ2SWEjwtbtbJEWmeqSdx7-CI51e32o
14
14
  langgraph_api/logging.py,sha256=ZZ95dDdWDayIbH1bgwNfn0U3CQ8kDoAvDFBDACna4-A,5150
15
15
  langgraph_api/metadata.py,sha256=fVsbwxVitAj4LGVYpCcadYeIFANEaNtcx6LBxQLcTqg,6949
16
16
  langgraph_api/patch.py,sha256=iLwSd9ZWoVj6MxozMyGyMvWWbE9RIP5eZX1dpCBSlSU,1480
17
- langgraph_api/queue_entrypoint.py,sha256=KDLpQtBu3amZTbNHS-RGFLR0DphuVQN6kUZm3ZGLe9g,5991
17
+ langgraph_api/queue_entrypoint.py,sha256=yFzVX3_YKTq4w1A5h5nRpVfiWuSOeJ9acHMPAcTIrKY,5282
18
18
  langgraph_api/route.py,sha256=EBhELuJ1He-ZYcAnR5YTImcIeDtWthDae5CHELBxPkM,5056
19
19
  langgraph_api/schema.py,sha256=6gabS4_1IeRWV5lyuDV-2i__8brXl89elAlmD5BmEII,8370
20
20
  langgraph_api/serde.py,sha256=3GvelKhySjlXaNqpg2GyUxU6-NEkvif7WlMF9if_EgU,6029
@@ -22,7 +22,7 @@ langgraph_api/server.py,sha256=uCAqPgCLJ6ckslLs0i_dacSR8mzuR0Y6PkkJYk0O3bE,7196
22
22
  langgraph_api/sse.py,sha256=SLdtZmTdh5D8fbWrQjuY9HYLd2dg8Rmi6ZMmFMVc2iE,4204
23
23
  langgraph_api/state.py,sha256=5RTOShiFVnkx-o6t99_x63CGwXw_8Eb-dSTpYirP8ro,4683
24
24
  langgraph_api/store.py,sha256=NIoNZojs6NbtG3VLBPQEFNttvp7XPkHAfjbQ3gY7aLY,4701
25
- langgraph_api/stream.py,sha256=iEApgVxJU9v58J5oKSuNe_c7ThpWf0bprgIDHUs8IzA,18397
25
+ langgraph_api/stream.py,sha256=iRF4Hu6A1n-KvDR4ki1BeOvOnOxGoV2NV8tiBHUWZOs,18428
26
26
  langgraph_api/thread_ttl.py,sha256=7H3gFlWcUiODPoaEzcwB0LR61uvcuyjD0ew_4BztB7k,1902
27
27
  langgraph_api/traceblock.py,sha256=Qq5CUdefnMDaRDnyvBSWGBClEj-f3oO7NbH6fedxOSE,630
28
28
  langgraph_api/utils.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -32,7 +32,7 @@ langgraph_api/worker.py,sha256=HVGyGVEYcXG-iKVgoBdFgANGxPjSs57JRl5OB4ra4nw,15267
32
32
  langgraph_api/api/__init__.py,sha256=WHy6oNLWtH1K7AxmmsU9RD-Vm6WP-Ov16xS8Ey9YCmQ,6090
33
33
  langgraph_api/api/assistants.py,sha256=5gVvU58Y1-EftBhCHGbEaOi_7cqGMKWhOt_GVfBC0Gg,16836
34
34
  langgraph_api/api/mcp.py,sha256=qe10ZRMN3f-Hli-9TI8nbQyWvMeBb72YB1PZVbyqBQw,14418
35
- langgraph_api/api/meta.py,sha256=w88TK1Wu4xOhgCfs04LBfL4pZkWhUW6QRwwAWdFby5A,4245
35
+ langgraph_api/api/meta.py,sha256=dFD9ZgykbKARLdVSaJD9vO3CShvEyBmGpkjE8tqii0c,4448
36
36
  langgraph_api/api/openapi.py,sha256=If-z1ckXt-Yu5bwQytK1LWyX_T7G46UtLfixgEP8hwc,11959
37
37
  langgraph_api/api/runs.py,sha256=AiohGTFLjWCb-oTXoNDvPMod4v6RS_ivlieoiqDmtQM,21812
38
38
  langgraph_api/api/store.py,sha256=xGcPFx4v-VxlK6HRU9uCjzCQ0v66cvc3o_PB5_g7n0Q,5550
@@ -97,8 +97,8 @@ langgraph_runtime/store.py,sha256=7mowndlsIroGHv3NpTSOZDJR0lCuaYMBoTnTrewjslw,11
97
97
  LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
98
98
  logging.json,sha256=3RNjSADZmDq38eHePMm1CbP6qZ71AmpBtLwCmKU9Zgo,379
99
99
  openapi.json,sha256=h1LbSeGqr2Oor6vO8d3m67XJ1lHhVYVyt2ULvyhf_Ks,160215
100
- langgraph_api-0.3.0.dist-info/METADATA,sha256=d8jDrigzqnFIyEDbLFhtK_xmHD45s1LIACSzVkRtiMU,3890
101
- langgraph_api-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
102
- langgraph_api-0.3.0.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
103
- langgraph_api-0.3.0.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
104
- langgraph_api-0.3.0.dist-info/RECORD,,
100
+ langgraph_api-0.3.4.dist-info/METADATA,sha256=L5VNuwfF9vgK-Pq3KK1AOz1E2_45tosXJlbigcHVd0Q,3890
101
+ langgraph_api-0.3.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
102
+ langgraph_api-0.3.4.dist-info/entry_points.txt,sha256=hGedv8n7cgi41PypMfinwS_HfCwA7xJIfS0jAp8htV8,78
103
+ langgraph_api-0.3.4.dist-info/licenses/LICENSE,sha256=ZPwVR73Biwm3sK6vR54djCrhaRiM4cAD2zvOQZV8Xis,3859
104
+ langgraph_api-0.3.4.dist-info/RECORD,,