langgraph-api 0.3.1__tar.gz → 0.3.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langgraph-api has been flagged as potentially problematic.

Files changed (120)
  1. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/Makefile +4 -1
  2. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/PKG-INFO +1 -1
  3. langgraph_api-0.3.4/langgraph_api/__init__.py +1 -0
  4. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/meta.py +8 -1
  5. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/config.py +2 -0
  6. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/executor_entrypoint.py +3 -0
  7. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/queue_entrypoint.py +62 -64
  8. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/stream.py +2 -2
  9. langgraph_api-0.3.1/langgraph_api/__init__.py +0 -1
  10. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/.gitignore +0 -0
  11. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/LICENSE +0 -0
  12. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/README.md +0 -0
  13. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/.gitignore +0 -0
  14. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/Makefile +0 -0
  15. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/README.md +0 -0
  16. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/burst.js +0 -0
  17. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/clean.js +0 -0
  18. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/graphs.js +0 -0
  19. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/package.json +0 -0
  20. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/ramp.js +0 -0
  21. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/update-revision.js +0 -0
  22. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/benchmark/weather.js +0 -0
  23. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/constraints.txt +0 -0
  24. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/forbidden.txt +0 -0
  25. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/healthcheck.py +0 -0
  26. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/__init__.py +0 -0
  27. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/assistants.py +0 -0
  28. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/mcp.py +0 -0
  29. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/openapi.py +0 -0
  30. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/runs.py +0 -0
  31. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/store.py +0 -0
  32. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/threads.py +0 -0
  33. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/ui.py +0 -0
  34. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/asgi_transport.py +0 -0
  35. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/asyncio.py +0 -0
  36. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/__init__.py +0 -0
  37. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/custom.py +0 -0
  38. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/langsmith/__init__.py +0 -0
  39. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/langsmith/backend.py +0 -0
  40. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/langsmith/client.py +0 -0
  41. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/middleware.py +0 -0
  42. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/noop.py +0 -0
  43. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/auth/studio_user.py +0 -0
  44. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/cli.py +0 -0
  45. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/command.py +0 -0
  46. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/cron_scheduler.py +0 -0
  47. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/errors.py +0 -0
  48. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/feature_flags.py +0 -0
  49. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/graph.py +0 -0
  50. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/http.py +0 -0
  51. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/http_metrics.py +0 -0
  52. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/.gitignore +0 -0
  53. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/.prettierrc +0 -0
  54. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/__init__.py +0 -0
  55. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/base.py +0 -0
  56. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/build.mts +0 -0
  57. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/client.http.mts +0 -0
  58. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/client.mts +0 -0
  59. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/errors.py +0 -0
  60. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/global.d.ts +0 -0
  61. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/package.json +0 -0
  62. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/remote.py +0 -0
  63. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/schema.py +0 -0
  64. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/graph.mts +0 -0
  65. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/load.hooks.mjs +0 -0
  66. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/preload.mjs +0 -0
  67. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/utils/files.mts +0 -0
  68. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/utils/importMap.mts +0 -0
  69. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/utils/pythonSchemas.mts +0 -0
  70. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/src/utils/serde.mts +0 -0
  71. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/sse.py +0 -0
  72. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/traceblock.mts +0 -0
  73. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/tsconfig.json +0 -0
  74. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/ui.py +0 -0
  75. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/js/yarn.lock +0 -0
  76. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/logging.py +0 -0
  77. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/metadata.py +0 -0
  78. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/middleware/__init__.py +0 -0
  79. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/middleware/http_logger.py +0 -0
  80. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/middleware/private_network.py +0 -0
  81. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/middleware/request_id.py +0 -0
  82. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/models/__init__.py +0 -0
  83. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/models/run.py +0 -0
  84. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/patch.py +0 -0
  85. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/route.py +0 -0
  86. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/schema.py +0 -0
  87. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/serde.py +0 -0
  88. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/server.py +0 -0
  89. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/sse.py +0 -0
  90. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/state.py +0 -0
  91. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/store.py +0 -0
  92. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/thread_ttl.py +0 -0
  93. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/traceblock.py +0 -0
  94. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/tunneling/cloudflare.py +0 -0
  95. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/__init__.py +0 -0
  96. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/cache.py +0 -0
  97. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/config.py +0 -0
  98. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/future.py +0 -0
  99. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/headers.py +0 -0
  100. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils/uuids.py +0 -0
  101. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/utils.py +0 -0
  102. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/validation.py +0 -0
  103. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/webhook.py +0 -0
  104. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/worker.py +0 -0
  105. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_license/__init__.py +0 -0
  106. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_license/validation.py +0 -0
  107. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/__init__.py +0 -0
  108. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/checkpoint.py +0 -0
  109. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/database.py +0 -0
  110. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/lifespan.py +0 -0
  111. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/metrics.py +0 -0
  112. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/ops.py +0 -0
  113. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/queue.py +0 -0
  114. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/retry.py +0 -0
  115. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_runtime/store.py +0 -0
  116. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/logging.json +0 -0
  117. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/openapi.json +0 -0
  118. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/pyproject.toml +0 -0
  119. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/scripts/create_license.py +0 -0
  120. {langgraph_api-0.3.1 → langgraph_api-0.3.4}/uv.lock +0 -0
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/Makefile

@@ -1,4 +1,4 @@
-.PHONY: build release lint format test test_watch start start-inmem start-inmem-license-oss start check-version
+.PHONY: build release lint format test test_watch start start-inmem start-inmem-license-oss start check-version check-base-imports
 
 # lint commands
 
@@ -11,6 +11,9 @@ format:
 	uv run ruff check --fix .
 	uv run ruff format .
 
+check-base-imports:
+	LANGGRAPH_RUNTIME_EDITION=inmem DATABASE_URI=:memory: REDIS_URI=_FAKE uv run python -c "from langgraph_api.config import *; from langgraph_runtime import *"
+
 # test commands
 
 TEST ?= tests/
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langgraph-api
-Version: 0.3.1
+Version: 0.3.4
 Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0
 License-File: LICENSE
langgraph_api-0.3.4/langgraph_api/__init__.py

@@ -0,0 +1 @@
+__version__ = "0.3.4"
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/api/meta.py

@@ -54,10 +54,16 @@ async def meta_metrics(request: ApiRequest):
         metadata.PROJECT_ID, metadata.HOST_REVISION_ID, metrics_format
     )
 
+    pg_redis_stats = pool_stats(
+        project_id=metadata.PROJECT_ID,
+        revision_id=metadata.HOST_REVISION_ID,
+        format=metrics_format,
+    )
+
     if metrics_format == "json":
         async with connect() as conn:
             resp = {
-                **pool_stats(),
+                **pg_redis_stats,
                 "queue": await Runs.stats(conn),
                 **http_metrics,
             }
@@ -93,6 +99,7 @@ async def meta_metrics(request: ApiRequest):
        )
 
     metrics.extend(http_metrics)
+    metrics.extend(pg_redis_stats)
 
     metrics_response = "\n".join(metrics)
     return PlainTextResponse(metrics_response)
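
For orientation: meta.py now forwards the project/revision labels and the requested output format to pool_stats and merges its result into both the JSON and the Prometheus responses. A loose, hypothetical sketch of the shape these call sites imply, with made-up gauge names and placeholder values (the real pool_stats lives in langgraph_runtime.database and is not part of this diff):

    # Hypothetical sketch only; not the package's implementation.
    def pool_stats(*, project_id: str | None, revision_id: str | None, format: str):
        stats = {"lg_api_pg_pool_available": 10, "lg_api_redis_pool_available": 25}  # placeholder values
        if format == "json":
            # Spread into the JSON metrics payload via **pg_redis_stats.
            return stats
        # Otherwise: Prometheus exposition lines, extended into the `metrics` list.
        labels = f'project_id="{project_id}", revision_id="{revision_id}"'
        lines = []
        for name, value in stats.items():
            lines.append(f"# TYPE {name} gauge")
            lines.append(f"{name}{{{labels}}} {value}")
        return lines
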
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/config.py

@@ -180,6 +180,7 @@ REDIS_CLUSTER = env("REDIS_CLUSTER", cast=bool, default=False)
 REDIS_MAX_CONNECTIONS = env("REDIS_MAX_CONNECTIONS", cast=int, default=2000)
 REDIS_CONNECT_TIMEOUT = env("REDIS_CONNECT_TIMEOUT", cast=float, default=10.0)
 REDIS_MAX_IDLE_TIME = env("REDIS_MAX_IDLE_TIME", cast=float, default=120.0)
+REDIS_STREAM_TIMEOUT = env("REDIS_STREAM_TIMEOUT", cast=float, default=30.0)
 REDIS_KEY_PREFIX = env("REDIS_KEY_PREFIX", cast=str, default="")
 RUN_STATS_CACHE_SECONDS = env("RUN_STATS_CACHE_SECONDS", cast=int, default=60)
 
@@ -374,6 +375,7 @@ API_VARIANT = env("LANGSMITH_LANGGRAPH_API_VARIANT", cast=str, default="")
 # UI
 UI_USE_BUNDLER = env("LANGGRAPH_UI_BUNDLER", cast=bool, default=False)
 IS_QUEUE_ENTRYPOINT = False
+IS_EXECUTOR_ENTRYPOINT = False
 ref_sha = None
 if not os.getenv("LANGCHAIN_REVISION_ID") and (
     ref_sha := os.getenv("LANGSMITH_LANGGRAPH_GIT_REF_SHA")
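
The only functional additions to config.py are the REDIS_STREAM_TIMEOUT setting and the IS_EXECUTOR_ENTRYPOINT flag. As a rough illustration of the env(name, cast=..., default=...) pattern used throughout this module (the package's actual helper is not shown in this diff and likely handles more cases, e.g. boolean strings):

    import os

    def env(name: str, *, cast=str, default=None):
        # Read an environment variable, apply `cast`, fall back to `default` when unset.
        raw = os.environ.get(name)
        return default if raw is None else cast(raw)

    # With REDIS_STREAM_TIMEOUT unset, this evaluates to the 30.0-second default.
    REDIS_STREAM_TIMEOUT = env("REDIS_STREAM_TIMEOUT", cast=float, default=30.0)
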
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/executor_entrypoint.py

@@ -20,4 +20,7 @@ if __name__ == "__main__":
         uvloop.install()
     except ImportError:
         pass
+    from langgraph_api import config
+
+    config.IS_EXECUTOR_ENTRYPOINT = True
     asyncio.run(main(grpc_port=args.grpc_port, entrypoint_name="python-executor"))
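
The executor entrypoint now flips the new config flag before starting its event loop. A trivial, hypothetical illustration of how other modules could branch on it (the diff only shows the flag being set, not where it is read):

    from langgraph_api import config

    def entrypoint_kind() -> str:
        # Purely illustrative; not a function from the package.
        return "executor" if config.IS_EXECUTOR_ENTRYPOINT else "server"
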
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/queue_entrypoint.py

@@ -11,10 +11,8 @@ if not (
 
 import asyncio
 import contextlib
-import http.server
 import json
 import logging.config
-import os
 import pathlib
 import signal
 from contextlib import asynccontextmanager
@@ -22,6 +20,7 @@ from typing import cast
 
 import structlog
 
+from langgraph_runtime.database import pool_stats
 from langgraph_runtime.lifespan import lifespan
 from langgraph_runtime.metrics import get_metrics
 
@@ -29,69 +28,68 @@ logger = structlog.stdlib.get_logger(__name__)
 
 
 async def health_and_metrics_server():
+    import uvicorn
+    from starlette.applications import Starlette
+    from starlette.responses import JSONResponse, PlainTextResponse
+    from starlette.routing import Route
+
     port = int(os.getenv("PORT", "8080"))
-    ok = json.dumps({"status": "ok"}).encode()
-    ok_len = str(len(ok))
-
-    class HealthAndMetricsHandler(http.server.SimpleHTTPRequestHandler):
-        def log_message(self, format, *args):
-            # Skip logging for /ok and /metrics endpoints
-            if getattr(self, "path", None) in ["/ok", "/metrics"]:
-                return
-            # Log other requests normally
-            super().log_message(format, *args)
-
-        def do_GET(self):
-            path = getattr(self, "path", None)
-            if path == "/ok":
-                self.send_response(200)
-                self.send_header("Content-Type", "application/json")
-                self.send_header("Content-Length", ok_len)
-                self.end_headers()
-                self.wfile.write(ok)
-            elif path == "/metrics":
-                metrics = get_metrics()
-                worker_metrics = cast(dict[str, int], metrics["workers"])
-                workers_max = worker_metrics["max"]
-                workers_active = worker_metrics["active"]
-                workers_available = worker_metrics["available"]
-
-                project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
-                revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
-
-                metrics = [
-                    "# HELP lg_api_workers_max The maximum number of workers available.",
-                    "# TYPE lg_api_workers_max gauge",
-                    f'lg_api_workers_max{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_max}',
-                    "# HELP lg_api_workers_active The number of currently active workers.",
-                    "# TYPE lg_api_workers_active gauge",
-                    f'lg_api_workers_active{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_active}',
-                    "# HELP lg_api_workers_available The number of available (idle) workers.",
-                    "# TYPE lg_api_workers_available gauge",
-                    f'lg_api_workers_available{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_available}',
-                ]
-
-                metrics_response = "\n".join(metrics).encode()
-                metrics_len = str(len(metrics_response))
-
-                self.send_response(200)
-                self.send_header(
-                    "Content-Type", "text/plain; version=0.0.4; charset=utf-8"
-                )
-                self.send_header("Content-Length", metrics_len)
-                self.end_headers()
-                self.wfile.write(metrics_response)
-            else:
-                self.send_error(http.HTTPStatus.NOT_FOUND)
-
-    with http.server.ThreadingHTTPServer(
-        ("0.0.0.0", port), HealthAndMetricsHandler
-    ) as httpd:
-        logger.info(f"Health and metrics server started at http://0.0.0.0:{port}")
-        try:
-            await asyncio.to_thread(httpd.serve_forever)
-        finally:
-            httpd.shutdown()
+
+    async def health_endpoint(request):
+        return JSONResponse({"status": "ok"})
+
+    async def metrics_endpoint(request):
+        metrics = get_metrics()
+        worker_metrics = cast(dict[str, int], metrics["workers"])
+        workers_max = worker_metrics["max"]
+        workers_active = worker_metrics["active"]
+        workers_available = worker_metrics["available"]
+
+        project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
+        revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
+
+        metrics_lines = [
+            "# HELP lg_api_workers_max The maximum number of workers available.",
+            "# TYPE lg_api_workers_max gauge",
+            f'lg_api_workers_max{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_max}',
+            "# HELP lg_api_workers_active The number of currently active workers.",
+            "# TYPE lg_api_workers_active gauge",
+            f'lg_api_workers_active{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_active}',
+            "# HELP lg_api_workers_available The number of available (idle) workers.",
+            "# TYPE lg_api_workers_available gauge",
+            f'lg_api_workers_available{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_available}',
+        ]
+
+        metrics_lines.extend(
+            pool_stats(
+                project_id=project_id,
+                revision_id=revision_id,
+            )
+        )
+
+        return PlainTextResponse(
+            "\n".join(metrics_lines),
+            media_type="text/plain; version=0.0.4; charset=utf-8",
+        )
+
+    app = Starlette(
+        routes=[
+            Route("/ok", health_endpoint),
+            Route("/metrics", metrics_endpoint),
+        ]
+    )
+
+    config = uvicorn.Config(
+        app,
+        host="0.0.0.0",
+        port=port,
+        log_level="error",
+        access_log=False,
+    )
+    server = uvicorn.Server(config)
+
+    logger.info(f"Health and metrics server started at http://0.0.0.0:{port}")
+    await server.serve()
 
 
 async def entrypoint(
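
The queue entrypoint's health/metrics server is rewritten from a threaded http.server handler to a Starlette app served by uvicorn, and the /metrics output now appends pool_stats lines. A self-contained approximation of the new pattern, with stand-ins for get_metrics and pool_stats (which come from langgraph_runtime and are not shown here):

    import asyncio
    import os
    from typing import cast

    import uvicorn
    from starlette.applications import Starlette
    from starlette.responses import JSONResponse, PlainTextResponse
    from starlette.routing import Route

    # Stand-ins for langgraph_runtime.metrics.get_metrics / database.pool_stats.
    def get_metrics() -> dict:
        return {"workers": {"max": 10, "active": 2, "available": 8}}

    def pool_stats(*, project_id=None, revision_id=None) -> list[str]:
        return []

    async def health_endpoint(request):
        return JSONResponse({"status": "ok"})

    async def metrics_endpoint(request):
        workers = cast(dict[str, int], get_metrics()["workers"])
        project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
        revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
        labels = f'project_id="{project_id}", revision_id="{revision_id}"'
        lines = [
            "# TYPE lg_api_workers_max gauge",
            f"lg_api_workers_max{{{labels}}} {workers['max']}",
            "# TYPE lg_api_workers_active gauge",
            f"lg_api_workers_active{{{labels}}} {workers['active']}",
            "# TYPE lg_api_workers_available gauge",
            f"lg_api_workers_available{{{labels}}} {workers['available']}",
        ]
        lines.extend(pool_stats(project_id=project_id, revision_id=revision_id))
        return PlainTextResponse(
            "\n".join(lines), media_type="text/plain; version=0.0.4; charset=utf-8"
        )

    async def main() -> None:
        app = Starlette(
            routes=[Route("/ok", health_endpoint), Route("/metrics", metrics_endpoint)]
        )
        server = uvicorn.Server(
            uvicorn.Config(
                app,
                host="0.0.0.0",
                port=int(os.getenv("PORT", "8080")),
                log_level="error",
                access_log=False,
            )
        )
        await server.serve()

    if __name__ == "__main__":
        asyncio.run(main())

Compared with the previous ThreadingHTTPServer, this keeps the server on the event loop instead of a dedicated thread and drops the manual Content-Length bookkeeping.
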
{langgraph_api-0.3.1 → langgraph_api-0.3.4}/langgraph_api/stream.py

@@ -2,7 +2,7 @@ import uuid
 from collections.abc import AsyncIterator, Callable
 from contextlib import AsyncExitStack, aclosing, asynccontextmanager
 from functools import lru_cache
-from typing import Any, cast
+from typing import Any, Literal, cast
 
 import langgraph.version
 import langsmith
@@ -423,7 +423,7 @@ async def consume(
     stream: AnyStream,
     run_id: str | uuid.UUID,
     resumable: bool = False,
-    stream_modes: set[StreamMode] | None = None,
+    stream_modes: set[StreamMode | Literal["metadata"]] | None = None,
 ) -> None:
     stream_modes = stream_modes or set()
     if "messages-tuple" in stream_modes:
langgraph_api-0.3.1/langgraph_api/__init__.py

@@ -1 +0,0 @@
-__version__ = "0.3.1"