langgraph-api 0.2.77__tar.gz → 0.2.78__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langgraph-api might be problematic; see the registry's advisory page for more details.

Files changed (108)
  1. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/Makefile +1 -1
  2. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/PKG-INFO +1 -1
  3. langgraph_api-0.2.78/langgraph_api/__init__.py +1 -0
  4. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/meta.py +14 -12
  5. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/openapi.py +16 -3
  6. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/config.py +2 -0
  7. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/http.py +7 -1
  8. langgraph_api-0.2.78/langgraph_api/http_metrics.py +166 -0
  9. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/metadata.py +1 -0
  10. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/middleware/http_logger.py +16 -5
  11. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/webhook.py +9 -2
  12. langgraph_api-0.2.77/langgraph_api/__init__.py +0 -1
  13. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/.gitignore +0 -0
  14. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/LICENSE +0 -0
  15. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/README.md +0 -0
  16. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/benchmark/.gitignore +0 -0
  17. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/benchmark/Makefile +0 -0
  18. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/benchmark/README.md +0 -0
  19. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/benchmark/burst.js +0 -0
  20. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/benchmark/weather.js +0 -0
  21. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/constraints.txt +0 -0
  22. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/forbidden.txt +0 -0
  23. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/healthcheck.py +0 -0
  24. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/__init__.py +0 -0
  25. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/assistants.py +0 -0
  26. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/mcp.py +0 -0
  27. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/runs.py +0 -0
  28. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/store.py +0 -0
  29. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/threads.py +0 -0
  30. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/api/ui.py +0 -0
  31. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/asgi_transport.py +0 -0
  32. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/asyncio.py +0 -0
  33. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/__init__.py +0 -0
  34. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/custom.py +0 -0
  35. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/langsmith/__init__.py +0 -0
  36. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/langsmith/backend.py +0 -0
  37. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/langsmith/client.py +0 -0
  38. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/middleware.py +0 -0
  39. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/noop.py +0 -0
  40. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/auth/studio_user.py +0 -0
  41. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/cli.py +0 -0
  42. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/command.py +0 -0
  43. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/cron_scheduler.py +0 -0
  44. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/errors.py +0 -0
  45. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/graph.py +0 -0
  46. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/.gitignore +0 -0
  47. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/.prettierrc +0 -0
  48. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/__init__.py +0 -0
  49. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/base.py +0 -0
  50. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/build.mts +0 -0
  51. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/client.http.mts +0 -0
  52. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/client.mts +0 -0
  53. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/errors.py +0 -0
  54. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/global.d.ts +0 -0
  55. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/package.json +0 -0
  56. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/remote.py +0 -0
  57. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/schema.py +0 -0
  58. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/graph.mts +0 -0
  59. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/load.hooks.mjs +0 -0
  60. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/preload.mjs +0 -0
  61. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/utils/files.mts +0 -0
  62. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/utils/importMap.mts +0 -0
  63. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/utils/pythonSchemas.mts +0 -0
  64. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/src/utils/serde.mts +0 -0
  65. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/sse.py +0 -0
  66. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/tsconfig.json +0 -0
  67. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/ui.py +0 -0
  68. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/js/yarn.lock +0 -0
  69. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/logging.py +0 -0
  70. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/middleware/__init__.py +0 -0
  71. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/middleware/private_network.py +0 -0
  72. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/middleware/request_id.py +0 -0
  73. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/models/__init__.py +0 -0
  74. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/models/run.py +0 -0
  75. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/patch.py +0 -0
  76. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/queue_entrypoint.py +0 -0
  77. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/route.py +0 -0
  78. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/schema.py +0 -0
  79. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/serde.py +0 -0
  80. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/server.py +0 -0
  81. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/sse.py +0 -0
  82. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/state.py +0 -0
  83. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/store.py +0 -0
  84. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/stream.py +0 -0
  85. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/thread_ttl.py +0 -0
  86. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/tunneling/cloudflare.py +0 -0
  87. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/utils/__init__.py +0 -0
  88. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/utils/config.py +0 -0
  89. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/utils/future.py +0 -0
  90. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/utils.py +0 -0
  91. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/validation.py +0 -0
  92. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_api/worker.py +0 -0
  93. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_license/__init__.py +0 -0
  94. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_license/validation.py +0 -0
  95. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/__init__.py +0 -0
  96. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/checkpoint.py +0 -0
  97. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/database.py +0 -0
  98. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/lifespan.py +0 -0
  99. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/metrics.py +0 -0
  100. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/ops.py +0 -0
  101. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/queue.py +0 -0
  102. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/retry.py +0 -0
  103. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/langgraph_runtime/store.py +0 -0
  104. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/logging.json +0 -0
  105. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/openapi.json +0 -0
  106. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/pyproject.toml +0 -0
  107. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/scripts/create_license.py +0 -0
  108. {langgraph_api-0.2.77 → langgraph_api-0.2.78}/uv.lock +0 -0
@@ -12,7 +12,7 @@ format:
12
12
 
13
13
  # test commands
14
14
 
15
- TEST ?= "tests/integration_tests/"
15
+ TEST ?= tests/
16
16
  AUTH_TEST ?= "tests/integration_tests/test_custom_auth.py"
17
17
  LANGGRAPH_HTTP ?= {"disable_mcp": false}
18
18
  LANGGRAPH_AES_KEY ?= '1234567890123456'
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langgraph-api
3
- Version: 0.2.77
3
+ Version: 0.2.78
4
4
  Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
5
5
  License: Elastic-2.0
6
6
  License-File: LICENSE
@@ -0,0 +1 @@
1
+ __version__ = "0.2.78"
@@ -1,8 +1,7 @@
1
- import os
2
-
3
1
  from starlette.responses import JSONResponse, PlainTextResponse
4
2
 
5
3
  from langgraph_api import __version__, config, metadata
4
+ from langgraph_api.http_metrics import HTTP_METRICS_COLLECTOR
6
5
  from langgraph_api.route import ApiRequest
7
6
  from langgraph_license.validation import plus_features_enabled
8
7
  from langgraph_runtime.database import connect, pool_stats
@@ -26,6 +25,7 @@ async def meta_info(request: ApiRequest):
26
25
  "host": {
27
26
  "kind": metadata.HOST,
28
27
  "project_id": metadata.PROJECT_ID,
28
+ "host_revision_id": metadata.HOST_REVISION_ID,
29
29
  "revision_id": metadata.REVISION,
30
30
  "tenant_id": metadata.TENANT_ID,
31
31
  },
@@ -46,31 +46,31 @@ async def meta_metrics(request: ApiRequest):
46
46
  workers_active = worker_metrics["active"]
47
47
  workers_available = worker_metrics["available"]
48
48
 
49
+ http_metrics = HTTP_METRICS_COLLECTOR.get_metrics(
50
+ metadata.PROJECT_ID, metadata.HOST_REVISION_ID, metrics_format
51
+ )
52
+
49
53
  if metrics_format == "json":
50
54
  async with connect() as conn:
51
55
  resp = {
52
56
  **pool_stats(),
53
57
  "queue": await Runs.stats(conn),
58
+ **http_metrics,
54
59
  }
55
60
  if config.N_JOBS_PER_WORKER > 0:
56
61
  resp["workers"] = worker_metrics
57
62
  return JSONResponse(resp)
58
63
  elif metrics_format == "prometheus":
59
- # LANGSMITH_HOST_PROJECT_ID and LANGSMITH_HOST_REVISION_ID are injected
60
- # into the deployed image by host-backend.
61
- project_id = os.getenv("LANGSMITH_HOST_PROJECT_ID")
62
- revision_id = os.getenv("LANGSMITH_HOST_REVISION_ID")
63
-
64
64
  async with connect() as conn:
65
65
  queue_stats = await Runs.stats(conn)
66
66
 
67
67
  metrics = [
68
68
  "# HELP lg_api_num_pending_runs The number of runs currently pending.",
69
69
  "# TYPE lg_api_num_pending_runs gauge",
70
- f'lg_api_num_pending_runs{{project_id="{project_id}", revision_id="{revision_id}"}} {queue_stats["n_pending"]}',
70
+ f'lg_api_num_pending_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_pending"]}',
71
71
  "# HELP lg_api_num_running_runs The number of runs currently running.",
72
72
  "# TYPE lg_api_num_running_runs gauge",
73
- f'lg_api_num_running_runs{{project_id="{project_id}", revision_id="{revision_id}"}} {queue_stats["n_running"]}',
73
+ f'lg_api_num_running_runs{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {queue_stats["n_running"]}',
74
74
  ]
75
75
 
76
76
  if config.N_JOBS_PER_WORKER > 0:
@@ -78,15 +78,17 @@ async def meta_metrics(request: ApiRequest):
78
78
  [
79
79
  "# HELP lg_api_workers_max The maximum number of workers available.",
80
80
  "# TYPE lg_api_workers_max gauge",
81
- f'lg_api_workers_max{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_max}',
81
+ f'lg_api_workers_max{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_max}',
82
82
  "# HELP lg_api_workers_active The number of currently active workers.",
83
83
  "# TYPE lg_api_workers_active gauge",
84
- f'lg_api_workers_active{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_active}',
84
+ f'lg_api_workers_active{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_active}',
85
85
  "# HELP lg_api_workers_available The number of available (idle) workers.",
86
86
  "# TYPE lg_api_workers_available gauge",
87
- f'lg_api_workers_available{{project_id="{project_id}", revision_id="{revision_id}"}} {workers_available}',
87
+ f'lg_api_workers_available{{project_id="{metadata.PROJECT_ID}", revision_id="{metadata.HOST_REVISION_ID}"}} {workers_available}',
88
88
  ]
89
89
  )
90
90
 
91
+ metrics.extend(http_metrics)
92
+
91
93
  metrics_response = "\n".join(metrics)
92
94
  return PlainTextResponse(metrics_response)
@@ -80,6 +80,19 @@ def get_openapi_spec() -> str:
80
80
  "API documentation will not show authentication requirements. "
81
81
  "Add 'openapi' section to auth section of your `langgraph.json` file to specify security schemes."
82
82
  )
83
+
84
+ # Remove webhook parameters if webhooks are disabled
85
+ if HTTP_CONFIG and HTTP_CONFIG.get("disable_webhooks"):
86
+ webhook_schemas = ["CronCreate", "RunCreateStateful", "RunCreateStateless"]
87
+ for schema_name in webhook_schemas:
88
+ if schema_name in openapi["components"]["schemas"]:
89
+ schema = openapi["components"]["schemas"][schema_name]
90
+ if "properties" in schema and "webhook" in schema["properties"]:
91
+ del schema["properties"]["webhook"]
92
+ logger.info(
93
+ f"Removed webhook parameter from {schema_name} schema due to disable_webhooks setting"
94
+ )
95
+
83
96
  final = openapi
84
97
  if CUSTOM_OPENAPI_SPEC:
85
98
  final = merge_openapi_specs(openapi, CUSTOM_OPENAPI_SPEC)
@@ -100,11 +113,11 @@ def merge_openapi_specs(spec_a: dict, spec_b: dict) -> dict:
100
113
  Merge two OpenAPI specifications with spec_b taking precedence on conflicts.
101
114
 
102
115
  This function handles merging of the following keys:
103
- - "openapi": Uses spec_bs version.
116
+ - "openapi": Uses spec_b's version.
104
117
  - "info": Merges dictionaries with spec_b taking precedence.
105
118
  - "servers": Merges lists with deduplication (by URL and description).
106
119
  - "paths": For shared paths, merges HTTP methods:
107
- - If a method exists in both, spec_bs definition wins.
120
+ - If a method exists in both, spec_b's definition wins.
108
121
  - Otherwise, methods from both are preserved.
109
122
  Additionally, merges path-level "parameters" by (name, in).
110
123
  - "components": Merges per component type (schemas, responses, etc.).
@@ -217,7 +230,7 @@ def _merge_paths(paths_a: dict, paths_b: dict) -> dict:
217
230
 
218
231
  For each path:
219
232
  - If the path exists in both specs, merge HTTP methods:
220
- - If a method exists in both, use spec_bs definition.
233
+ - If a method exists in both, use spec_b's definition.
221
234
  - Otherwise, preserve both.
222
235
  - Additionally, merge path-level "parameters" if present.
223
236
 
@@ -37,6 +37,8 @@ class HttpConfig(TypedDict, total=False):
37
37
  """Disable /store routes"""
38
38
  disable_meta: bool
39
39
  """Disable /ok, /info, /metrics, and /docs routes"""
40
+ disable_webhooks: bool
41
+ """Disable webhooks calls on run completion in all routes"""
40
42
  cors: CorsConfig | None
41
43
  """CORS configuration"""
42
44
  disable_ui: bool
@@ -114,6 +114,11 @@ def get_loopback_client() -> JsonHttpClient:
114
114
 
115
115
 
116
116
  def is_retriable_error(exception: Exception) -> bool:
117
+ # httpx error hierarchy: https://www.python-httpx.org/exceptions/
118
+ # Retry all timeout related errors
119
+ if isinstance(exception, httpx.TimeoutException | httpx.NetworkError):
120
+ return True
121
+ # Seems to just apply to HttpStatusError but doesn't hurt to check all
117
122
  if isinstance(exception, httpx.HTTPError):
118
123
  return (
119
124
  getattr(exception, "response", None) is not None
@@ -143,6 +148,7 @@ async def http_request(
143
148
  connect_timeout: float | None = 5,
144
149
  request_timeout: float | None = 30,
145
150
  raise_error: bool = True,
151
+ client: JsonHttpClient | None = None,
146
152
  ) -> httpx.Response:
147
153
  """Make an HTTP request with retries.
148
154
 
@@ -163,7 +169,7 @@ async def http_request(
163
169
  if not path.startswith(("http://", "https://", "/")):
164
170
  raise ValueError("path must start with / or http")
165
171
 
166
- client = get_http_client()
172
+ client = client or get_http_client()
167
173
 
168
174
  content = None
169
175
  if body is not None:
@@ -0,0 +1,166 @@
1
+ from collections import defaultdict
2
+ from typing import Any
3
+
4
+ FILTERED_ROUTES = {"/ok", "/info", "/metrics", "/docs", "/openapi.json"}
5
+
6
+ MAX_REQUEST_COUNT_ENTRIES = 5000
7
+ MAX_HISTOGRAM_ENTRIES = 1000
8
+
9
+
10
+ def get_route(route: Any) -> str | None:
11
+ try:
12
+ # default lg api routes use the custom APIRoute where scope["route"] is set to a string
13
+ if isinstance(route, str):
14
+ return route
15
+ else:
16
+ # custom FastAPI routes provided by user_router attach an object to scope["route"]
17
+ route_path = getattr(route, "path", None)
18
+ return route_path
19
+ except Exception:
20
+ return None
21
+
22
+
23
+ def should_filter_route(route_path: str) -> bool:
24
+ # use endswith to honor MOUNT_PREFIX
25
+ return any(route_path.endswith(suffix) for suffix in FILTERED_ROUTES)
26
+
27
+
28
+ class HTTPMetricsCollector:
29
+ def __init__(self):
30
+ # Counter: Key: (method, route, status), Value: count
31
+ self._request_counts: dict[tuple[str, str, int], int] = defaultdict(int)
32
+
33
+ self._histogram_buckets = [
34
+ 0.01,
35
+ 0.1,
36
+ 0.5,
37
+ 1,
38
+ 5,
39
+ 15,
40
+ 30,
41
+ 60,
42
+ 120,
43
+ 300,
44
+ 600,
45
+ 1800,
46
+ 3600,
47
+ float("inf"),
48
+ ]
49
+ self._histogram_bucket_labels = [
50
+ "+Inf" if value == float("inf") else str(value)
51
+ for value in self._histogram_buckets
52
+ ]
53
+
54
+ self._histogram_data: dict[tuple[str, str], dict] = defaultdict(
55
+ lambda: {
56
+ "bucket_counts": [0] * len(self._histogram_buckets),
57
+ "sum": 0.0,
58
+ "count": 0,
59
+ }
60
+ )
61
+
62
+ def record_request(
63
+ self, method: str, route: Any, status: int, latency_ms: float
64
+ ) -> None:
65
+ route_path = get_route(route)
66
+ if route_path is None:
67
+ return
68
+
69
+ if should_filter_route(route_path):
70
+ return
71
+
72
+ request_count_key = (method, route_path, status)
73
+ histogram_key = (method, route_path)
74
+
75
+ if (
76
+ request_count_key not in self._request_counts
77
+ and len(self._request_counts) >= MAX_REQUEST_COUNT_ENTRIES
78
+ ):
79
+ return
80
+
81
+ if (
82
+ histogram_key not in self._histogram_data
83
+ and len(self._histogram_data) >= MAX_HISTOGRAM_ENTRIES
84
+ ):
85
+ return
86
+
87
+ self._request_counts[request_count_key] += 1
88
+
89
+ latency_seconds = latency_ms / 1000.0
90
+ hist_data = self._histogram_data[histogram_key]
91
+
92
+ for i, bucket_value in enumerate(self._histogram_buckets):
93
+ if latency_seconds <= bucket_value:
94
+ hist_data["bucket_counts"][i] += 1
95
+ break
96
+
97
+ hist_data["sum"] += latency_seconds
98
+ hist_data["count"] += 1
99
+
100
+ def get_metrics(
101
+ self, project_id: str, revision_id: str, format: str = "prometheus"
102
+ ) -> dict | list[str]:
103
+ if format == "json":
104
+ return {
105
+ "api": {
106
+ "http_requests_total": [
107
+ {
108
+ "method": method,
109
+ "path": path,
110
+ "status": status,
111
+ "count": count,
112
+ }
113
+ for (
114
+ method,
115
+ path,
116
+ status,
117
+ ), count in self._request_counts.items()
118
+ ]
119
+ }
120
+ }
121
+
122
+ metrics = []
123
+
124
+ # Counter metrics
125
+ if self._request_counts:
126
+ metrics.extend(
127
+ [
128
+ "# HELP lg_api_http_requests_total Total number of HTTP requests.",
129
+ "# TYPE lg_api_http_requests_total counter",
130
+ ]
131
+ )
132
+
133
+ for (method, path, status), count in self._request_counts.items():
134
+ metrics.append(
135
+ f'lg_api_http_requests_total{{project_id="{project_id}", revision_id="{revision_id}", method="{method}", path="{path}", status="{status}"}} {count}'
136
+ )
137
+
138
+ # Histogram metrics
139
+ if self._histogram_data:
140
+ metrics.extend(
141
+ [
142
+ "# HELP lg_api_http_requests_latency_seconds HTTP request latency in seconds.",
143
+ "# TYPE lg_api_http_requests_latency_seconds histogram",
144
+ ]
145
+ )
146
+
147
+ for (method, path), hist_data in self._histogram_data.items():
148
+ acc = 0
149
+ for i, bucket_count in enumerate(hist_data["bucket_counts"]):
150
+ acc += bucket_count
151
+ bucket_label = self._histogram_bucket_labels[i]
152
+ metrics.append(
153
+ f'lg_api_http_requests_latency_seconds_bucket{{project_id="{project_id}", revision_id="{revision_id}", method="{method}", path="{path}", le="{bucket_label}"}} {acc}'
154
+ )
155
+
156
+ metrics.extend(
157
+ [
158
+ f'lg_api_http_requests_latency_seconds_sum{{project_id="{project_id}", revision_id="{revision_id}", method="{method}", path="{path}"}} {hist_data["sum"]:.6f}',
159
+ f'lg_api_http_requests_latency_seconds_count{{project_id="{project_id}", revision_id="{revision_id}", method="{method}", path="{path}"}} {hist_data["count"]}',
160
+ ]
161
+ )
162
+
163
+ return metrics
164
+
165
+
166
+ HTTP_METRICS_COLLECTOR = HTTPMetricsCollector()
@@ -26,6 +26,7 @@ INTERVAL = 300
26
26
  REVISION = os.getenv("LANGSMITH_LANGGRAPH_API_REVISION")
27
27
  VARIANT = os.getenv("LANGSMITH_LANGGRAPH_API_VARIANT")
28
28
  PROJECT_ID = os.getenv("LANGSMITH_HOST_PROJECT_ID")
29
+ HOST_REVISION_ID = os.getenv("LANGSMITH_HOST_REVISION_ID")
29
30
  TENANT_ID = os.getenv("LANGSMITH_TENANT_ID")
30
31
  if PROJECT_ID:
31
32
  try:
@@ -5,6 +5,8 @@ import structlog
5
5
  from starlette.requests import ClientDisconnect
6
6
  from starlette.types import Message, Receive, Scope, Send
7
7
 
8
+ from langgraph_api.http_metrics import HTTP_METRICS_COLLECTOR
9
+
8
10
  asgi = structlog.stdlib.get_logger("asgi")
9
11
 
10
12
  PATHS_IGNORE = {"/ok", "/metrics"}
@@ -64,13 +66,22 @@ class AccessLoggerMiddleware:
64
66
  finally:
65
67
  info["end_time"] = loop.time()
66
68
  latency = int((info["end_time"] - info["start_time"]) * 1_000)
69
+
70
+ status = info["response"].get("status")
71
+ method = scope.get("method")
72
+ path = scope.get("path")
73
+ route = scope.get("route")
74
+
75
+ if method and route and status:
76
+ HTTP_METRICS_COLLECTOR.record_request(method, route, status, latency)
77
+
67
78
  self.logger.info(
68
- f"{scope.get('method')} {scope.get('path')} {info['response'].get('status')} {latency}ms",
69
- method=scope.get("method"),
70
- path=scope.get("path"),
71
- status=info["response"].get("status"),
79
+ f"{method} {path} {status} {latency}ms",
80
+ method=method,
81
+ path=path,
82
+ status=status,
72
83
  latency_ms=latency,
73
- route=scope.get("route"),
84
+ route=route,
74
85
  path_params=scope.get("path_params"),
75
86
  query_string=scope.get("query_string").decode(),
76
87
  proto=scope.get("http_version"),
@@ -2,13 +2,20 @@ from datetime import UTC, datetime
2
2
 
3
3
  import structlog
4
4
 
5
- from langgraph_api.http import get_http_client, get_loopback_client
5
+ from langgraph_api.config import HTTP_CONFIG
6
+ from langgraph_api.http import get_http_client, get_loopback_client, http_request
6
7
  from langgraph_api.worker import WorkerResult
7
8
 
8
9
  logger = structlog.stdlib.get_logger(__name__)
9
10
 
10
11
 
11
12
  async def call_webhook(result: "WorkerResult") -> None:
13
+ if HTTP_CONFIG and HTTP_CONFIG.get("disable_webhooks"):
14
+ logger.info(
15
+ "Webhooks disabled, skipping webhook call", webhook=result["webhook"]
16
+ )
17
+ return
18
+
12
19
  checkpoint = result["checkpoint"]
13
20
  payload = {
14
21
  **result["run"],
@@ -28,7 +35,7 @@ async def call_webhook(result: "WorkerResult") -> None:
28
35
  webhook_client = get_loopback_client()
29
36
  else:
30
37
  webhook_client = get_http_client()
31
- await webhook_client.post(webhook, json=payload, total_timeout=20)
38
+ await http_request("POST", webhook, json=payload, client=webhook_client)
32
39
  await logger.ainfo(
33
40
  "Background worker called webhook",
34
41
  webhook=result["webhook"],
@@ -1 +0,0 @@
1
- __version__ = "0.2.77"
File without changes
File without changes
File without changes