langgraph-api 0.4.46__tar.gz → 0.4.48__tar.gz
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Potentially problematic release: this version of langgraph-api has been flagged; see the registry page for more details.
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/Makefile +4 -3
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/PKG-INFO +2 -2
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/Makefile +1 -1
- langgraph_api-0.4.48/benchmark/benchmark-runners/assistant.js +84 -0
- langgraph_api-0.4.48/benchmark/benchmark-runners/benchmark-runner.js +34 -0
- langgraph_api-0.4.48/benchmark/benchmark-runners/benchmarks.js +18 -0
- langgraph_api-0.4.48/benchmark/benchmark-runners/stream_write.js +98 -0
- langgraph_api-0.4.48/benchmark/benchmark-runners/wait_write.js +65 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/ramp.js +49 -81
- langgraph_api-0.4.48/langgraph_api/__init__.py +1 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/threads.py +11 -7
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/cli.py +2 -49
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/config.py +75 -15
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/client.py +13 -1
- langgraph_api-0.4.48/langgraph_api/grpc_ops/generated/core_api_pb2.py +276 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/generated/core_api_pb2.pyi +18 -12
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/generated/core_api_pb2_grpc.py +2 -2
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/ops.py +582 -5
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/build.mts +1 -1
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/client.http.mts +1 -1
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/client.mts +1 -1
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/package.json +7 -7
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/yarn.lock +40 -48
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/pyproject.toml +1 -1
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/uv.lock +207 -193
- langgraph_api-0.4.46/langgraph_api/__init__.py +0 -1
- langgraph_api-0.4.46/langgraph_api/grpc_ops/generated/core_api_pb2.py +0 -274
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/.gitignore +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/LICENSE +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/README.md +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/.gitignore +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/README.md +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/burst.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/capacity_k6.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/capacity_runner.mjs +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/capacity_urls.mjs +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/clean.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/graphs.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/package.json +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/reporting/dd_reporting.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/update-revision.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/benchmark/weather.js +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/constraints.txt +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/forbidden.txt +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/healthcheck.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/a2a.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/assistants.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/mcp.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/meta.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/openapi.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/runs.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/store.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/api/ui.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/asgi_transport.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/asyncio.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/custom.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/langsmith/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/langsmith/backend.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/langsmith/client.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/middleware.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/noop.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/auth/studio_user.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/command.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/cron_scheduler.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/errors.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/executor_entrypoint.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/feature_flags.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/graph.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/grpc_ops/generated/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/http.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/http_metrics.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/http_metrics_utils.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/.gitignore +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/.prettierrc +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/base.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/errors.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/global.d.ts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/remote.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/schema.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/graph.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/load.hooks.mjs +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/preload.mjs +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/utils/files.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/utils/importMap.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/utils/pythonSchemas.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/src/utils/serde.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/sse.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/traceblock.mts +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/tsconfig.json +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/js/ui.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/logging.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/metadata.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/middleware/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/middleware/http_logger.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/middleware/private_network.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/middleware/request_id.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/models/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/models/run.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/patch.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/queue_entrypoint.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/route.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/schema.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/self_hosted_logs.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/self_hosted_metrics.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/serde.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/server.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/sse.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/state.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/store.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/stream.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/thread_ttl.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/traceblock.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/tunneling/cloudflare.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/cache.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/config.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/errors.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/future.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/headers.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/retriable_client.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/stream_codec.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/utils/uuids.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/validation.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/webhook.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_api/worker.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_license/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_license/validation.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/__init__.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/checkpoint.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/database.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/lifespan.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/metrics.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/ops.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/queue.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/retry.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/langgraph_runtime/store.py +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/logging.json +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/openapi.json +0 -0
- {langgraph_api-0.4.46 → langgraph_api-0.4.48}/scripts/create_license.py +0 -0
--- langgraph_api-0.4.46/Makefile
+++ langgraph_api-0.4.48/Makefile
@@ -210,13 +210,14 @@ start-js-server:
 	$(MAKE) start-go-server
 	@trap '$(MAKE) -C $(CURDIR) stop-go-server' INT TERM EXIT; \
 	echo "Installing JS server dependencies..."; \
-	cd ../public-api-server-js &&
+	cd ../public-api-server-js && yarn install; \
 	echo "Building JS server..."; \
-	cd ../public-api-server-js &&
+	cd ../public-api-server-js && yarn run build; \
 	echo "Starting JS server on port 9123..."; \
 	cd ../public-api-server-js && FF_USE_CORE_API=true \
 	LANGSERVE_GRAPHS=$(LANGSERVE_GRAPHS_ALL) \
-
+	LANGGRAPH_CONFIG='{"agent": {"configurable": {"model_name": "openai"}}}' \
+	PORT=9123 yarn start
 
 VERSION_KIND ?= patch
 
--- langgraph_api-0.4.46/PKG-INFO
+++ langgraph_api-0.4.48/PKG-INFO
@@ -1,13 +1,13 @@
 Metadata-Version: 2.4
 Name: langgraph-api
-Version: 0.4.46
+Version: 0.4.48
 Author-email: Nuno Campos <nuno@langchain.dev>, Will Fu-Hinthorn <will@langchain.dev>
 License: Elastic-2.0
 License-File: LICENSE
 Requires-Python: >=3.11
 Requires-Dist: cloudpickle>=3.0.0
 Requires-Dist: cryptography<45.0,>=42.0.0
-Requires-Dist: grpcio-tools
+Requires-Dist: grpcio-tools==1.75.1
 Requires-Dist: grpcio<2.0.0,>=1.75.0
 Requires-Dist: httpx>=0.25.0
 Requires-Dist: jsonschema-rs<0.30,>=0.20.0
--- langgraph_api-0.4.46/benchmark/Makefile
+++ langgraph_api-0.4.48/benchmark/Makefile
@@ -18,7 +18,7 @@ benchmark-burst:
 
 benchmark-ramp:
 	make benchmark-reset
-	k6 run --out json=raw_data_$(shell date +%Y-%m-%dT%H-%M-%S).json ramp.js
+	k6 run --out json=raw_data_$(shell date +%Y-%m-%dT%H-%M-%S).json --system-tags=[] ramp.js
 
 benchmark-capacity:
 	rm -f capacity_summary_t*.json capacity_report_*.json capacity_raw_t*.json capacity_hist_*.png capacity_pie_*.png
--- /dev/null
+++ langgraph_api-0.4.48/benchmark/benchmark-runners/assistant.js
@@ -0,0 +1,84 @@
+import { BenchmarkRunner } from './benchmark-runner.js';
+import http from 'k6/http';
+import { check } from 'k6';
+// Uses crypto which is globally available in k6: https://grafana.com/docs/k6/latest/javascript-api/#crypto
+
+export class Assistant extends BenchmarkRunner {
+  /**
+   * Create an assistant, search for it, get it, patch it, get it again, count the number of assistants, delete the assistant
+   */
+  static run(baseUrl, requestParams) {
+    const graph_id = 'benchmark';
+    let metadata = { description: `Test benchmark assistant ${crypto.randomUUID()}` };
+
+    // Create an assistant
+    const createPayload = JSON.stringify({ graph_id, metadata });
+    const createResponse = http.post(`${baseUrl}/assistants`, createPayload, requestParams);
+    const assistantId = createResponse.json().assistant_id;
+
+    // Search for the assistant
+    const searchPayload = JSON.stringify({ graph_id, metadata, limit: 1 });
+    const searchResponse = http.post(`${baseUrl}/assistants/search`, searchPayload, requestParams);
+
+    // Get the assistant
+    const getResponse = http.get(`${baseUrl}/assistants/${assistantId}`, requestParams);
+
+    // Patch the assistant
+    metadata = { description: `Test benchmark assistant ${crypto.randomUUID()}` };
+    const patchPayload = JSON.stringify({ metadata });
+    const patchResponse = http.patch(`${baseUrl}/assistants/${assistantId}`, patchPayload, requestParams);
+
+    // Get the assistant again
+    const getResponse2 = http.get(`${baseUrl}/assistants/${assistantId}`, requestParams);
+
+    // Count the number of assistants
+    const countPayload = JSON.stringify({ graph_id, metadata });
+    const countResponse = http.post(`${baseUrl}/assistants/count`, countPayload, requestParams);
+
+    // Delete the assistant
+    http.del(`${baseUrl}/assistants/${assistantId}`, requestParams);
+    return {
+      assistantId,
+      searchResponse,
+      getResponse,
+      patchResponse,
+      getResponse2,
+      countResponse,
+    };
+  }
+
+  static validate(result, errorMetrics, benchmarkGraphOptions) {
+    let success = false;
+    try {
+      success = check(result, {
+        'Search response contains a single assistant': (r) => r.searchResponse.json().length === 1,
+        'Search response contains the correct assistant': (r) => r.searchResponse.json()[0].assistant_id === result.assistantId,
+        'Get response contains the correct assistant': (r) => r.getResponse.json().assistant_id === result.assistantId,
+        'Patch response contains the correct assistant': (r) => r.patchResponse.json().assistant_id === result.assistantId,
+        'Get response 2 contains the correct assistant': (r) => r.getResponse2.json().assistant_id === result.assistantId,
+        'Get response 2 contains the new description': (r) => r.getResponse2.json().metadata.description != result.getResponse.json().metadata.description && result.getResponse2.json().metadata.description === result.patchResponse.json().metadata.description,
+        'Count response contains the correct number of assistants': (r) => parseInt(r.countResponse.json()) === 1,
+      });
+    } catch (error) {
+      console.log(`Unknown error checking response: ${error.message}`);
+    }
+
+    if (!success) {
+      if (result.searchResponse.status >= 500 || result.getResponse.status >= 500 || result.patchResponse.status >= 500 || result.getResponse2.status >= 500 || result.countResponse.status >= 500) {
+        errorMetrics.server_errors.add(1);
+        console.log(`Server error: ${result.searchResponse.status}, ${result.getResponse.status}, ${result.patchResponse.status}, ${result.getResponse2.status}, ${result.countResponse.status}`);
+      } else if (result.searchResponse.status === 408 || result.getResponse.status === 408 || result.patchResponse.status === 408 || result.getResponse2.status === 408 || result.countResponse.status === 408) {
+        errorMetrics.timeout_errors.add(1);
+        console.log(`Timeout error: ${result.searchResponse.error}, ${result.getResponse.error}, ${result.patchResponse.error}, ${result.getResponse2.error}, ${result.countResponse.error}`);
+      } else {
+        errorMetrics.other_errors.add(1);
+        console.log(`Other error: ${result.searchResponse.body}, ${result.getResponse.body}, ${result.patchResponse.body}, ${result.getResponse2.body}, ${result.countResponse.body}`);
+      }
+    }
+    return success;
+  }
+
+  static toString() {
+    return 'assistants';
+  }
+}
--- /dev/null
+++ langgraph_api-0.4.48/benchmark/benchmark-runners/benchmark-runner.js
@@ -0,0 +1,34 @@
+/**
+ * Abstract class for running a benchmark type.
+ */
+export class BenchmarkRunner {
+  /**
+   * Run the benchmark type.
+   * @param {string} baseUrl - The base URL of the LangGraph server.
+   * @param {any} requestParams - The parameters to use for the request. Includes headers and other config like timeout.
+   * @param {any} benchmarkGraphOptions - The options for the benchmark graph.
+   * @returns {any} - The result of the benchmark type. This format will vary by benchmark type.
+   */
+  static run(baseUrl, requestParams, benchmarkGraphOptions) {
+    throw new Error('Not implemented');
+  }
+
+  /**
+   * Convert the benchmark name to a string.
+   * @returns {string} - A string representation of the benchmark name.
+   */
+  static toString() {
+    throw new Error('Not implemented');
+  }
+
+  /**
+   * Validate the result of the benchmark run.
+   * @param {any} result - The result of the benchmark run. This format will vary by benchmark type.
+   * @param {any} errorMetrics - A dictionary of error metrics that can be used to more granularly track errors.
+   * @param {any} benchmarkGraphOptions - The options for the benchmark graph.
+   * @returns {boolean} - True if the benchmark run was successful, false otherwise.
+   */
+  static validate(result, errorMetrics, benchmarkGraphOptions) {
+    throw new Error('Not implemented');
+  }
+}
--- /dev/null
+++ langgraph_api-0.4.48/benchmark/benchmark-runners/benchmarks.js
@@ -0,0 +1,18 @@
+import { WaitWrite } from './wait_write.js';
+import { StreamWrite } from './stream_write.js';
+import { Assistant } from './assistant.js';
+
+export class Benchmarks {
+  static getRunner(type) {
+    switch (type) {
+      case WaitWrite.toString():
+        return WaitWrite;
+      case StreamWrite.toString():
+        return StreamWrite;
+      case Assistant.toString():
+        return Assistant;
+      default:
+        throw new Error(`Unknown benchmark type: ${type}`);
+    }
+  }
+}
--- /dev/null
+++ langgraph_api-0.4.48/benchmark/benchmark-runners/stream_write.js
@@ -0,0 +1,98 @@
+import { BenchmarkRunner } from './benchmark-runner.js';
+import { check } from 'k6';
+import http from 'k6/http';
+
+function parseSSE(text) {
+  const events = [];
+  const lines = text.split('\r\n');
+  let currentEvent = { event: '', data: '' };
+
+  for (const line of lines) {
+    if (line.startsWith('event:')) {
+      currentEvent.event = line.substring(6).trim();
+    } else if (line.startsWith('data:')) {
+      const dataContent = line.substring(5).trim();
+      currentEvent.data = dataContent;
+    } else if (line === '') {
+      // Empty line marks end of event
+      if (currentEvent.data) {
+        try {
+          events.push({
+            event: currentEvent.event,
+            data: JSON.parse(currentEvent.data)
+          });
+        } catch (e) {
+          // Some events might not be JSON
+          events.push(currentEvent);
+        }
+      }
+      currentEvent = { event: '', data: '' };
+    }
+  }
+
+  return events;
+}
+
+export class StreamWrite extends BenchmarkRunner {
+  static run(baseUrl, requestParams, benchmarkGraphOptions) {
+    let url = `${baseUrl}/runs/stream`;
+
+    // Create a payload with the LangGraph agent configuration
+    const payload = JSON.stringify({
+      assistant_id: benchmarkGraphOptions.graph_id,
+      input: benchmarkGraphOptions.input,
+      config: {
+        recursion_limit: benchmarkGraphOptions.input.expand + 2,
+      },
+    });
+
+    // If the request is stateful, create a thread first and use it in the url
+    if (benchmarkGraphOptions.stateful) {
+      const thread = http.post(`${baseUrl}/threads`, "{}", requestParams);
+      const threadId = thread.json().thread_id;
+      url = `${baseUrl}/threads/${threadId}/runs/stream`;
+    }
+
+    const response = http.post(url, payload, requestParams);
+    const events = parseSSE(response.body);
+    return { events, response };
+  }
+
+  static validate(result, errorMetrics, benchmarkGraphOptions) {
+    const expected_messages = benchmarkGraphOptions.input.mode === 'single' ? 1 : benchmarkGraphOptions.input.expand + 1;
+    const expected_events = expected_messages + 2; // +2 for the metadata and initial values event
+    let success = false;
+    try {
+      success = check(result, {
+        'Run completed successfully': (r) => r.response.status === 200,
+        'Response contains expected number of events': (r) => r.events.length === expected_events,
+        'Response contains metadata event': (r) => r.events[0].event === 'metadata',
+        'Response contains expected number of messages': (r) => r.events[expected_events - 1].data.messages.length === expected_messages,
+      });
+    } catch (error) {
+      console.log(`Unknown error checking result: ${error.message}`);
+    }
+
+    if (!success) {
+      // Classify error based on status code or response
+      if (result.response.status >= 500) {
+        errorMetrics.server_errors.add(1);
+        console.log(`Server error: ${result.response.status}`);
+      } else if (result.response.status === 408 || result.response.error?.includes('timeout')) {
+        errorMetrics.timeout_errors.add(1);
+        console.log(`Timeout error: ${result.response.error}`);
+      } else if (result.response.status === 200 && result.events[expected_events - 1].data.messages.length !== expected_messages) {
+        errorMetrics.missing_message_errors.add(1);
+        console.log(`Missing message error: Status ${result.response.status}, ${JSON.stringify(result.response.body)}, ${result.response.headers?.['Content-Location']}`);
+      } else {
+        errorMetrics.other_errors.add(1);
+        console.log(`Other error: Status ${result.response.status}, ${JSON.stringify(result.response.body)}, ${result.events}`);
+      }
+    }
+    return success;
+  }
+
+  static toString() {
+    return 'stream_write';
+  }
+}
--- /dev/null
+++ langgraph_api-0.4.48/benchmark/benchmark-runners/wait_write.js
@@ -0,0 +1,65 @@
+import { BenchmarkRunner } from './benchmark-runner.js';
+import http from 'k6/http';
+import { check } from 'k6';
+
+export class WaitWrite extends BenchmarkRunner {
+  static run(baseUrl, requestParams, benchmarkGraphOptions) {
+    let url = `${baseUrl}/runs/wait`;
+
+    // Create a payload with the LangGraph agent configuration
+    const payload = JSON.stringify({
+      assistant_id: benchmarkGraphOptions.graph_id,
+      input: benchmarkGraphOptions.input,
+      config: {
+        recursion_limit: benchmarkGraphOptions.input.expand + 2,
+      },
+    });
+
+    // If the request is stateful, create a thread first and use it in the url
+    if (benchmarkGraphOptions.stateful) {
+      const thread = http.post(`${baseUrl}/threads`, "{}", requestParams);
+      const threadId = thread.json().thread_id;
+      url = `${baseUrl}/threads/${threadId}/runs/wait`;
+    }
+
+    // Make a single request to the wait endpoint
+    const result = http.post(url, payload, requestParams);
+
+    return result;
+  }
+
+  static validate(result, errorMetrics, benchmarkGraphOptions) {
+    const expected_length = benchmarkGraphOptions.input.mode === 'single' ? 1 : benchmarkGraphOptions.input.expand + 1;
+    let success = false;
+    try {
+      success = check(result, {
+        'Run completed successfully': (r) => r.status === 200,
+        'Response contains expected number of messages': (r) => r.json().messages?.length === expected_length,
+      });
+    } catch (error) {
+      console.log(`Unknown error checking result: ${error.message}`);
+    }
+
+    if (!success) {
+      // Classify error based on status code or response
+      if (result.status >= 500) {
+        errorMetrics.server_errors.add(1);
+        console.log(`Server error: ${result.status}`);
+      } else if (result.status === 408 || result.error?.includes('timeout')) {
+        errorMetrics.timeout_errors.add(1);
+        console.log(`Timeout error: ${result.error}`);
+      } else if (result.status === 200 && result.json().messages?.length !== expected_length) {
+        errorMetrics.missing_message_errors.add(1);
+        console.log(`Missing message error: Status ${result.status}, ${JSON.stringify(result.body)}, ${result.headers?.['Content-Location']}`);
+      } else {
+        errorMetrics.other_errors.add(1);
+        console.log(`Other error: Status ${result.status}, ${JSON.stringify(result.body)}`);
+      }
+    }
+    return success;
+  }
+
+  static toString() {
+    return 'wait_write';
+  }
+}
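Note: the runner classes above share the static contract (run, validate, toString) defined by BenchmarkRunner, and the ramp script only ever reaches them through Benchmarks.getRunner. A minimal sketch of how another workload could plug in follows; the runner name 'noop' and the health-check style request it issues are hypothetical, not part of this release:

import { BenchmarkRunner } from './benchmark-runner.js';
import http from 'k6/http';
import { check } from 'k6';

// Hypothetical runner: issues a single GET and treats any 200 as success.
export class Noop extends BenchmarkRunner {
  static run(baseUrl, requestParams) {
    // Endpoint chosen for illustration only.
    return http.get(`${baseUrl}/ok`, requestParams);
  }

  static validate(result, errorMetrics) {
    const success = check(result, { 'ok returned 200': (r) => r.status === 200 });
    if (!success) {
      errorMetrics.other_errors.add(1);
    }
    return success;
  }

  static toString() {
    return 'noop';
  }
}

Registering it would mean one more case in the benchmarks.js switch (case Noop.toString(): return Noop;) and selecting it via the BENCHMARK_TYPE environment variable used by ramp.js below.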
--- langgraph_api-0.4.46/benchmark/ramp.js
+++ langgraph_api-0.4.48/benchmark/ramp.js
@@ -1,7 +1,7 @@
-import
-import { check, sleep } from 'k6';
+import { sleep } from 'k6';
 import { Counter, Trend } from 'k6/metrics';
 import { randomIntBetween } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
+import { Benchmarks } from './benchmark-runners/benchmarks.js';
 
 // Custom metrics
 const runDuration = new Trend('run_duration');
@@ -13,6 +13,14 @@ const serverErrors = new Counter('server_errors');
 const missingMessageErrors = new Counter('missing_message_errors');
 const otherErrors = new Counter('other_errors');
 
+const errorMetrics = {
+  timeout_errors: timeoutErrors,
+  connection_errors: connectionErrors,
+  server_errors: serverErrors,
+  missing_message_errors: missingMessageErrors,
+  other_errors: otherErrors,
+}
+
 // URL of your LangGraph server
 const BASE_URL = __ENV.BASE_URL || 'http://localhost:9123';
 // LangSmith API key only needed with a custom server endpoint
@@ -22,6 +30,7 @@ const LANGSMITH_API_KEY = __ENV.LANGSMITH_API_KEY;
 const LOAD_SIZE = parseInt(__ENV.LOAD_SIZE || '500');
 const LEVELS = parseInt(__ENV.LEVELS || '2');
 const PLATEAU_DURATION = parseInt(__ENV.PLATEAU_DURATION || '300');
+const BENCHMARK_TYPE = __ENV.BENCHMARK_TYPE || 'wait_write';
 const STATEFUL = __ENV.STATEFUL === 'true';
 
 // Params for the agent
@@ -61,94 +70,53 @@ export let options = {
   },
 };
 
+const runner = Benchmarks.getRunner(BENCHMARK_TYPE);
+
+const benchmarkGraphOptions = {
+  graph_id: "benchmark",
+  input: {
+    data_size: DATA_SIZE,
+    delay: DELAY,
+    expand: EXPAND,
+    mode: MODE,
+  },
+  stateful: STATEFUL,
+}
+
 // Main test function
 export default function() {
   const startTime = new Date().getTime();
-  let response;
 
-
-
-
-
-
-
+  // Prepare the request payload
+  const headers = { 'Content-Type': 'application/json' };
+  if (LANGSMITH_API_KEY) {
+    headers['x-api-key'] = LANGSMITH_API_KEY;
+  }
+  const requestParams = {
+    headers,
+    timeout: '120s', // k6 request timeout slightly longer than the server timeout
+  };
 
-
-
-
-
-
-
-
-
-      },
-      config: {
-        recursion_limit: EXPAND + 2,
-      },
-    });
-
-  // If the request is stateful, create a thread first and use it in the url
-  let url = `${BASE_URL}/runs/wait`;
-  if (STATEFUL) {
-    const thread = http.post(`${BASE_URL}/threads`, payload, {
-      headers,
-      timeout: '120s' // k6 request timeout slightly longer than the server timeout
-    });
-    const threadId = thread.json().thread_id;
-    url = `${BASE_URL}/threads/${threadId}/runs/wait`;
-  }
+  let result;
+  try {
+    result = runner.run(BASE_URL, requestParams, benchmarkGraphOptions);
+  } catch (error) {
+    failedRuns.add(1);
+    otherErrors.add(1);
+    console.log(`Unknown error running benchmark: ${error.message}`);
+  }
 
-
-
-    headers,
-    timeout: '120s' // k6 request timeout slightly longer than the server timeout
-  });
-
-  // Don't include verification in the duration of the request
-  const duration = new Date().getTime() - startTime;
-
-  // Check the response
-  const expected_length = MODE === 'single' ? 1 : EXPAND + 1;
-  let success = false;
-  try {
-    success = check(response, {
-      'Run completed successfully': (r) => r.status === 200,
-      'Response contains expected number of messages': (r) => JSON.parse(r.body)?.messages?.length === expected_length,
-    });
-  } catch (error) {
-    console.log(`Error checking response: ${error}`);
-  }
+  // Don't include verification in the duration of the request
+  const duration = new Date().getTime() - startTime;
 
+  let success = runner.validate(result, errorMetrics, benchmarkGraphOptions);
 
-
-
-
-
-
-      // Handle failure
-      failedRuns.add(1);
-
-      // Classify error based on status code or response
-      if (response.status >= 500) {
-        serverErrors.add(1);
-        console.log(`Server error: ${response.status}`);
-      } else if (response.status === 408 || response.error?.includes('timeout')) {
-        timeoutErrors.add(1);
-        console.log(`Timeout error: ${response.error}`);
-      } else if (response.status === 200 && response.body?.messages?.length !== expected_length) {
-        missingMessageErrors.add(1);
-        console.log(`Missing message error: Status ${response.status}, ${JSON.stringify(response.body)}, ${response.headers?.['Content-Location']}`);
-      } else {
-        otherErrors.add(1);
-        console.log(`Other error: Status ${response.status}, ${JSON.stringify(response.body)}`);
-      }
-    }
-  } catch (error) {
-    // Handle truly unexpected errors
+  if (success) {
+    runDuration.add(duration);
+    successfulRuns.add(1);
+  } else {
+    // Don't log the duration for failed runs
     failedRuns.add(1);
-    otherErrors.add(1);
-    console.log(response);
-    console.log(`Unexpected error: ${error.message}, Response Body: ${response?.body}`);
   }
 
   // Add a small random sleep between iterations to prevent thundering herd
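As a usage sketch (assuming k6's -e flag is used to populate __ENV, and that a server is already listening), the refactored ramp.js can be pointed at any registered runner without editing the script, e.g. k6 run -e BENCHMARK_TYPE=stream_write -e STATEFUL=true -e BASE_URL=http://localhost:9123 ramp.js. Leaving BENCHMARK_TYPE unset falls back to the default wait_write runner.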
--- /dev/null
+++ langgraph_api-0.4.48/langgraph_api/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.4.48"
--- langgraph_api-0.4.46/langgraph_api/api/threads.py
+++ langgraph_api-0.4.48/langgraph_api/api/threads.py
@@ -5,6 +5,8 @@ from starlette.exceptions import HTTPException
 from starlette.responses import Response
 from starlette.routing import BaseRoute
 
+from langgraph_api.feature_flags import FF_USE_CORE_API
+from langgraph_api.grpc_ops.ops import Threads as GrpcThreads
 from langgraph_api.route import ApiRequest, ApiResponse, ApiRoute
 from langgraph_api.schema import THREAD_FIELDS, ThreadStreamMode
 from langgraph_api.sse import EventSourceResponse
@@ -30,6 +32,8 @@ from langgraph_runtime.database import connect
 from langgraph_runtime.ops import Threads
 from langgraph_runtime.retry import retry_db
 
+CrudThreads = GrpcThreads if FF_USE_CORE_API else Threads
+
 
 @retry_db
 async def create_thread(
@@ -41,7 +45,7 @@ async def create_thread(
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
         thread_id = thread_id or str(uuid4())
-        iter = await
+        iter = await CrudThreads.put(
             conn,
             thread_id,
             metadata=payload.get("metadata"),
@@ -78,7 +82,7 @@ async def search_threads(
     limit = int(payload.get("limit") or 10)
     offset = int(payload.get("offset") or 0)
     async with connect() as conn:
-        threads_iter, next_offset = await
+        threads_iter, next_offset = await CrudThreads.search(
             conn,
             status=payload.get("status"),
             values=payload.get("values"),
@@ -103,7 +107,7 @@ async def count_threads(
     """Count threads."""
     payload = await request.json(ThreadCountRequest)
     async with connect() as conn:
-        count = await
+        count = await CrudThreads.count(
             conn,
             status=payload.get("status"),
             values=payload.get("values"),
@@ -277,7 +281,7 @@ async def get_thread(
     thread_id = request.path_params["thread_id"]
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
-        thread = await
+        thread = await CrudThreads.get(conn, thread_id)
     return ApiResponse(await fetchone(thread))
 
 
@@ -290,7 +294,7 @@ async def patch_thread(
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     payload = await request.json(ThreadPatch)
     async with connect() as conn:
-        thread = await
+        thread = await CrudThreads.patch(
             conn,
             thread_id,
             metadata=payload.get("metadata", {}),
@@ -305,7 +309,7 @@ async def delete_thread(request: ApiRequest):
     thread_id = request.path_params["thread_id"]
     validate_uuid(thread_id, "Invalid thread ID: must be a UUID")
     async with connect() as conn:
-        tid = await
+        tid = await CrudThreads.delete(conn, thread_id)
         await fetchone(tid)
     return Response(status_code=204)
 
@@ -314,7 +318,7 @@ async def delete_thread(request: ApiRequest):
 async def copy_thread(request: ApiRequest):
     thread_id = request.path_params["thread_id"]
     async with connect() as conn:
-        iter = await
+        iter = await CrudThreads.copy(conn, thread_id)
     return ApiResponse(await fetchone(iter, not_found_code=409))
 
 
--- langgraph_api-0.4.46/langgraph_api/cli.py
+++ langgraph_api-0.4.48/langgraph_api/cli.py
@@ -8,12 +8,10 @@ import typing
 from collections.abc import Mapping, Sequence
 from typing import Literal
 
-from typing_extensions import TypedDict
-
 if typing.TYPE_CHECKING:
     from packaging.version import Version
 
-    from langgraph_api.config import HttpConfig, StoreConfig
+    from langgraph_api.config import AuthConfig, HttpConfig, StoreConfig
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -81,51 +79,6 @@ def patch_environment(**kwargs):
         os.environ[key] = value
 
 
-class SecurityConfig(TypedDict, total=False):
-    securitySchemes: dict
-    security: list
-    # path => {method => security}
-    paths: dict[str, dict[str, list]]
-
-
-class CacheConfig(TypedDict, total=False):
-    cache_keys: list[str]
-    ttl_seconds: int
-    max_size: int
-
-
-class AuthConfig(TypedDict, total=False):
-    path: str
-    """Path to the authentication function in a Python file."""
-    disable_studio_auth: bool
-    """Whether to disable auth when connecting from the LangSmith Studio."""
-    openapi: SecurityConfig
-    """The schema to use for updating the openapi spec.
-
-    Example:
-        {
-            "securitySchemes": {
-                "OAuth2": {
-                    "type": "oauth2",
-                    "flows": {
-                        "password": {
-                            "tokenUrl": "/token",
-                            "scopes": {
-                                "me": "Read information about the current user",
-                                "items": "Access to create and manage items"
-                            }
-                        }
-                    }
-                }
-            },
-            "security": [
-                {"OAuth2": ["me"]}  # Default security requirement for all endpoints
-            ]
-        }
-    """
-    cache: CacheConfig | None
-
-
 def run_server(
     host: str = "127.0.0.1",
     port: int = 2024,
@@ -141,7 +94,7 @@ def run_server(
     reload_includes: Sequence[str] | None = None,
     reload_excludes: Sequence[str] | None = None,
     store: typing.Optional["StoreConfig"] = None,
-    auth: AuthConfig
+    auth: typing.Optional["AuthConfig"] = None,
     http: typing.Optional["HttpConfig"] = None,
     ui: dict | None = None,
     ui_config: dict | None = None,