@hebo-ai/gateway 0.9.2 → 0.9.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/config.d.ts +2 -0
- package/dist/config.js +125 -0
- package/dist/endpoints/chat-completions/converters.d.ts +26 -0
- package/dist/endpoints/chat-completions/converters.js +525 -0
- package/dist/endpoints/chat-completions/handler.d.ts +2 -0
- package/dist/endpoints/chat-completions/handler.js +152 -0
- package/dist/endpoints/chat-completions/index.d.ts +4 -0
- package/dist/endpoints/chat-completions/index.js +4 -0
- package/dist/endpoints/chat-completions/otel.d.ts +5 -0
- package/dist/endpoints/chat-completions/otel.js +178 -0
- package/dist/endpoints/chat-completions/schema.d.ts +1170 -0
- package/dist/endpoints/chat-completions/schema.js +252 -0
- package/dist/endpoints/conversations/converters.d.ts +8 -0
- package/dist/endpoints/conversations/converters.js +29 -0
- package/dist/endpoints/conversations/handler.d.ts +2 -0
- package/dist/endpoints/conversations/handler.js +259 -0
- package/dist/endpoints/conversations/index.d.ts +3 -0
- package/dist/endpoints/conversations/index.js +3 -0
- package/dist/endpoints/conversations/schema.d.ts +1511 -0
- package/dist/endpoints/conversations/schema.js +74 -0
- package/dist/endpoints/conversations/storage/dialects/greptime.d.ts +10 -0
- package/dist/endpoints/conversations/storage/dialects/greptime.js +87 -0
- package/dist/endpoints/conversations/storage/dialects/mysql.d.ts +12 -0
- package/dist/endpoints/conversations/storage/dialects/mysql.js +118 -0
- package/dist/endpoints/conversations/storage/dialects/postgres.d.ts +16 -0
- package/dist/endpoints/conversations/storage/dialects/postgres.js +185 -0
- package/dist/endpoints/conversations/storage/dialects/sqlite.d.ts +11 -0
- package/dist/endpoints/conversations/storage/dialects/sqlite.js +176 -0
- package/dist/endpoints/conversations/storage/dialects/types.d.ts +42 -0
- package/dist/endpoints/conversations/storage/dialects/types.js +0 -0
- package/dist/endpoints/conversations/storage/dialects/utils.d.ts +25 -0
- package/dist/endpoints/conversations/storage/dialects/utils.js +80 -0
- package/dist/endpoints/conversations/storage/memory.d.ts +25 -0
- package/dist/endpoints/conversations/storage/memory.js +200 -0
- package/dist/endpoints/conversations/storage/sql.d.ts +33 -0
- package/dist/endpoints/conversations/storage/sql.js +276 -0
- package/dist/endpoints/conversations/storage/types.d.ts +39 -0
- package/dist/endpoints/conversations/storage/types.js +0 -0
- package/dist/endpoints/embeddings/converters.d.ts +10 -0
- package/dist/endpoints/embeddings/converters.js +31 -0
- package/dist/endpoints/embeddings/handler.d.ts +2 -0
- package/dist/endpoints/embeddings/handler.js +99 -0
- package/dist/endpoints/embeddings/index.d.ts +4 -0
- package/dist/endpoints/embeddings/index.js +4 -0
- package/dist/endpoints/embeddings/otel.d.ts +5 -0
- package/dist/endpoints/embeddings/otel.js +29 -0
- package/dist/endpoints/embeddings/schema.d.ts +44 -0
- package/dist/endpoints/embeddings/schema.js +29 -0
- package/dist/endpoints/models/converters.d.ts +6 -0
- package/dist/endpoints/models/converters.js +42 -0
- package/dist/endpoints/models/handler.d.ts +2 -0
- package/dist/endpoints/models/handler.js +29 -0
- package/dist/endpoints/models/index.d.ts +3 -0
- package/dist/endpoints/models/index.js +3 -0
- package/dist/endpoints/models/schema.d.ts +42 -0
- package/dist/endpoints/models/schema.js +31 -0
- package/dist/endpoints/responses/converters.d.ts +17 -0
- package/dist/endpoints/responses/converters.js +1037 -0
- package/dist/endpoints/responses/handler.d.ts +2 -0
- package/dist/endpoints/responses/handler.js +141 -0
- package/dist/endpoints/responses/index.d.ts +4 -0
- package/dist/endpoints/responses/index.js +4 -0
- package/dist/endpoints/responses/otel.d.ts +6 -0
- package/dist/endpoints/responses/otel.js +226 -0
- package/dist/endpoints/responses/schema.d.ts +2109 -0
- package/dist/endpoints/responses/schema.js +314 -0
- package/dist/endpoints/shared/converters.d.ts +56 -0
- package/dist/endpoints/shared/converters.js +180 -0
- package/dist/endpoints/shared/schema.d.ts +70 -0
- package/dist/endpoints/shared/schema.js +46 -0
- package/dist/errors/ai-sdk.d.ts +2 -0
- package/dist/errors/ai-sdk.js +52 -0
- package/dist/errors/gateway.d.ts +5 -0
- package/dist/errors/gateway.js +13 -0
- package/dist/errors/openai.d.ts +15 -0
- package/dist/errors/openai.js +40 -0
- package/dist/errors/utils.d.ts +24 -0
- package/dist/errors/utils.js +46 -0
- package/dist/gateway.d.ts +11 -0
- package/dist/gateway.js +44 -0
- package/dist/index.d.ts +11 -0
- package/dist/index.js +10 -0
- package/dist/lifecycle.d.ts +3 -0
- package/dist/lifecycle.js +114 -0
- package/dist/logger/default.d.ts +4 -0
- package/dist/logger/default.js +81 -0
- package/dist/logger/index.d.ts +11 -0
- package/dist/logger/index.js +25 -0
- package/dist/middleware/common.d.ts +12 -0
- package/dist/middleware/common.js +146 -0
- package/dist/middleware/debug.d.ts +3 -0
- package/dist/middleware/debug.js +27 -0
- package/dist/middleware/matcher.d.ts +28 -0
- package/dist/middleware/matcher.js +118 -0
- package/dist/middleware/utils.d.ts +2 -0
- package/dist/middleware/utils.js +24 -0
- package/dist/models/amazon/index.d.ts +2 -0
- package/dist/models/amazon/index.js +2 -0
- package/dist/models/amazon/middleware.d.ts +3 -0
- package/dist/models/amazon/middleware.js +69 -0
- package/dist/models/amazon/presets.d.ts +345 -0
- package/dist/models/amazon/presets.js +80 -0
- package/dist/models/anthropic/index.d.ts +2 -0
- package/dist/models/anthropic/index.js +2 -0
- package/dist/models/anthropic/middleware.d.ts +5 -0
- package/dist/models/anthropic/middleware.js +128 -0
- package/dist/models/anthropic/presets.d.ts +711 -0
- package/dist/models/anthropic/presets.js +140 -0
- package/dist/models/catalog.d.ts +4 -0
- package/dist/models/catalog.js +8 -0
- package/dist/models/cohere/index.d.ts +2 -0
- package/dist/models/cohere/index.js +2 -0
- package/dist/models/cohere/middleware.d.ts +3 -0
- package/dist/models/cohere/middleware.js +62 -0
- package/dist/models/cohere/presets.d.ts +411 -0
- package/dist/models/cohere/presets.js +134 -0
- package/dist/models/google/index.d.ts +2 -0
- package/dist/models/google/index.js +2 -0
- package/dist/models/google/middleware.d.ts +8 -0
- package/dist/models/google/middleware.js +118 -0
- package/dist/models/google/presets.d.ts +815 -0
- package/dist/models/google/presets.js +184 -0
- package/dist/models/meta/index.d.ts +1 -0
- package/dist/models/meta/index.js +1 -0
- package/dist/models/meta/presets.d.ts +483 -0
- package/dist/models/meta/presets.js +105 -0
- package/dist/models/openai/index.d.ts +2 -0
- package/dist/models/openai/index.js +2 -0
- package/dist/models/openai/middleware.d.ts +4 -0
- package/dist/models/openai/middleware.js +89 -0
- package/dist/models/openai/presets.d.ts +1319 -0
- package/dist/models/openai/presets.js +277 -0
- package/dist/models/types.d.ts +20 -0
- package/dist/models/types.js +100 -0
- package/dist/models/voyage/index.d.ts +2 -0
- package/dist/models/voyage/index.js +2 -0
- package/dist/models/voyage/middleware.d.ts +2 -0
- package/dist/models/voyage/middleware.js +19 -0
- package/dist/models/voyage/presets.d.ts +436 -0
- package/dist/models/voyage/presets.js +85 -0
- package/dist/providers/anthropic/canonical.d.ts +3 -0
- package/dist/providers/anthropic/canonical.js +9 -0
- package/dist/providers/anthropic/index.d.ts +1 -0
- package/dist/providers/anthropic/index.js +1 -0
- package/dist/providers/bedrock/canonical.d.ts +17 -0
- package/dist/providers/bedrock/canonical.js +64 -0
- package/dist/providers/bedrock/index.d.ts +2 -0
- package/dist/providers/bedrock/index.js +2 -0
- package/dist/providers/bedrock/middleware.d.ts +5 -0
- package/dist/providers/bedrock/middleware.js +133 -0
- package/dist/providers/cohere/canonical.d.ts +3 -0
- package/dist/providers/cohere/canonical.js +17 -0
- package/dist/providers/cohere/index.d.ts +1 -0
- package/dist/providers/cohere/index.js +1 -0
- package/dist/providers/groq/canonical.d.ts +3 -0
- package/dist/providers/groq/canonical.js +12 -0
- package/dist/providers/groq/index.d.ts +2 -0
- package/dist/providers/groq/index.js +2 -0
- package/dist/providers/groq/middleware.d.ts +2 -0
- package/dist/providers/groq/middleware.js +30 -0
- package/dist/providers/openai/canonical.d.ts +3 -0
- package/dist/providers/openai/canonical.js +8 -0
- package/dist/providers/openai/index.d.ts +1 -0
- package/dist/providers/openai/index.js +1 -0
- package/dist/providers/registry.d.ts +24 -0
- package/dist/providers/registry.js +103 -0
- package/dist/providers/types.d.ts +7 -0
- package/dist/providers/types.js +11 -0
- package/dist/providers/vertex/canonical.d.ts +3 -0
- package/dist/providers/vertex/canonical.js +8 -0
- package/dist/providers/vertex/index.d.ts +2 -0
- package/dist/providers/vertex/index.js +2 -0
- package/dist/providers/vertex/middleware.d.ts +2 -0
- package/dist/providers/vertex/middleware.js +47 -0
- package/dist/providers/voyage/canonical.d.ts +3 -0
- package/dist/providers/voyage/canonical.js +7 -0
- package/dist/providers/voyage/index.d.ts +1 -0
- package/dist/providers/voyage/index.js +1 -0
- package/dist/telemetry/ai-sdk.d.ts +2 -0
- package/dist/telemetry/ai-sdk.js +31 -0
- package/dist/telemetry/baggage.d.ts +1 -0
- package/dist/telemetry/baggage.js +24 -0
- package/dist/telemetry/fetch.d.ts +2 -0
- package/dist/telemetry/fetch.js +49 -0
- package/dist/telemetry/gen-ai.d.ts +7 -0
- package/dist/telemetry/gen-ai.js +108 -0
- package/dist/telemetry/http.d.ts +3 -0
- package/dist/telemetry/http.js +54 -0
- package/dist/telemetry/index.d.ts +1 -0
- package/dist/telemetry/index.js +1 -0
- package/dist/telemetry/memory.d.ts +2 -0
- package/dist/telemetry/memory.js +43 -0
- package/dist/telemetry/span.d.ts +13 -0
- package/dist/telemetry/span.js +60 -0
- package/dist/types.d.ts +231 -0
- package/dist/types.js +2 -0
- package/dist/utils/body.d.ts +19 -0
- package/dist/utils/body.js +99 -0
- package/dist/utils/env.d.ts +2 -0
- package/dist/utils/env.js +7 -0
- package/dist/utils/headers.d.ts +4 -0
- package/dist/utils/headers.js +22 -0
- package/dist/utils/preset.d.ts +10 -0
- package/dist/utils/preset.js +41 -0
- package/dist/utils/request.d.ts +2 -0
- package/dist/utils/request.js +43 -0
- package/dist/utils/response.d.ts +6 -0
- package/dist/utils/response.js +55 -0
- package/dist/utils/stream.d.ts +9 -0
- package/dist/utils/stream.js +100 -0
- package/dist/utils/url.d.ts +4 -0
- package/dist/utils/url.js +21 -0
- package/package.json +1 -1
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import { metrics } from "@opentelemetry/api";
|
|
2
|
+
import { STATUS_CODE } from "../errors/utils";
|
|
3
|
+
// Resolves the meter for all gateway GenAI metric instruments.
const getMeter = () => metrics.getMeter("@hebo/gateway");
// Histogram instruments are created lazily by the getters below —
// presumably so the meter is resolved after the host app has configured
// its MeterProvider (TODO confirm against gateway startup order).
let requestDurationHistogram;
let timePerOutputTokenHistogram;
let timeToFirstTokenHistogram;
let tokenUsageHistogram;
|
|
8
|
+
// Lazily creates the end-to-end request duration histogram (unit: seconds).
// Buckets follow upstream OTel's http.server.request.duration boundaries,
// with the tail extended up to 30 minutes for slow service tiers.
const getRequestDurationHistogram = () => {
    if (!requestDurationHistogram) {
        requestDurationHistogram = getMeter().createHistogram("gen_ai.server.request.duration", {
            description: "End-to-end gateway request duration",
            unit: "s",
            advice: {
                explicitBucketBoundaries: [
                    0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10,
                    30, 60, 120, 300, 600, 900, 1800,
                ],
            },
        });
    }
    return requestDurationHistogram;
};
|
|
19
|
+
// Lazily creates the time-to-first-token histogram (unit: seconds).
// Upstream OTel uses the same dense sub-second sequence through 10s;
// we preserve that sequence and extend the tail for slow service tiers
// up to 30 minutes.
const getTimeToFirstTokenHistogram = () => {
    if (!timeToFirstTokenHistogram) {
        timeToFirstTokenHistogram = getMeter().createHistogram("gen_ai.server.time_to_first_token", {
            description: "Time from request start until the first token is generated",
            unit: "s",
            advice: {
                explicitBucketBoundaries: [
                    0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25,
                    0.5, 0.75, 1.0, 2.5, 5, 7.5, 10, 30, 60, 120, 300, 600,
                    900, 1800,
                ],
            },
        });
    }
    return timeToFirstTokenHistogram;
};
|
|
31
|
+
// Lazily creates the per-output-token latency histogram (unit: seconds).
// Upstream OTel uses the same low-latency bucket shape.
const getTimePerOutputTokenHistogram = () => {
    if (!timePerOutputTokenHistogram) {
        timePerOutputTokenHistogram = getMeter().createHistogram("gen_ai.server.time_per_output_token", {
            description: "Time per output token generated after the first token",
            unit: "s",
            advice: {
                explicitBucketBoundaries: [
                    0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.25,
                    0.5, 0.75, 1.0, 2.5,
                ],
            },
        });
    }
    return timePerOutputTokenHistogram;
};
|
|
41
|
+
// Lazily creates the token-usage histogram (unit: {token}).
// Upstream OTel uses powers of 4 up to 67,108,864 tokens; we keep the
// low-end anchors and add denser mid/high-range buckets.
const getTokenUsageHistogram = () => {
    if (!tokenUsageHistogram) {
        tokenUsageHistogram = getMeter().createHistogram("gen_ai.client.token.usage", {
            description: "Number of tokens used in the operation, by token type",
            unit: "{token}",
            advice: {
                explicitBucketBoundaries: [
                    1, 4, 16, 64, 256, 1024, 2048, 4096, 8192, 16384, 32768,
                    65536, 131072, 262144, 524288, 1048576, 4194304, 16777216,
                    67108864,
                ],
            },
        });
    }
    return tokenUsageHistogram;
};
|
|
53
|
+
// Builds the baseline gen_ai.* attribute set for a request, merging in any
// defined attributes hooks have placed on ctx.otel. Returns an empty object
// when the gen_ai signal namespace is disabled.
export const getGenAiGeneralAttributes = (ctx, signalLevel) => {
    if (!signalLevel || signalLevel === "off") return {};
    // Prefer the model string from the request body; fall back to the
    // model ID parsed from the request path/context.
    const bodyModel = ctx.body?.model;
    const attrs = {
        "gen_ai.operation.name": ctx.operation,
        "gen_ai.request.model": typeof bodyModel === "string" ? bodyModel : ctx.modelId,
        "gen_ai.response.model": ctx.resolvedModelId,
        "gen_ai.provider.name": ctx.resolvedProviderId,
    };
    // Copy only defined hook-supplied attributes; undefined values are skipped.
    for (const key of Object.keys(ctx.otel)) {
        const value = ctx.otel[key];
        if (value !== undefined) attrs[key] = value;
    }
    return attrs;
};
|
|
69
|
+
// Records the end-to-end request duration (given in milliseconds, recorded
// in seconds). Non-200 statuses are tagged with an error.type attribute.
export const recordRequestDuration = (duration, status, ctx, signalLevel) => {
    if (!signalLevel || signalLevel === "off") return;
    const attributes = getGenAiGeneralAttributes(ctx, signalLevel);
    const isFailure = status !== 200;
    if (isFailure) {
        attributes["error.type"] = `${status} ${STATUS_CODE(status).toLowerCase()}`;
    }
    // Histogram unit is seconds; duration arrives in milliseconds.
    getRequestDurationHistogram().record(duration / 1000, attributes);
};
|
|
78
|
+
// Records time-to-first-token (ms in, seconds recorded). Only emitted at
// the "recommended" and "full" signal levels.
export const recordTimeToFirstToken = (duration, metricAttrs, signalLevel) => {
    const enabled = signalLevel === "recommended" || signalLevel === "full";
    if (!enabled) return;
    getTimeToFirstTokenHistogram().record(duration / 1000, metricAttrs);
};
|
|
83
|
+
// FUTURE: record unsuccessful calls
|
|
84
|
+
// FUTURE: record unsuccessful calls
// Records the average time per output token generated after the first token.
// Requires at least two output tokens (the first token is excluded from the
// average). Only emitted at the "recommended" and "full" signal levels.
export const recordTimePerOutputToken = (start, ttft, tokenAttrs, metricAttrs, signalLevel) => {
    const enabled = signalLevel === "recommended" || signalLevel === "full";
    if (!enabled) return;
    const outputTokens = tokenAttrs["gen_ai.usage.output_tokens"];
    if (typeof outputTokens !== "number" || outputTokens <= 1) return;
    const elapsedAfterFirstMs = performance.now() - start - ttft;
    const perTokenSeconds = elapsedAfterFirstMs / 1000 / (outputTokens - 1);
    getTimePerOutputTokenHistogram().record(perTokenSeconds, metricAttrs);
};
|
|
92
|
+
// FUTURE: record unsuccessful calls
|
|
93
|
+
// FUTURE: record unsuccessful calls
// Records token usage per token type, skipping any count that is absent or
// non-numeric. Only emitted at the "recommended" and "full" signal levels.
export const recordTokenUsage = (tokenAttrs, metricAttrs, signalLevel) => {
    const enabled = signalLevel === "recommended" || signalLevel === "full";
    if (!enabled) return;
    const emit = (count, tokenType) => {
        if (typeof count !== "number") return;
        getTokenUsageHistogram().record(count, { ...metricAttrs, "gen_ai.token.type": tokenType });
    };
    emit(tokenAttrs["gen_ai.usage.input_tokens"], "input");
    emit(tokenAttrs["gen_ai.usage.output_tokens"], "output");
    // FUTURE: "cached" and "reasoning" token types are not yet in the OTel standard — monitor:
    // https://github.com/open-telemetry/semantic-conventions/issues/1959
    // https://github.com/open-telemetry/semantic-conventions/issues/3341
    emit(tokenAttrs["gen_ai.usage.cache_read.input_tokens"], "cached");
    emit(tokenAttrs["gen_ai.usage.reasoning.output_tokens"], "reasoning");
};
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import {} from "../types";
|
|
2
|
+
// Wraps a single header value in a one-element array (HTTP header attributes
// are lists), or undefined when the header is absent.
const headerArr = (h, k) => {
    if (!h.has(k)) return undefined;
    return [h.get(k)];
};
|
|
3
|
+
// Builds OTel HTTP request attributes from a Request, scaled by signal level:
// "off"/unset → {}, "required" → URL/method basics, above that adds the
// user agent, and "full" adds selected request headers.
export const getRequestAttributes = (request, signalLevel) => {
    if (!signalLevel || signalLevel === "off") return {};
    // FUTURE: reuse URL from lifecycle
    let parsed;
    try {
        parsed = new URL(request.url);
    }
    catch {
        // Unparsable URL: URL-derived attributes stay undefined below.
    }
    // Explicit port wins; otherwise infer the scheme default (443 for https,
    // 80 for everything else).
    let port;
    if (parsed) {
        port = parsed.port ? Number(parsed.port) : parsed.protocol === "https:" ? 443 : 80;
    }
    const attrs = {
        "http.request.method": request.method,
        "url.full": request.url,
        "url.path": parsed?.pathname,
        "url.scheme": parsed?.protocol.replace(":", ""),
        "server.address": parsed?.hostname,
        "server.port": port,
    };
    if (signalLevel !== "required") {
        attrs["user_agent.original"] = request.headers.get("user-agent") ?? undefined;
    }
    if (signalLevel === "full") {
        // FUTURE: "url.query", "client.address"
        attrs["http.request.header.content-type"] = headerArr(request.headers, "content-type");
        attrs["http.request.header.content-length"] = headerArr(request.headers, "content-length");
    }
    return attrs;
};
|
|
41
|
+
// Builds OTel HTTP response attributes from a Response, scaled by signal
// level: "off"/unset → {}, otherwise the status code, and "full" adds
// selected response headers.
export const getResponseAttributes = (response, signalLevel) => {
    if (!signalLevel || signalLevel === "off") return {};
    const attrs = {
        "http.response.status_code": response.status,
    };
    if (signalLevel === "full") {
        // BUGFIX: headerArr already returns a one-element array (or undefined
        // when the header is absent). The previous code wrapped it in another
        // array, emitting [["application/json"]] / [undefined] instead of the
        // list-of-strings shape used by getRequestAttributes.
        Object.assign(attrs, {
            "http.response.header.content-type": headerArr(response.headers, "content-type"),
            "http.response.header.content-length": headerArr(response.headers, "content-length"),
        });
    }
    return attrs;
};
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from "./fetch";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from "./fetch";
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
import { metrics } from "@opentelemetry/api";
|
|
2
|
+
// Resolves the meter for the gateway's process-memory gauges.
const getMeter = () => metrics.getMeter("@hebo/gateway");
// Attribute applied to every observation. This module reports process-wide
// totals only, not per-heap-space figures.
const defaultHeapSpaceAttrs = { "v8js.heap.space.name": "total" };
// Guards against registering the observable gauges more than once per process.
let registered = false;
// Memory metrics are emitted only at the "recommended" and "full" levels.
const isEnabled = (level) => level === "recommended" || level === "full";
|
|
6
|
+
// Samples process memory and invokes `observe(heapUsed, rss)`. Silently
// does nothing on runtimes without process.memoryUsage (or where it throws).
const observeMemory = (observe) => {
    let snapshot;
    try {
        snapshot = globalThis.process?.memoryUsage?.();
    }
    catch {
        // Restricted runtimes may throw; skip the observation.
        return;
    }
    if (snapshot) {
        observe(snapshot.heapUsed, snapshot.rss);
    }
};
|
|
18
|
+
// Registers observable gauges for V8 heap usage and RSS, once per process.
// No-op unless the signal level is "recommended" or "full".
export const observeV8jsMemoryMetrics = (level) => {
    if (registered || !isEnabled(level)) return;
    registered = true;
    const meter = getMeter();
    const heapUsedGauge = meter.createObservableGauge("v8js.memory.heap.used", {
        description: "Used bytes in the V8 heap",
        unit: "By",
    });
    heapUsedGauge.addCallback((result) => {
        observeMemory((heapUsed) => result.observe(heapUsed, defaultHeapSpaceAttrs));
    });
    // NOTE(review): this gauge reports process RSS under the heap-space
    // physical-size metric name — confirm that mapping is intentional.
    const physicalSizeGauge = meter.createObservableGauge("v8js.memory.heap.space.physical_size", {
        description: "Physical bytes allocated for the V8 heap space",
        unit: "By",
    });
    physicalSizeGauge.addCallback((result) => {
        observeMemory((_heapUsed, rss) => result.observe(rss, defaultHeapSpaceAttrs));
    });
};
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { Attributes, SpanOptions, Tracer } from "@opentelemetry/api";
import type { TelemetrySignalLevel } from "../types";
/** Sets the module-level tracer; falls back to the default gateway tracer when omitted. */
export declare const setSpanTracer: (tracer?: Tracer) => void;
/** Enables span events when the level is "recommended" or "full"; disables otherwise. */
export declare const setSpanEventsEnabled: (level?: TelemetrySignalLevel) => void;
/**
 * Starts a span (SERVER when no span is active, INTERNAL otherwise) and
 * augments it with convenience helpers. Returns a non-recording no-op span
 * when no tracer has been set.
 */
export declare const startSpan: (name: string, options?: SpanOptions) => import("@opentelemetry/api").Span & {
    /** Runs `fn` with this span installed as the active span in context. */
    runWithContext: <T>(fn: () => Promise<T> | T) => T | Promise<T>;
    /** Records the error as an exception and sets the span status to ERROR. */
    recordError: (_error: unknown) => void;
    /** Ends the span. */
    finish: () => void;
    /** True when another span was already active when this one started. */
    isExisting: boolean;
};
/** Runs `run` inside a new span, recording any thrown error and always ending the span. */
export declare const withSpan: <T>(name: string, run: () => Promise<T> | T, options?: SpanOptions) => Promise<T>;
/** Adds an event to the active span when span events are enabled. */
export declare const addSpanEvent: (name: string, attributes?: Attributes) => void;
/** Sets attributes on the active span; no-op when `attributes` is undefined. */
export declare const setSpanAttributes: (attributes?: Attributes) => void;
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import { INVALID_SPAN_CONTEXT, SpanKind, SpanStatusCode, context, trace } from "@opentelemetry/api";
|
|
2
|
+
// Tracer name used when the host app does not supply its own tracer.
const DEFAULT_TRACER_NAME = "@hebo/gateway";
// Module-level tracer, set via setSpanTracer. While unset, startSpan returns
// a no-op span and withSpan runs its callback without tracing.
let spanTracer;
// Whether addSpanEvent forwards events; toggled by setSpanEventsEnabled.
let spanEventsEnabled = false;
// Helper surface mixed onto the non-recording span returned when tracing is
// disabled. NOTE(review): isExisting is true here — presumably so callers
// skip work reserved for newly-created root spans; confirm against callers.
const NOOP_SPAN = {
    runWithContext: (fn) => fn(),
    recordError: (_error) => { },
    finish: () => { },
    isExisting: true,
};
|
|
11
|
+
// Installs the module-level tracer. When called without an argument, the
// default gateway tracer is resolved via the OTel trace API.
export const setSpanTracer = (tracer) => {
    spanTracer = tracer == null ? trace.getTracer(DEFAULT_TRACER_NAME) : tracer;
};
|
|
14
|
+
// Span events are only emitted at the "recommended" and "full" levels.
export const setSpanEventsEnabled = (level) => {
    spanEventsEnabled = ["recommended", "full"].includes(level);
};
|
|
17
|
+
// Starts a span under the currently-active context and decorates it with
// runWithContext / recordError / finish / isExisting helpers. When no tracer
// has been installed, returns a non-recording span carrying the same helpers.
export const startSpan = (name, options) => {
    if (!spanTracer) {
        return Object.assign(trace.wrapSpanContext(INVALID_SPAN_CONTEXT), NOOP_SPAN);
    }
    const parentContext = context.active();
    const activeSpan = trace.getActiveSpan();
    // Root spans are SERVER; spans nested under an existing span are INTERNAL.
    // Caller-supplied options may override the kind.
    const kind = activeSpan ? SpanKind.INTERNAL : SpanKind.SERVER;
    const span = spanTracer.startSpan(name, { kind, ...options }, parentContext);
    return Object.assign(span, {
        runWithContext: (fn) => context.with(trace.setSpan(parentContext, span), fn),
        recordError: (error) => {
            const err = error instanceof Error ? error : new Error(String(error));
            span.recordException(err);
            span.setStatus({ code: SpanStatusCode.ERROR, message: err.message });
        },
        finish: () => span.end(),
        isExisting: Boolean(activeSpan),
    });
};
|
|
35
|
+
// Runs `run` inside a new span: errors are recorded on the span and
// rethrown, and the span is always ended. When no tracer is installed the
// callback runs directly with no span bookkeeping.
export const withSpan = async (name, run, options) => {
    if (!spanTracer) {
        return run();
    }
    const span = startSpan(name, options);
    try {
        const result = await span.runWithContext(run);
        return result;
    }
    catch (error) {
        span.recordError(error);
        throw error;
    }
    finally {
        span.finish();
    }
};
|
|
51
|
+
// Adds an event to the currently-active span, if span events are enabled
// and a span is active.
export const addSpanEvent = (name, attributes) => {
    if (!spanEventsEnabled) return;
    const span = trace.getActiveSpan();
    span?.addEvent(name, attributes);
};
|
|
56
|
+
// Sets attributes on the currently-active span; no-op when `attributes`
// is undefined or no span is active.
export const setSpanAttributes = (attributes) => {
    if (!attributes) return;
    const span = trace.getActiveSpan();
    span?.setAttributes(attributes);
};
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
import type { ProviderV3 } from "@ai-sdk/provider";
|
|
2
|
+
import type { Attributes, Tracer } from "@opentelemetry/api";
|
|
3
|
+
import type { ChatCompletions, ChatCompletionsBody, ChatCompletionsStream } from "./endpoints/chat-completions/schema";
|
|
4
|
+
import type { ConversationStorage } from "./endpoints/conversations/storage/types";
|
|
5
|
+
import type { Embeddings, EmbeddingsBody } from "./endpoints/embeddings/schema";
|
|
6
|
+
import type { Model, ModelList } from "./endpoints/models";
|
|
7
|
+
import type { Responses, ResponsesBody, ResponsesStream } from "./endpoints/responses/schema";
|
|
8
|
+
import type { Logger, LoggerConfig } from "./logger";
|
|
9
|
+
import type { ModelCatalog, ModelId } from "./models/types";
|
|
10
|
+
import type { ProviderId, ProviderRegistry } from "./providers/types";
|
|
11
|
+
export type GatewayOperation = "chat" | "embeddings" | "responses" | "models" | "conversations";
|
|
12
|
+
/**
|
|
13
|
+
* Per-request context shared across handlers and hooks.
|
|
14
|
+
*/
|
|
15
|
+
export type GatewayContext = {
|
|
16
|
+
/**
|
|
17
|
+
* Mutable bag for passing data between hooks.
|
|
18
|
+
*/
|
|
19
|
+
state: Record<string, unknown>;
|
|
20
|
+
/**
|
|
21
|
+
* OpenTelemetry attribute bag populated by hooks.
|
|
22
|
+
* Attributes set here are applied to both spans and all metric instruments.
|
|
23
|
+
*/
|
|
24
|
+
otel: Attributes;
|
|
25
|
+
/**
|
|
26
|
+
* Provider registry from config.
|
|
27
|
+
*/
|
|
28
|
+
providers: ProviderRegistry;
|
|
29
|
+
/**
|
|
30
|
+
* Model catalog from config.
|
|
31
|
+
*/
|
|
32
|
+
models: ModelCatalog;
|
|
33
|
+
/**
|
|
34
|
+
* Incoming request for the handler.
|
|
35
|
+
*/
|
|
36
|
+
request: Request;
|
|
37
|
+
/**
|
|
38
|
+
* Resolved request ID for logging and telemetry.
|
|
39
|
+
*/
|
|
40
|
+
requestId: string;
|
|
41
|
+
/**
|
|
42
|
+
* Parsed body from the request.
|
|
43
|
+
*/
|
|
44
|
+
body?: ChatCompletionsBody | EmbeddingsBody | ResponsesBody;
|
|
45
|
+
/**
|
|
46
|
+
* Incoming model ID.
|
|
47
|
+
*/
|
|
48
|
+
modelId?: ModelId;
|
|
49
|
+
/**
|
|
50
|
+
* Resolved model ID.
|
|
51
|
+
*/
|
|
52
|
+
resolvedModelId?: ModelId;
|
|
53
|
+
/**
|
|
54
|
+
* Operation type.
|
|
55
|
+
*/
|
|
56
|
+
operation?: GatewayOperation;
|
|
57
|
+
/**
|
|
58
|
+
* Resolved provider instance.
|
|
59
|
+
*/
|
|
60
|
+
provider?: ProviderV3;
|
|
61
|
+
/**
|
|
62
|
+
* Resolved provider ID.
|
|
63
|
+
*/
|
|
64
|
+
resolvedProviderId?: ProviderId;
|
|
65
|
+
/**
|
|
66
|
+
* Result returned by the handler (pre-response).
|
|
67
|
+
*/
|
|
68
|
+
result?: ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream;
|
|
69
|
+
/**
|
|
70
|
+
* Response object returned by the handler.
|
|
71
|
+
*/
|
|
72
|
+
response?: Response;
|
|
73
|
+
/**
|
|
74
|
+
* Error thrown during execution.
|
|
75
|
+
*/
|
|
76
|
+
error?: unknown;
|
|
77
|
+
};
|
|
78
|
+
/**
|
|
79
|
+
* Hook context: all fields readonly except `state` and `otel`.
|
|
80
|
+
*/
|
|
81
|
+
export type HookContext = Omit<Readonly<GatewayContext>, "state" | "otel"> & {
    // Mutable: hooks may write to the shared state bag.
    state: GatewayContext["state"];
    // Mutable: hooks may add OpenTelemetry attributes.
    otel: GatewayContext["otel"];
};
/** Narrows HookContext so the listed keys are guaranteed present (non-optional). */
type RequiredHookContext<K extends keyof GatewayContext> = Omit<HookContext, K> & Required<Pick<HookContext, K>>;
/** onRequest context: only the raw request is guaranteed. */
export type OnRequestHookContext = RequiredHookContext<"request">;
/** before context: body parsed and operation identified. */
export type BeforeHookContext = RequiredHookContext<"request" | "operation" | "body">;
/** resolveModelId context: incoming model ID is available. */
export type ResolveModelHookContext = RequiredHookContext<"request" | "operation" | "body" | "modelId">;
/** resolveProvider context: model ID already resolved to canonical form. */
export type ResolveProviderHookContext = RequiredHookContext<"request" | "operation" | "body" | "modelId" | "resolvedModelId">;
/** after context: provider resolved and handler result available. */
export type AfterHookContext = RequiredHookContext<"request" | "operation" | "body" | "modelId" | "resolvedModelId" | "provider" | "resolvedProviderId" | "result">;
/** onResponse context: the final Response object is available. */
export type OnResponseHookContext = RequiredHookContext<"request" | "response">;
/** onError context: the caught error is guaranteed present. */
export type OnErrorHookContext = RequiredHookContext<"error">;
|
|
93
|
+
/**
|
|
94
|
+
 * Hooks to plug into the gateway lifecycle.
|
|
95
|
+
*/
|
|
96
|
+
export type GatewayHooks = {
|
|
97
|
+
/**
|
|
98
|
+
* Runs before any endpoint handler logic.
|
|
99
|
+
* @returns Optional Response to short-circuit the request.
|
|
100
|
+
*/
|
|
101
|
+
onRequest?: (ctx: OnRequestHookContext) => void | Response | Promise<void | Response>;
|
|
102
|
+
/**
|
|
103
|
+
* Runs after request JSON is parsed and validated for chat completions / embeddings / responses.
|
|
104
|
+
* @returns Replacement parsed body, or undefined to keep original.
|
|
105
|
+
*/
|
|
106
|
+
before?: (ctx: BeforeHookContext) => void | ChatCompletionsBody | EmbeddingsBody | ResponsesBody | Promise<void | ChatCompletionsBody | EmbeddingsBody | ResponsesBody>;
|
|
107
|
+
/**
|
|
108
|
+
* Maps a user-provided model ID or alias to a canonical ID.
|
|
109
|
+
* @returns Canonical model ID or undefined to keep original.
|
|
110
|
+
*/
|
|
111
|
+
resolveModelId?: (ctx: ResolveModelHookContext) => ModelId | void | Promise<ModelId | void>;
|
|
112
|
+
/**
|
|
113
|
+
* Picks a provider instance for the request.
|
|
114
|
+
* @returns ProviderV3 to override, or undefined to use default.
|
|
115
|
+
*/
|
|
116
|
+
resolveProvider?: (ctx: ResolveProviderHookContext) => ProviderV3 | void | Promise<ProviderV3 | void>;
|
|
117
|
+
/**
|
|
118
|
+
* Runs after the endpoint handler.
|
|
119
|
+
* @returns Result to replace, or undefined to keep original.
|
|
120
|
+
*/
|
|
121
|
+
after?: (ctx: AfterHookContext) => void | ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream | Promise<void | ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream>;
|
|
122
|
+
/**
|
|
123
|
+
* Runs after the lifecycle has produced the final Response.
|
|
124
|
+
* @returns Replacement Response, or undefined to keep original.
|
|
125
|
+
*/
|
|
126
|
+
onResponse?: (ctx: OnResponseHookContext) => void | Response | Promise<void | Response>;
|
|
127
|
+
/**
|
|
128
|
+
* Runs when the lifecycle catches an error.
|
|
129
|
+
* @returns Optional Response to replace the default error response.
|
|
130
|
+
*/
|
|
131
|
+
onError?: (ctx: OnErrorHookContext) => void | Response | Promise<void | Response>;
|
|
132
|
+
};
|
|
133
|
+
export type TelemetrySignalLevel = "off" | "required" | "recommended" | "full";
|
|
134
|
+
export declare const DEFAULT_CHAT_TIMEOUT_MS: number;
|
|
135
|
+
export type GatewayTimeout = number | null | {
|
|
136
|
+
/**
|
|
137
|
+
* Default timeout used.
|
|
138
|
+
*/
|
|
139
|
+
normal?: number | null;
|
|
140
|
+
/**
|
|
141
|
+
* Timeout used when `service_tier=flex`.
|
|
142
|
+
* Defaults to 3x `normal` when omitted.
|
|
143
|
+
*/
|
|
144
|
+
flex?: number | null;
|
|
145
|
+
};
|
|
146
|
+
/**
 * Main configuration object for the gateway.
 */
export type GatewayConfig = {
    /**
     * Optional base path the gateway is mounted under (e.g. "/v1/gateway").
     */
    basePath?: string;
    /**
     * Provider registry keyed by canonical provider IDs.
     */
    providers: ProviderRegistry;
    /**
     * Model catalog keyed by canonical model IDs.
     */
    models: ModelCatalog;
    /**
     * Optional lifecycle hooks for routing, auth, and response shaping.
     */
    hooks?: GatewayHooks;
    /**
     * Logger configuration: a custom logger instance, settings for the
     * default logger, or `null`.
     */
    logger?: Logger | LoggerConfig | null;
    /**
     * Optional conversation storage backend.
     * Defaults to an in-memory storage if not provided.
     */
    storage?: ConversationStorage;
    /**
     * Optional AI SDK telemetry configuration.
     */
    telemetry?: {
        /**
         * Enable AI SDK OpenTelemetry instrumentation.
         * Disabled by default.
         */
        enabled?: boolean;
        /**
         * Optional custom OpenTelemetry tracer passed to AI SDK telemetry.
         */
        tracer?: Tracer;
        /**
         * Telemetry signal levels by namespace.
         * - off: disable the namespace
         * - required: minimal baseline
         * - recommended: practical defaults
         * - full: include all available details
         */
        signals?: {
            gen_ai?: TelemetrySignalLevel;
            http?: TelemetrySignalLevel;
            hebo?: TelemetrySignalLevel;
        };
    };
    /**
     * Optional timeout for server responses.
     * Supports a number in milliseconds, or tiered config.
     */
    timeouts?: GatewayTimeout;
    /**
     * Maximum *decompressed* request body size in bytes for gzip/deflate-encoded requests.
     * Plain (uncompressed) request body size limits should be configured at the
     * framework or server level (e.g. Hono `bodyLimit` middleware, Bun `maxRequestBodySize`).
     * Set to `0` to disable the decompressed size limit.
     * Defaults to 10 MB (10,485,760 bytes).
     */
    maxBodySize?: number;
};
|
|
215
|
+
/**
 * Brand symbol stamped onto a config object once it has been parsed
 * (see `GatewayConfigParsed`).
 */
export declare const kParsed: unique symbol;
/**
 * `GatewayConfig` after parsing: `storage` is always present, `timeouts`
 * is flattened to plain per-tier numbers, `maxBodySize` is resolved to a
 * concrete value, and the `kParsed` brand is set.
 */
export type GatewayConfigParsed = Omit<GatewayConfig, "storage" | "timeouts"> & {
    storage: ConversationStorage;
    timeouts: {
        normal?: number;
        flex?: number;
    };
    maxBodySize: number;
    [kParsed]: true;
};
|
|
225
|
+
/**
 * A gateway endpoint: a Web-standard request handler that may receive
 * optional per-request state.
 */
export interface Endpoint {
    handler: (request: Request, state?: Record<string, unknown>) => Promise<Response>;
}
/**
 * A gateway instance: itself an `Endpoint`, additionally exposing its
 * individual route `Endpoint`s via `routes`.
 */
export interface HeboGateway<Routes extends Record<string, Endpoint>> extends Endpoint {
    routes: Routes;
}
export {};
|
package/dist/types.js
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Default maximum decompressed body size (10 MB).
 */
export declare const DEFAULT_MAX_BODY_SIZE: number;
/**
 * Parse a request body as JSON, handling Content-Encoding decompression
 * and enforcing a decompressed body size limit for compressed requests.
 *
 * For plain (uncompressed) requests, body size enforcement is expected to be
 * handled by the parent framework (e.g. Hono's `bodyLimit` middleware, or
 * Bun/Node server-level `maxRequestBodySize`). This utility only enforces
 * `maxBodySize` on the *decompressed* output of gzip/deflate streams, since
 * the framework cannot know the decompressed size ahead of time.
 *
 * @param request - Incoming Web API Request
 * @param maxBodySize - Maximum decompressed body size in bytes. `0` disables the limit. Defaults to 10 MB.
 * @returns Parsed JSON body
 * @throws GatewayError - 400 for malformed or empty bodies, 413 when the
 *   decompressed size exceeds `maxBodySize`, 415 for unsupported encodings.
 */
export declare function parseRequestBody(request: Request, maxBodySize?: number): Promise<unknown>;
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { GatewayError } from "../errors/gateway";
|
|
2
|
+
/**
 * Supported Content-Encoding values for request body decompression.
 * Uses the Web Compression Streams API (`DecompressionStream`) for runtime portability.
 */
const SUPPORTED_ENCODINGS = new Set(["gzip", "deflate"]);
/**
 * Default maximum decompressed body size (10 MB).
 */
export const DEFAULT_MAX_BODY_SIZE = 10 * 1024 * 1024;
/**
 * Parse a request body as JSON, handling Content-Encoding decompression
 * and enforcing a decompressed body size limit for compressed requests.
 *
 * For plain (uncompressed) requests, body size enforcement is expected to be
 * handled by the parent framework (e.g. Hono's `bodyLimit` middleware, or
 * Bun/Node server-level `maxRequestBodySize`). This utility only enforces
 * `maxBodySize` on the *decompressed* output of gzip/deflate streams, since
 * the framework cannot know the decompressed size ahead of time.
 *
 * @param request - Incoming Web API Request
 * @param maxBodySize - Maximum decompressed body size in bytes. `0` disables the limit. Defaults to 10 MB.
 * @returns Parsed JSON body
 */
export function parseRequestBody(request, maxBodySize = DEFAULT_MAX_BODY_SIZE) {
    // Content-coding names are case-insensitive (RFC 9110 §8.4.1) and header
    // values may carry surrounding whitespace — normalize before matching so
    // e.g. "GZIP" or " gzip " is not wrongly rejected with 415.
    const encoding = request.headers.get("content-encoding")?.trim().toLowerCase();
    // No encoding — delegate to framework for size enforcement, just parse JSON.
    if (!encoding || encoding === "identity") {
        return parsePlainBody(request);
    }
    // Reject unsupported encodings early.
    if (!SUPPORTED_ENCODINGS.has(encoding)) {
        throw new GatewayError(`Unsupported Content-Encoding: ${encoding}`, 415);
    }
    return parseCompressedBody(request, encoding, maxBodySize);
}
|
|
37
|
+
/**
 * Parse an uncompressed (identity-encoded) request body as JSON.
 *
 * @param request - Incoming Web API Request
 * @returns Parsed JSON body
 * @throws GatewayError 400 when the body is not valid JSON
 */
async function parsePlainBody(request) {
    let parsed;
    try {
        parsed = await request.json();
    }
    catch {
        throw new GatewayError("Invalid JSON", 400);
    }
    return parsed;
}
|
|
45
|
+
/**
 * Decompress a gzip/deflate request body, enforce the decompressed size
 * limit, and parse the result as JSON.
 *
 * @param request - Incoming Web API Request (must have a body stream)
 * @param encoding - Content-Encoding value ("gzip" or "deflate")
 * @param maxBodySize - Decompressed size limit in bytes; `0` disables it
 * @returns Parsed JSON body
 * @throws GatewayError 400 (empty/invalid body or JSON), 413 (over limit)
 */
async function parseCompressedBody(request, encoding, maxBodySize) {
    if (!request.body) {
        throw new GatewayError("Empty request body", 400);
    }
    // Wire the body through the Web Compression Streams API.
    let inflated;
    try {
        inflated = request.body.pipeThrough(new DecompressionStream(encoding));
    }
    catch {
        throw new GatewayError("Invalid compressed body", 400);
    }
    // Drain the decompressed stream, enforcing the size limit as we go.
    const parts = [];
    let size = 0;
    try {
        const reader = inflated.getReader();
        while (true) {
            // oxlint-disable-next-line no-await-in-loop -- sequential stream reads
            const { done, value } = await reader.read();
            if (done) {
                break;
            }
            size += value.byteLength;
            if (maxBodySize > 0 && size > maxBodySize) {
                void reader.cancel();
                throw new GatewayError(`Decompressed body too large (exceeds ${maxBodySize} byte limit)`, 413);
            }
            parts.push(value);
        }
    }
    catch (error) {
        if (error instanceof GatewayError) {
            throw error;
        }
        throw new GatewayError("Invalid compressed body", 400);
    }
    if (size === 0) {
        throw new GatewayError("Empty request body", 400);
    }
    // Stitch the chunks together (fast path for a single chunk) and parse.
    try {
        let text;
        if (parts.length === 1) {
            text = new TextDecoder().decode(parts[0]);
        }
        else {
            const merged = new Uint8Array(size);
            let offset = 0;
            for (const part of parts) {
                merged.set(part, offset);
                offset += part.byteLength;
            }
            text = new TextDecoder().decode(merged);
        }
        return JSON.parse(text);
    }
    catch {
        throw new GatewayError("Invalid JSON", 400);
    }
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
/**
 * Resolve the current NODE_ENV value.
 *
 * Read lazily on every call — rather than captured once at module load — so
 * that late changes (e.g. a test harness assigning `process.env.NODE_ENV`
 * after this module is imported) are observed by `isProduction`/`isTest`.
 *
 * Falls back to `globalThis.NODE_ENV` / `globalThis.ENV.NODE_ENV` on
 * runtimes without a `process` global.
 */
const readNodeEnv = () => typeof process === "undefined"
    ? // oxlint-disable-next-line no-unsafe-member-access
        (globalThis.NODE_ENV ?? globalThis.ENV?.NODE_ENV)
    : process.env?.["NODE_ENV"];
/** True when NODE_ENV is exactly "production". */
export const isProduction = () => readNodeEnv() === "production";
/** True when NODE_ENV is exactly "test". */
export const isTest = () => readNodeEnv() === "test";
|