@hebo-ai/gateway 0.11.2 → 0.11.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -160,6 +160,7 @@ export const getChatResponseAttributes = (completions, signalLevel) => {
     "gen_ai.usage.total_tokens": completions.usage?.total_tokens,
     "gen_ai.usage.input_tokens": completions.usage?.prompt_tokens,
     "gen_ai.usage.cache_read.input_tokens": completions.usage?.prompt_tokens_details?.cached_tokens,
+    "gen_ai.usage.cache_creation.input_tokens": completions.usage?.prompt_tokens_details?.cache_write_tokens,
     "gen_ai.usage.output_tokens": completions.usage?.completion_tokens,
     "gen_ai.usage.reasoning.output_tokens": completions.usage?.completion_tokens_details?.reasoning_tokens,
 });
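The only change in this hunk is the new `cache_creation.input_tokens` attribute. A hypothetical upstream usage payload (values illustrative; field names follow the accessors above) shows what the mapping now extracts:

```ts
// Illustrative payload only; the shape mirrors the optional chains in the hunk above.
const completions = {
  usage: {
    total_tokens: 1300,
    prompt_tokens: 1000,
    completion_tokens: 300,
    prompt_tokens_details: { cached_tokens: 600, cache_write_tokens: 100 },
    completion_tokens_details: { reasoning_tokens: 120 },
  },
};
// Resulting attributes (among others):
//   "gen_ai.usage.cache_read.input_tokens"     -> 600
//   "gen_ai.usage.cache_creation.input_tokens" -> 100  (new in 0.11.3)
```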
@@ -625,14 +625,12 @@ export class MessagesTransformStream extends TransformStream {
       }
       case "finish": {
         const stopReason = mapStopReason(part.finishReason);
-        const totalOutputTokens = part.totalUsage?.outputTokens ?? 0;
-        const totalInputTokens = part.totalUsage?.inputTokens ?? 0;
         controller.enqueue({
           event: "message_delta",
           data: {
             type: "message_delta",
             delta: { stop_reason: stopReason, stop_sequence: null },
-            usage: { output_tokens: totalOutputTokens, input_tokens: totalInputTokens },
+            usage: mapUsage(part.totalUsage),
           },
         });
         controller.enqueue({
@@ -647,10 +647,7 @@ export type MessageDeltaEvent = SseFrame<{
     stop_reason: MessagesStopReason;
     stop_sequence: string | null;
   };
-  usage: {
-    output_tokens: number;
-    input_tokens?: number;
-  };
+  usage: MessagesUsage;
 }, "message_delta">;
 export type MessageStopEvent = SseFrame<{
   type: "message_stop";
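`mapUsage` and `MessagesUsage` are referenced here but not shown in this diff. A minimal sketch of shapes they could plausibly take, assuming Anthropic's /messages usage field names; treat every identifier in it as hypothetical:

```ts
// Hypothetical sketch only; the real definitions are not part of this diff.
type MessagesUsage = {
  input_tokens: number;
  output_tokens: number;
  cache_read_input_tokens?: number;     // assumed, mirroring Anthropic's usage block
  cache_creation_input_tokens?: number; // assumed
};

const mapUsage = (totalUsage?: { inputTokens?: number; outputTokens?: number }): MessagesUsage => ({
  input_tokens: totalUsage?.inputTokens ?? 0,
  output_tokens: totalUsage?.outputTokens ?? 0,
});
```

Whatever the real shape, centralizing the conversion lets the SSE `message_delta` frame share one usage type instead of the ad-hoc inline object removed above.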
@@ -6,10 +6,19 @@ const normalizeApiCallError = (error) => {
     const statusText = `UPSTREAM_${STATUS_TEXT(status)}`;
     return new GatewayError(error, status, statusText, undefined, error.responseHeaders ?? undefined);
 };
+// `AbortError` / `TimeoutError` (raised by the AI SDK's internal `timeout` controller,
+// `AbortSignal.timeout`, or an aborted upstream `fetch`) reach us as plain DOMExceptions
+// that none of the AI SDK error classes match. Treat them as upstream gateway timeouts
+// so they surface as 504 with retry headers rather than defaulting to 500/502.
+// Inbound client disconnects are caught earlier in `lifecycle.ts` and overridden to 499.
+const isUpstreamAbortError = (error) => error instanceof Error && (error.name === "AbortError" || error.name === "TimeoutError");
 export const normalizeAiSdkError = (error) => {
     if (APICallError.isInstance(error)) {
         return normalizeApiCallError(error);
     }
+    if (isUpstreamAbortError(error)) {
+        return new GatewayError(error, 504, `UPSTREAM_${STATUS_TEXT(504)}`);
+    }
     if (RetryError.isInstance(error)) {
         if (APICallError.isInstance(error.lastError)) {
             return normalizeApiCallError(error.lastError);
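The new branch keys off standard platform behavior: both `AbortSignal.timeout()` and a manual `AbortController.abort()` reject `fetch` with a plain `DOMException`, distinguishable only by `name`. A minimal sketch (placeholder URL):

```ts
// Sketch of the error shape the isUpstreamAbortError branch catches.
async function probeUpstream(): Promise<void> {
  try {
    // AbortSignal.timeout() rejects with a DOMException named "TimeoutError";
    // controller.abort() without a reason rejects with one named "AbortError".
    await fetch("https://upstream.example/v1/chat", { signal: AbortSignal.timeout(5_000) });
  } catch (error) {
    if (error instanceof Error && (error.name === "AbortError" || error.name === "TimeoutError")) {
      // normalizeAiSdkError(error) now returns a 504 GatewayError
      // instead of falling through to a generic 500/502.
    }
  }
}
```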
package/dist/lifecycle.js CHANGED
@@ -50,11 +50,12 @@ export const winterCgHandler = (run, config) => {
     else if (status === 200 && ctx.response?.status)
         realStatus = ctx.response.status;
     if (realStatus !== 200) {
+        const err = reason ?? ctx.request.signal.reason;
         logger[realStatus >= 500 ? "error" : "warn"]({
             requestId: ctx.requestId,
-            err: reason ?? ctx.request.signal.reason,
+            err,
         });
-        span.recordError(reason, true);
+        span.recordError(err, true);
     }
     span.setAttributes({ "http.response.status_code_effective": realStatus });
     if (ctx.operation === "chat" ||
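Why resolving `err` once matters: when the inbound request is aborted, the handler's local `reason` can be undefined while the abort cause lives on the request signal, so 0.11.2 logged the resolved value but passed the raw `reason` to `span.recordError`. An illustration of the signal behavior this relies on:

```ts
// Standard AbortController behavior (illustrative values):
const controller = new AbortController();
controller.abort(new Error("client disconnected"));
console.log(controller.signal.reason); // Error: client disconnected
// 0.11.3 computes `reason ?? ctx.request.signal.reason` once and feeds the
// same value to both the logger and span.recordError.
```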
@@ -1,5 +1,6 @@
 import { metrics } from "@opentelemetry/api";
 import { STATUS_TEXT } from "../errors/utils";
+import { logger } from "../logger";
 const getMeter = () => metrics.getMeter("@hebo/gateway");
 let requestDurationHistogram;
 let timePerOutputTokenHistogram;
@@ -93,20 +94,59 @@ export const recordTimePerOutputToken = (start, ttft, tokenAttrs, metricAttrs, s
         return;
     getTimePerOutputTokenHistogram().record((performance.now() - start - ttft) / 1000 / (outputTokens - 1), metricAttrs);
 };
+// Partitioning follows OTel semconv PR #3624:
+// https://github.com/open-telemetry/semantic-conventions/pull/3624
+// When a cache or reasoning breakdown is reported, partitioned data points sum
+// to the total and a bare {type} point MUST NOT be emitted alongside them.
 // FUTURE: record unsuccessful calls
 export const recordTokenUsage = (tokenAttrs, metricAttrs, signalLevel) => {
-    if (!signalLevel || (signalLevel !== "recommended" && signalLevel !== "full"))
+    if (signalLevel !== "recommended" && signalLevel !== "full")
         return;
-    const record = (value, tokenType) => {
-        if (typeof value !== "number")
-            return;
-        getTokenUsageHistogram().record(value, Object.assign({}, metricAttrs, { "gen_ai.token.type": tokenType }));
+    const histogram = getTokenUsageHistogram();
+    const emit = (value, extra) => {
+        if (value > 0)
+            histogram.record(value, { ...metricAttrs, ...extra });
     };
-    record(tokenAttrs["gen_ai.usage.input_tokens"], "input");
-    record(tokenAttrs["gen_ai.usage.output_tokens"], "output");
-    // FUTURE: "cached" and "reasoning" token types are not yet in the OTel standard — monitor:
-    // https://github.com/open-telemetry/semantic-conventions/issues/1959
-    // https://github.com/open-telemetry/semantic-conventions/issues/3341
-    record(tokenAttrs["gen_ai.usage.cache_read.input_tokens"], "cached");
-    record(tokenAttrs["gen_ai.usage.reasoning.output_tokens"], "reasoning");
+    emitInputTokens(emit, tokenAttrs);
+    emitOutputTokens(emit, tokenAttrs);
+};
+const emitInputTokens = (emit, tokenAttrs) => {
+    const total = tokenAttrs["gen_ai.usage.input_tokens"];
+    if (total === undefined)
+        return;
+    const cacheRead = tokenAttrs["gen_ai.usage.cache_read.input_tokens"];
+    const cacheCreation = tokenAttrs["gen_ai.usage.cache_creation.input_tokens"];
+    if (cacheRead === undefined && cacheCreation === undefined) {
+        emit(total, { "gen_ai.token.type": "input" });
+        return;
+    }
+    const read = cacheRead ?? 0;
+    const creation = cacheCreation ?? 0;
+    let uncached = total - read - creation;
+    if (uncached < 0) {
+        logger.warn({ inputTokens: total, cacheRead: read, cacheCreation: creation }, "[telemetry] input token cache partitions exceed total; clamping uncached to 0");
+        uncached = 0;
+    }
+    emit(read, { "gen_ai.token.type": "input", "gen_ai.token.cache": "read" });
+    emit(creation, { "gen_ai.token.type": "input", "gen_ai.token.cache": "creation" });
+    emit(uncached, { "gen_ai.token.type": "input", "gen_ai.token.cache": "uncached" });
+};
+const emitOutputTokens = (emit, tokenAttrs) => {
+    const total = tokenAttrs["gen_ai.usage.output_tokens"];
+    if (total === undefined)
+        return;
+    const reasoning = tokenAttrs["gen_ai.usage.reasoning.output_tokens"];
+    if (reasoning === undefined) {
+        emit(total, { "gen_ai.token.type": "output" });
+        return;
+    }
+    let reasoned = reasoning;
+    let nonReasoning = total - reasoning;
+    if (nonReasoning < 0) {
+        logger.warn({ outputTokens: total, reasoningTokens: reasoning }, "[telemetry] reasoning tokens exceed output total; clamping non-reasoning to 0");
+        reasoned = total;
+        nonReasoning = 0;
+    }
+    emit(reasoned, { "gen_ai.token.type": "output", "gen_ai.token.reasoning": true });
+    emit(nonReasoning, { "gen_ai.token.type": "output", "gen_ai.token.reasoning": false });
 };
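A worked example of the new partitioning (illustrative values):

```ts
const tokenAttrs = {
  "gen_ai.usage.input_tokens": 1000,
  "gen_ai.usage.cache_read.input_tokens": 600,
  "gen_ai.usage.cache_creation.input_tokens": 100,
  "gen_ai.usage.output_tokens": 300,
  "gen_ai.usage.reasoning.output_tokens": 120,
};
// recordTokenUsage(tokenAttrs, metricAttrs, "recommended") emits five data points.
// Input partitions (sum = 1000; no bare "input" point is emitted):
//   600 { "gen_ai.token.type": "input",  "gen_ai.token.cache": "read" }
//   100 { "gen_ai.token.type": "input",  "gen_ai.token.cache": "creation" }
//   300 { "gen_ai.token.type": "input",  "gen_ai.token.cache": "uncached" }  // 1000 - 600 - 100
// Output partitions (sum = 300):
//   120 { "gen_ai.token.type": "output", "gen_ai.token.reasoning": true }
//   180 { "gen_ai.token.type": "output", "gen_ai.token.reasoning": false }   // 300 - 120
```

Under 0.11.2 the same call would have recorded input = 1000 alongside a non-standard cached = 600 point, so summing across token types overcounted cache-heavy traffic; the partitioned points now sum exactly to their totals.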
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@hebo-ai/gateway",
-  "version": "0.11.2",
+  "version": "0.11.3",
   "description": "AI gateway as a framework. For full control over models, routing & lifecycle. OpenAI /chat/completions, OpenResponses /responses & Anthropic /messages.",
   "keywords": [
     "ai",
@@ -252,36 +252,36 @@
     "fix": "bun lint:staged && bun format:staged"
   },
   "dependencies": {
-    "@ai-sdk/provider": "^3.0.8",
-    "ai": "^6.0.168",
+    "@ai-sdk/provider": "^3.0.9",
+    "ai": "^6.0.169",
     "lru-cache": "^11.3.5",
     "uuid": "^14.0.0",
     "zod": "^4.3.6"
   },
   "devDependencies": {
-    "@ai-sdk/alibaba": "^1.0.17",
-    "@ai-sdk/amazon-bedrock": "^4.0.96",
-    "@ai-sdk/anthropic": "^3.0.71",
-    "@ai-sdk/cohere": "^3.0.30",
-    "@ai-sdk/deepinfra": "^2.0.45",
-    "@ai-sdk/deepseek": "^2.0.29",
-    "@ai-sdk/fireworks": "^2.0.46",
-    "@ai-sdk/google-vertex": "^4.0.112",
-    "@ai-sdk/groq": "^3.0.35",
-    "@ai-sdk/moonshotai": "^2.0.16",
-    "@ai-sdk/openai": "^3.0.53",
-    "@ai-sdk/togetherai": "^2.0.45",
-    "@ai-sdk/xai": "^3.0.83",
+    "@ai-sdk/alibaba": "^1.0.18",
+    "@ai-sdk/amazon-bedrock": "^4.0.97",
+    "@ai-sdk/anthropic": "^3.0.72",
+    "@ai-sdk/cohere": "^3.0.31",
+    "@ai-sdk/deepinfra": "^2.0.46",
+    "@ai-sdk/deepseek": "^2.0.30",
+    "@ai-sdk/fireworks": "^2.0.47",
+    "@ai-sdk/google-vertex": "^4.0.113",
+    "@ai-sdk/groq": "^3.0.36",
+    "@ai-sdk/moonshotai": "^2.0.17",
+    "@ai-sdk/openai": "^3.0.54",
+    "@ai-sdk/togetherai": "^2.0.46",
+    "@ai-sdk/xai": "^3.0.84",
     "@anthropic-ai/sdk": "^0.91.1",
-    "@aws-sdk/credential-providers": "^3.1037.0",
+    "@aws-sdk/credential-providers": "^3.1038.0",
     "@langfuse/otel": "^5.2.0",
     "@libsql/client": "^0.17.3",
     "@mjackson/node-fetch-server": "^0.7.0",
     "@opentelemetry/api": "^1.9.1",
     "@opentelemetry/context-async-hooks": "^2.7.0",
     "@opentelemetry/sdk-trace-base": "^2.7.0",
-    "@tanstack/react-router": "^1.168.24",
-    "@tanstack/react-start": "^1.167.49",
+    "@tanstack/react-router": "^1.168.25",
+    "@tanstack/react-start": "^1.167.50",
     "@types/better-sqlite3": "^7.6.13",
     "@types/bun": "1.3.13",
     "@types/pg": "^8.20.0",
@@ -294,10 +294,10 @@
     "lefthook": "^2.1.6",
     "mysql2": "^3.22.3",
     "next": "^16.2.4",
-    "openai": "^6.34.0",
+    "openai": "^6.35.0",
     "oxfmt": "^0.46.0",
-    "oxlint": "^1.61.0",
-    "oxlint-tsgolint": "^0.22.0",
+    "oxlint": "^1.62.0",
+    "oxlint-tsgolint": "^0.22.1",
     "pg": "^8.20.0",
     "pino": "^10.3.1",
     "postgres": "^3.4.9",
@@ -315,12 +315,12 @@
     "@ai-sdk/deepinfra": "^2.0.45",
     "@ai-sdk/deepseek": "^2.0.29",
     "@ai-sdk/fireworks": "^2.0.46",
-    "@ai-sdk/google": "^3.0.64",
+    "@ai-sdk/google": "^3.0.65",
     "@ai-sdk/google-vertex": "^4.0.80",
     "@ai-sdk/groq": "^3.0.29",
     "@ai-sdk/moonshotai": "^2.0.16",
     "@ai-sdk/openai": "^3.0.41",
-    "@ai-sdk/openai-compatible": "^2.0.41",
+    "@ai-sdk/openai-compatible": "^2.0.42",
     "@ai-sdk/togetherai": "^2.0.45",
     "@ai-sdk/xai": "^3.0.83",
     "@libsql/client": "^0.14.0",