@hebo-ai/gateway 0.9.3 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +23 -12
  2. package/dist/endpoints/chat-completions/otel.js +0 -1
  3. package/dist/endpoints/chat-completions/schema.d.ts +289 -57
  4. package/dist/endpoints/conversations/schema.d.ts +200 -40
  5. package/dist/endpoints/messages/converters.d.ts +24 -0
  6. package/dist/endpoints/messages/converters.js +661 -0
  7. package/dist/endpoints/messages/handler.d.ts +2 -0
  8. package/dist/endpoints/messages/handler.js +142 -0
  9. package/dist/endpoints/messages/index.d.ts +4 -0
  10. package/dist/endpoints/messages/index.js +4 -0
  11. package/dist/endpoints/messages/otel.d.ts +6 -0
  12. package/dist/endpoints/messages/otel.js +171 -0
  13. package/dist/endpoints/messages/schema.d.ts +623 -0
  14. package/dist/endpoints/messages/schema.js +185 -0
  15. package/dist/endpoints/responses/otel.js +0 -1
  16. package/dist/endpoints/responses/schema.d.ts +237 -45
  17. package/dist/endpoints/shared/schema.d.ts +23 -2
  18. package/dist/endpoints/shared/schema.js +3 -1
  19. package/dist/errors/anthropic.d.ts +10 -0
  20. package/dist/errors/anthropic.js +46 -0
  21. package/dist/errors/openai.js +1 -10
  22. package/dist/errors/utils.d.ts +3 -1
  23. package/dist/errors/utils.js +9 -0
  24. package/dist/gateway.d.ts +1 -0
  25. package/dist/gateway.js +2 -0
  26. package/dist/index.d.ts +1 -0
  27. package/dist/index.js +1 -0
  28. package/dist/lifecycle.js +12 -3
  29. package/dist/models/anthropic/middleware.js +5 -0
  30. package/dist/providers/bedrock/middleware.js +16 -1
  31. package/dist/providers/registry.d.ts +1 -1
  32. package/dist/telemetry/gen-ai.js +4 -0
  33. package/dist/types.d.ts +6 -5
  34. package/dist/utils/response.d.ts +1 -0
  35. package/dist/utils/stream.d.ts +1 -0
  36. package/dist/utils/stream.js +10 -3
  37. package/package.json +14 -3
package/dist/endpoints/shared/schema.js CHANGED
@@ -8,15 +8,17 @@ import * as z from "zod";
  // Note: The 16-key limit is not currently validated.
  export const CacheControlSchema = z.object({
      type: z.literal("ephemeral"),
-     ttl: z.string().optional(),
+     ttl: z.enum(["5m", "1h", "24h"]).optional(),
  });
  export const ProviderMetadataSchema = z.record(z.string(), z.record(z.string(), z.any()));
  export const ReasoningEffortSchema = z.enum(["none", "minimal", "low", "medium", "high", "xhigh"]);
+ export const ReasoningSummarySchema = z.enum(["auto", "concise", "detailed", "none"]);
  export const ReasoningConfigSchema = z.object({
      enabled: z.optional(z.boolean()),
      effort: z.optional(ReasoningEffortSchema),
      max_tokens: z.optional(z.number()),
      exclude: z.optional(z.boolean()),
+     summary: z.optional(ReasoningSummarySchema),
  });
  export const ServiceTierSchema = z.enum(["auto", "default", "flex", "scale", "priority"]);
  const InputAudioFormatSchema = z.enum([
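A hedged sketch of what the tightened and extended schemas now accept; the import path is illustrative, since this diff shows no public subpath for the shared schemas:

```ts
// Illustrative path — these schemas live in dist/endpoints/shared/schema.js,
// but no public export for them appears in this diff.
import { CacheControlSchema, ReasoningConfigSchema } from "@hebo-ai/gateway/endpoints/shared/schema";

// ttl was any string in 0.9.3; it is now the enum "5m" | "1h" | "24h".
CacheControlSchema.parse({ type: "ephemeral", ttl: "1h" });    // ok
// CacheControlSchema.parse({ type: "ephemeral", ttl: "2h" }); // now rejected

// summary is new, riding alongside effort.
ReasoningConfigSchema.parse({ enabled: true, effort: "high", summary: "auto" }); // ok
```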
package/dist/errors/anthropic.d.ts ADDED
@@ -0,0 +1,10 @@
+ export declare class AnthropicError {
+     readonly type = "error";
+     readonly error: {
+         type: string;
+         message: string;
+     };
+     constructor(message: string, type?: string);
+ }
+ export declare function toAnthropicError(error: unknown): AnthropicError;
+ export declare function toAnthropicErrorResponse(error: unknown, responseInit?: ResponseInit): Response;
package/dist/errors/anthropic.js ADDED
@@ -0,0 +1,46 @@
+ import { resolveRequestId } from "../utils/headers";
+ import { toResponse } from "../utils/response";
+ import { getErrorMeta, maybeMaskMessage } from "./utils";
+ export class AnthropicError {
+     type = "error";
+     error;
+     constructor(message, type = "api_error") {
+         this.error = { type, message };
+     }
+ }
+ const mapType = (status) => {
+     switch (status) {
+         case 400:
+             return "invalid_request_error";
+         case 401:
+             return "authentication_error";
+         case 403:
+             return "permission_error";
+         case 404:
+             return "not_found_error";
+         case 402:
+             return "billing_error";
+         case 413:
+             return "request_too_large";
+         case 429:
+             return "rate_limit_error";
+         case 504:
+             return "timeout_error";
+         case 529:
+             return "overloaded_error";
+         default:
+             return status >= 500 ? "api_error" : "invalid_request_error";
+     }
+ };
+ export function toAnthropicError(error) {
+     const meta = getErrorMeta(error);
+     return new AnthropicError(maybeMaskMessage(meta), mapType(meta.status));
+ }
+ export function toAnthropicErrorResponse(error, responseInit) {
+     const meta = getErrorMeta(error);
+     return toResponse(new AnthropicError(maybeMaskMessage(meta, resolveRequestId(responseInit)), mapType(meta.status)), {
+         status: meta.status,
+         statusText: meta.code,
+         headers: responseInit?.headers,
+     });
+ }
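These helpers produce Anthropic's error envelope; a minimal sketch using the ./errors/anthropic subpath added in this release (the triggering error is illustrative):

```ts
import { AnthropicError, toAnthropicErrorResponse } from "@hebo-ai/gateway/errors/anthropic";

// Serializes to Anthropic's envelope:
// {"type":"error","error":{"type":"rate_limit_error","message":"Too many requests"}}
const err = new AnthropicError("Too many requests", "rate_limit_error");
console.log(JSON.stringify(err));

// Wrap an arbitrary thrown value into a full HTTP Response; status and type
// come from getErrorMeta and the mapType switch above.
const res: Response = toAnthropicErrorResponse(new Error("upstream failed"));
```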
package/dist/errors/openai.js CHANGED
@@ -1,8 +1,7 @@
  import * as z from "zod";
- import { isProduction } from "../utils/env";
  import { resolveRequestId } from "../utils/headers";
  import { toResponse } from "../utils/response";
- import { getErrorMeta, STATUS_CODE } from "./utils";
+ import { getErrorMeta, maybeMaskMessage } from "./utils";
  export const OpenAIErrorSchema = z.object({
      error: z.object({
          message: z.string(),
@@ -18,14 +17,6 @@ export class OpenAIError {
      }
  }
  const mapType = (status) => (status < 500 ? "invalid_request_error" : "server_error");
- const maybeMaskMessage = (meta, requestId) => {
-     // FUTURE: consider masking all upstream errors, also 4xx
-     if (!(isProduction() && meta.status >= 500)) {
-         return meta.message;
-     }
-     // FUTURE: always attach requestId to errors (masked and unmasked)
-     return `${STATUS_CODE(meta.status)} (${requestId ?? "see requestId in response headers"})`;
- };
  export function toOpenAIError(error) {
      const meta = getErrorMeta(error);
      return new OpenAIError(maybeMaskMessage(meta), mapType(meta.status), meta.code);
package/dist/errors/utils.d.ts CHANGED
@@ -17,8 +17,10 @@ export declare const STATUS_CODES: {
      readonly 504: "GATEWAY_TIMEOUT";
  };
  export declare const STATUS_CODE: (status: number) => "BAD_REQUEST" | "UNAUTHORIZED" | "PAYMENT_REQUIRED" | "FORBIDDEN" | "NOT_FOUND" | "METHOD_NOT_ALLOWED" | "CONFLICT" | "PAYLOAD_TOO_LARGE" | "UNSUPPORTED_MEDIA_TYPE" | "UNPROCESSABLE_ENTITY" | "TOO_MANY_REQUESTS" | "CLIENT_CLOSED_REQUEST" | "INTERNAL_SERVER_ERROR" | "BAD_GATEWAY" | "SERVICE_UNAVAILABLE" | "GATEWAY_TIMEOUT";
- export declare function getErrorMeta(error: unknown): {
+ export type ErrorMeta = {
      status: number;
      code: string;
      message: string;
  };
+ export declare function getErrorMeta(error: unknown): ErrorMeta;
+ export declare function maybeMaskMessage(meta: ErrorMeta, requestId?: string): string;
package/dist/errors/utils.js CHANGED
@@ -1,3 +1,4 @@
+ import { isProduction } from "../utils/env";
  import { normalizeAiSdkError } from "./ai-sdk";
  import { GatewayError } from "./gateway";
  export const STATUS_CODES = {
@@ -44,3 +45,11 @@ export function getErrorMeta(error) {
      }
      return { status, code, message };
  }
+ export function maybeMaskMessage(meta, requestId) {
+     // FUTURE: consider masking all upstream errors, also 4xx
+     if (!(isProduction() && meta.status >= 500)) {
+         return meta.message;
+     }
+     // FUTURE: always attach requestId to errors (masked and unmasked)
+     return `${STATUS_CODE(meta.status)} (${requestId ?? "see requestId in response headers"})`;
+ }
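maybeMaskMessage keeps its previous semantics, now shared by both error formats; a sketch with invented ErrorMeta values:

```ts
// Hypothetical meta for an upstream failure.
const meta = { status: 502, code: "BAD_GATEWAY", message: "upstream socket hang up" };

// Outside production, or for any 4xx status, the message passes through:
//   maybeMaskMessage(meta)            -> "upstream socket hang up"
// In production, 5xx details are masked behind the status code:
//   maybeMaskMessage(meta, "req_abc") -> "BAD_GATEWAY (req_abc)"
//   maybeMaskMessage(meta)            -> "BAD_GATEWAY (see requestId in response headers)"
```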
package/dist/gateway.d.ts CHANGED
@@ -6,6 +6,7 @@ export declare function gateway(config: GatewayConfig): {
      readonly "/embeddings": Endpoint;
      readonly "/models": Endpoint;
      readonly "/conversations": Endpoint;
+     readonly "/messages": Endpoint;
      readonly "/responses": Endpoint;
  };
  };
package/dist/gateway.js CHANGED
@@ -2,6 +2,7 @@ import { parseConfig } from "./config";
  import { chatCompletions } from "./endpoints/chat-completions/handler";
  import { conversations } from "./endpoints/conversations/handler";
  import { embeddings } from "./endpoints/embeddings/handler";
+ import { messages } from "./endpoints/messages/handler";
  import { models } from "./endpoints/models/handler";
  import { responses } from "./endpoints/responses/handler";
  import { GatewayError } from "./errors/gateway";
@@ -19,6 +20,7 @@ export function gateway(config) {
      ["/embeddings"]: embeddings(parsedConfig),
      ["/models"]: models(parsedConfig),
      ["/conversations"]: conversations(parsedConfig),
+     ["/messages"]: messages(parsedConfig),
      ["/responses"]: responses(parsedConfig),
  };
  const routeEntries = Object.entries(routes);
package/dist/index.d.ts CHANGED
@@ -1,5 +1,6 @@
  export * from "./gateway";
  export type * from "./types";
+ export * from "./errors/anthropic";
  export * from "./errors/gateway";
  export * from "./errors/openai";
  export * from "./logger";
package/dist/index.js CHANGED
@@ -1,4 +1,5 @@
  export * from "./gateway";
+ export * from "./errors/anthropic";
  export * from "./errors/gateway";
  export * from "./errors/openai";
  export * from "./logger";
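With the re-export above, the Anthropic error helpers are reachable from the package root as well as the subpath:

```ts
// Equivalent after this change.
import { toAnthropicError } from "@hebo-ai/gateway";
import { toAnthropicError as viaSubpath } from "@hebo-ai/gateway/errors/anthropic";
```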
package/dist/lifecycle.js CHANGED
@@ -1,6 +1,7 @@
  import { parseConfig } from "./config";
+ import { toAnthropicError, toAnthropicErrorResponse } from "./errors/anthropic";
  import { GatewayError } from "./errors/gateway";
- import { toOpenAIErrorResponse } from "./errors/openai";
+ import { toOpenAIError, toOpenAIErrorResponse } from "./errors/openai";
  import { logger } from "./logger";
  import { getBaggageAttributes } from "./telemetry/baggage";
  import { instrumentFetch } from "./telemetry/fetch";
@@ -57,6 +58,7 @@ export const winterCgHandler = (run, config) => {
      span.setAttributes({ "http.response.status_code_effective": realStatus });
      if (ctx.operation === "chat" ||
          ctx.operation === "embeddings" ||
+         ctx.operation === "messages" ||
          ctx.operation === "responses") {
          recordRequestDuration(performance.now() - start, realStatus, ctx, parsedConfig.telemetry?.signals?.gen_ai);
      }
@@ -73,8 +75,10 @@ export const winterCgHandler = (run, config) => {
      }
      if (!ctx.response) {
          ctx.result = (await run(ctx, parsedConfig));
+         const formatError = ctx.operation === "messages" ? toAnthropicError : toOpenAIError;
          ctx.response = toResponse(ctx.result, prepareResponseInit(ctx.requestId), {
              onDone: finalize,
+             formatError,
          });
      }
      if (parsedConfig.hooks?.onResponse) {
@@ -103,9 +107,14 @@ export const winterCgHandler = (run, config) => {
              logger.debug("[lifecycle] onError hook threw");
          }
      }
-     ctx.response ??= toOpenAIErrorResponse(ctx.request.signal.aborted
+     const errorPayload = ctx.request.signal.aborted
          ? new GatewayError(error ?? ctx.request.signal.reason, 499)
-         : error, prepareResponseInit(ctx.requestId));
+         : error;
+     const errorResponseInit = prepareResponseInit(ctx.requestId);
+     ctx.response ??=
+         ctx.operation === "messages"
+             ? toAnthropicErrorResponse(errorPayload, errorResponseInit)
+             : toOpenAIErrorResponse(errorPayload, errorResponseInit);
      finalize(ctx.response.status, error);
  }
});
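The same failure is now shaped per protocol. A sketch of the selection the lifecycle performs; the ResponseInit stands in for the package's internal prepareResponseInit(requestId), and the header name is hypothetical:

```ts
import { toAnthropicErrorResponse } from "@hebo-ai/gateway/errors/anthropic";
import { toOpenAIErrorResponse } from "@hebo-ai/gateway/errors/openai";
import type { GatewayOperation } from "@hebo-ai/gateway";

function errorResponseFor(operation: GatewayOperation, error: unknown): Response {
  const init: ResponseInit = { headers: { "x-request-id": "req_123" } }; // hypothetical
  return operation === "messages"
    ? toAnthropicErrorResponse(error, init) // { type: "error", error: { type, message } }
    : toOpenAIErrorResponse(error, init);   // { error: { message, ... } }
}
```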
package/dist/models/anthropic/middleware.js CHANGED
@@ -102,6 +102,11 @@ export const claudeReasoningMiddleware = {
      else {
          target.thinking = { type: "enabled" };
      }
+     // Map reasoning.summary to Anthropic thinking.display
+     const thinkingDisplay = reasoning.summary === "none" ? "omitted" : reasoning.summary ? "summarized" : undefined;
+     if (thinkingDisplay && target.thinking && target.thinking.type !== "disabled") {
+         target.thinking["display"] = thinkingDisplay;
+     }
      delete unknown["reasoning"];
      return params;
  },
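The middleware collapses the four ReasoningSummarySchema values into Anthropic's two display modes; the same ternary spelled out per value:

```ts
type Summary = "auto" | "concise" | "detailed" | "none";

// Same decision as the middleware above.
const display = (summary?: Summary) =>
  summary === "none" ? "omitted" : summary ? "summarized" : undefined;

display("none");      // "omitted"
display("auto");      // "summarized"
display("detailed");  // "summarized"
display(undefined);   // undefined — thinking.display is left unset
```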
package/dist/providers/bedrock/middleware.js CHANGED
@@ -1,4 +1,5 @@
  import { modelMiddlewareMatcher } from "../../middleware/matcher";
+ import { calculateReasoningBudgetFromEffort } from "../../middleware/utils";
  const isClaude46 = (modelId) => modelId.includes("-4-6");
  // https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
  export const bedrockServiceTierMiddleware = {
@@ -62,10 +63,24 @@ export const bedrockClaudeReasoningMiddleware = {
          return params;
      const target = (bedrock.reasoningConfig ??= {});
      if (thinking && typeof thinking === "object") {
-         target.type = thinking.type;
+         // Bedrock's InvokeModel (Messages) API supports "adaptive" thinking natively,
+         // but @ai-sdk/amazon-bedrock only uses the Converse API which rejects "adaptive"
+         // in additionalModelRequestFields — it only accepts "enabled" / "disabled".
+         // Map "adaptive" → "enabled" until the SDK adds InvokeModel support.
+         // See: https://docs.aws.amazon.com/bedrock/latest/userguide/claude-messages-adaptive-thinking.html
+         // SDK tracking issue: https://github.com/vercel/ai/issues/8513
+         target.type = thinking.type === "adaptive" ? "enabled" : thinking.type;
          if ("budgetTokens" in thinking && thinking.budgetTokens !== undefined) {
              target.budgetTokens = thinking.budgetTokens;
          }
+         else if (target.type === "enabled") {
+             // Bedrock requires budgetTokens when type is "enabled". When mapping from
+             // "adaptive" (which doesn't require budgetTokens), compute a fallback using
+             // the same effort-based logic as other model cases, defaulting to "medium".
+             // Note: Bedrock Converse API doesn't support "adaptive" natively — see vercel/ai#8513
+             const mappedEffort = effort === "max" ? "xhigh" : effort ?? "medium";
+             target.budgetTokens = calculateReasoningBudgetFromEffort(mappedEffort, params.maxOutputTokens ?? 65536, 1024);
+         }
      }
      // FUTURE: bedrock currently does not support "effort" for other 4.x models
      if (effort !== undefined && isClaude46(model.modelId)) {
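The body of calculateReasoningBudgetFromEffort is not part of this diff; only its (effort, maxOutputTokens, minimum) call shape is visible, so the following sketches the fallback path rather than the budget math:

```ts
// Caller sent adaptive thinking with no explicit budget and no effort.
const effort: "max" | "xhigh" | "medium" | undefined = undefined;
const mappedEffort = effort === "max" ? "xhigh" : effort ?? "medium"; // -> "medium"
// The middleware would then call, with maxOutputTokens defaulting to 65536:
// calculateReasoningBudgetFromEffort("medium", 65536, 1024)
```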
package/dist/providers/registry.d.ts CHANGED
@@ -5,7 +5,7 @@ export declare const resolveProvider: (args: {
      providers: ProviderRegistry;
      models: ModelCatalog;
      modelId: ModelId;
-     operation: "chat" | "embeddings" | "responses";
+     operation: "chat" | "embeddings" | "messages" | "responses";
  }) => ProviderV3;
  export type CanonicalIdsOptions = {
      mapping?: Partial<Record<ModelId, string>>;
package/dist/telemetry/gen-ai.js CHANGED
@@ -54,12 +54,16 @@ export const getGenAiGeneralAttributes = (ctx, signalLevel) => {
      if (!signalLevel || signalLevel === "off")
          return {};
      const requestModel = typeof ctx.body?.model === "string" ? ctx.body.model : ctx.modelId;
+     const serviceTier = typeof ctx.body?.service_tier === "string" ? ctx.body.service_tier : undefined;
      const attrs = {
          "gen_ai.operation.name": ctx.operation,
          "gen_ai.request.model": requestModel,
          "gen_ai.response.model": ctx.resolvedModelId,
          "gen_ai.provider.name": ctx.resolvedProviderId,
      };
+     if (signalLevel !== "required" && serviceTier !== undefined) {
+         attrs["gen_ai.request.service_tier"] = serviceTier;
+     }
      for (const [key, value] of Object.entries(ctx.otel)) {
          if (value !== undefined)
              attrs[key] = value;
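Spans can now carry the requested service tier; a hedged illustration of the resulting attribute set (values invented):

```ts
// "gen_ai.request.service_tier" is only attached when signalLevel is neither
// "off" nor "required" and the request body carried a string service_tier.
const attrs = {
  "gen_ai.operation.name": "chat",
  "gen_ai.request.model": "my-model-alias",
  "gen_ai.response.model": "resolved-model-id",   // hypothetical
  "gen_ai.provider.name": "bedrock",
  "gen_ai.request.service_tier": "priority",      // new in 0.10.0
};
```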
package/dist/types.d.ts CHANGED
@@ -3,12 +3,13 @@ import type { Attributes, Tracer } from "@opentelemetry/api";
  import type { ChatCompletions, ChatCompletionsBody, ChatCompletionsStream } from "./endpoints/chat-completions/schema";
  import type { ConversationStorage } from "./endpoints/conversations/storage/types";
  import type { Embeddings, EmbeddingsBody } from "./endpoints/embeddings/schema";
+ import type { Messages, MessagesBody, MessagesStream } from "./endpoints/messages/schema";
  import type { Model, ModelList } from "./endpoints/models";
  import type { Responses, ResponsesBody, ResponsesStream } from "./endpoints/responses/schema";
  import type { Logger, LoggerConfig } from "./logger";
  import type { ModelCatalog, ModelId } from "./models/types";
  import type { ProviderId, ProviderRegistry } from "./providers/types";
- export type GatewayOperation = "chat" | "embeddings" | "responses" | "models" | "conversations";
+ export type GatewayOperation = "chat" | "embeddings" | "messages" | "responses" | "models" | "conversations";
  /**
   * Per-request context shared across handlers and hooks.
   */
@@ -41,7 +42,7 @@ export type GatewayContext = {
      /**
       * Parsed body from the request.
       */
-     body?: ChatCompletionsBody | EmbeddingsBody | ResponsesBody;
+     body?: ChatCompletionsBody | EmbeddingsBody | MessagesBody | ResponsesBody;
      /**
       * Incoming model ID.
       */
@@ -65,7 +66,7 @@ export type GatewayContext = {
      /**
       * Result returned by the handler (pre-response).
       */
-     result?: ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream;
+     result?: ChatCompletions | ChatCompletionsStream | Embeddings | Messages | MessagesStream | Model | ModelList | Responses | ResponsesStream;
      /**
       * Response object returned by the handler.
       */
@@ -103,7 +104,7 @@ export type GatewayHooks = {
       * Runs after request JSON is parsed and validated for chat completions / embeddings / responses.
       * @returns Replacement parsed body, or undefined to keep original.
       */
-     before?: (ctx: BeforeHookContext) => void | ChatCompletionsBody | EmbeddingsBody | ResponsesBody | Promise<void | ChatCompletionsBody | EmbeddingsBody | ResponsesBody>;
+     before?: (ctx: BeforeHookContext) => void | ChatCompletionsBody | EmbeddingsBody | MessagesBody | ResponsesBody | Promise<void | ChatCompletionsBody | EmbeddingsBody | MessagesBody | ResponsesBody>;
      /**
       * Maps a user-provided model ID or alias to a canonical ID.
       * @returns Canonical model ID or undefined to keep original.
@@ -118,7 +119,7 @@ export type GatewayHooks = {
       * Runs after the endpoint handler.
       * @returns Result to replace, or undefined to keep original.
       */
-     after?: (ctx: AfterHookContext) => void | ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream | Promise<void | ChatCompletions | ChatCompletionsStream | Embeddings | Model | ModelList | Responses | ResponsesStream>;
+     after?: (ctx: AfterHookContext) => void | ChatCompletions | ChatCompletionsStream | Embeddings | Messages | MessagesStream | Model | ModelList | Responses | ResponsesStream | Promise<void | ChatCompletions | ChatCompletionsStream | Embeddings | Messages | MessagesStream | Model | ModelList | Responses | ResponsesStream>;
      /**
       * Runs after the lifecycle has produced the final Response.
       * @returns Replacement Response, or undefined to keep original.
package/dist/utils/response.d.ts CHANGED
@@ -3,4 +3,5 @@ export declare const prepareResponseInit: (requestId: string) => ResponseInit;
  export declare const mergeResponseInit: (defaultHeaders: HeadersInit, responseInit?: ResponseInit) => ResponseInit;
  export declare const toResponse: (result: ReadableStream<SseFrame> | Uint8Array<ArrayBuffer> | object | string, responseInit?: ResponseInit, streamOptions?: {
      onDone?: (status: number, reason?: unknown) => void;
+     formatError?: (error: unknown) => unknown;
  }) => Response;
package/dist/utils/stream.d.ts CHANGED
@@ -6,4 +6,5 @@ export type SseErrorFrame = SseFrame<Error, "error" | undefined>;
  export declare function toSseStream(src: ReadableStream<SseFrame>, options?: {
      onDone?: (status: number, reason?: unknown) => void;
      keepAliveMs?: number;
+     formatError?: (error: unknown) => unknown;
  }): ReadableStream<Uint8Array>;
package/dist/utils/stream.js CHANGED
@@ -59,9 +59,13 @@ export function toSseStream(src, options = {}) {
      }
      const value = result.value;
      if (value.event === "error" || value.data instanceof Error) {
-         const error = toOpenAIError(value.data);
+         const error = options.formatError
+             ? options.formatError(value.data)
+             : toOpenAIError(value.data);
          controller.enqueue(TEXT_ENCODER.encode(serializeSseFrame({ event: value.event, data: error })));
-         done(controller, error.error.type === "invalid_request_error" ? 422 : 502, value.data);
+         const openAiError = toOpenAIError(value.data);
+         const errorStatus = openAiError?.error.type === "invalid_request_error" ? 422 : 502;
+         done(controller, errorStatus, value.data);
          reader.cancel(value.data).catch(() => { });
          return;
      }
@@ -70,9 +74,12 @@ export function toSseStream(src, options = {}) {
      }
      catch (error) {
          try {
+             const errorPayload = options.formatError
+                 ? options.formatError(error)
+                 : toOpenAIError(error);
              controller.enqueue(TEXT_ENCODER.encode(serializeSseFrame({
                  event: "error",
-                 data: toOpenAIError(error),
+                 data: errorPayload,
              })));
          }
          catch { }
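The new option lets a stream emit provider-appropriate error frames. A sketch of how it composes; toSseStream is internal (no public subpath for the utils appears in this diff), so its surface is declared rather than imported:

```ts
import { toAnthropicError } from "@hebo-ai/gateway/errors/anthropic";

// Declared, not imported: this diff exposes no public subpath for the utils.
declare function toSseStream(
  src: ReadableStream<unknown>,
  options?: {
    onDone?: (status: number, reason?: unknown) => void;
    keepAliveMs?: number;
    formatError?: (error: unknown) => unknown;
  },
): ReadableStream<Uint8Array>;

declare const upstream: ReadableStream<unknown>;

// Error frames are now Anthropic-shaped; without formatError the stream
// falls back to toOpenAIError, preserving the old behavior.
const sse = toSseStream(upstream, { formatError: toAnthropicError });
```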
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
      "name": "@hebo-ai/gateway",
-     "version": "0.9.3",
-     "description": "AI gateway as a framework. For full control over models, routing & lifecycle. OpenAI-compatible /chat/completions, /embeddings & /models.",
+     "version": "0.10.0",
+     "description": "AI gateway as a framework. For full control over models, routing & lifecycle. OpenAI /chat/completions, OpenResponses /responses & Anthropic /messages.",
      "keywords": [
          "ai",
          "ai-gateway",
@@ -62,6 +62,10 @@
          "types": "./dist/endpoints/embeddings/index.d.ts",
          "import": "./dist/endpoints/embeddings/index.js"
      },
+     "./endpoints/messages": {
+         "types": "./dist/endpoints/messages/index.d.ts",
+         "import": "./dist/endpoints/messages/index.js"
+     },
      "./endpoints/models": {
          "types": "./dist/endpoints/models/index.d.ts",
          "import": "./dist/endpoints/models/index.js"
@@ -80,6 +84,10 @@
          "import": "./dist/endpoints/conversations/storage/sql.js",
          "dev-source": "./src/endpoints/conversations/storage/sql.ts"
      },
+     "./errors/anthropic": {
+         "types": "./dist/errors/anthropic.d.ts",
+         "import": "./dist/errors/anthropic.js"
+     },
      "./errors/openai": {
          "types": "./dist/errors/openai.d.ts",
          "import": "./dist/errors/openai.js"
@@ -158,7 +166,9 @@
      "lint": "oxlint",
      "lint:staged": "oxlint --fix",
      "typecheck": "oxlint --type-check",
-     "test": "bun test",
+     "test": "bun test src/",
+     "test:e2e": "bun test test/e2e/",
+     "test:all": "bun test",
      "check": "bun lint && bun typecheck",
      "fix": "bun lint:staged && bun format:staged"
  },
@@ -176,6 +186,7 @@
      "@ai-sdk/google-vertex": "^4.0.105",
      "@ai-sdk/groq": "^3.0.35",
      "@ai-sdk/openai": "^3.0.52",
+     "@anthropic-ai/sdk": "^0.88.0",
      "@aws-sdk/credential-providers": "^3.1027.0",
      "@langfuse/otel": "^5.0.2",
      "@libsql/client": "^0.17.2",