@hebo-ai/gateway 0.3.0-rc.0 β†’ 0.3.0-rc.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/README.md +89 -10
  2. package/dist/endpoints/chat-completions/converters.js +42 -51
  3. package/dist/endpoints/chat-completions/handler.js +1 -8
  4. package/dist/endpoints/embeddings/handler.js +1 -8
  5. package/dist/logger/default.d.ts +1 -1
  6. package/dist/logger/default.js +4 -3
  7. package/dist/logger/index.js +3 -0
  8. package/dist/middleware/common.js +23 -37
  9. package/dist/middleware/matcher.d.ts +17 -9
  10. package/dist/middleware/matcher.js +57 -39
  11. package/dist/models/amazon/middleware.js +2 -2
  12. package/dist/models/anthropic/middleware.js +1 -1
  13. package/dist/models/cohere/middleware.js +2 -2
  14. package/dist/models/google/middleware.js +2 -2
  15. package/dist/models/openai/middleware.js +2 -2
  16. package/dist/models/voyage/middleware.js +1 -1
  17. package/dist/telemetry/access-log.js +4 -8
  18. package/dist/utils/env.d.ts +1 -0
  19. package/dist/utils/env.js +1 -0
  20. package/package.json +2 -1
  21. package/src/endpoints/chat-completions/converters.ts +54 -59
  22. package/src/endpoints/chat-completions/handler.ts +1 -7
  23. package/src/endpoints/embeddings/handler.ts +1 -9
  24. package/src/logger/default.ts +5 -3
  25. package/src/logger/index.ts +3 -0
  26. package/src/middleware/common.ts +29 -45
  27. package/src/middleware/matcher.ts +77 -53
  28. package/src/models/amazon/middleware.test.ts +4 -4
  29. package/src/models/amazon/middleware.ts +2 -2
  30. package/src/models/anthropic/middleware.test.ts +5 -5
  31. package/src/models/anthropic/middleware.ts +1 -1
  32. package/src/models/cohere/middleware.test.ts +4 -4
  33. package/src/models/cohere/middleware.ts +2 -2
  34. package/src/models/google/middleware.test.ts +4 -4
  35. package/src/models/google/middleware.ts +2 -2
  36. package/src/models/openai/middleware.test.ts +5 -5
  37. package/src/models/openai/middleware.ts +2 -2
  38. package/src/models/voyage/middleware.test.ts +2 -2
  39. package/src/models/voyage/middleware.ts +1 -1
  40. package/src/telemetry/access-log.ts +4 -8
  41. package/src/utils/env.ts +1 -0
package/README.md CHANGED
@@ -2,7 +2,7 @@
2
2
 
3
3
  Roll your own AI gateway for full control over models, providers, routing logic, guardrails, observability and more ...
4
4
 
5
- ## Overview
5
+ ## πŸ’ Overview
6
6
 
7
7
  Existing AI gateways like OpenRouter, Vercel AI Gateway, LiteLLM, and Portkey work out of the box, but they're hard to extend once your needs go beyond configuration.
8
8
 
@@ -10,7 +10,7 @@ Hebo Gateway is an open-source, embeddable AI gateway framework built to live in
10
10
 
11
11
  Learn more in our blog post: [Yet Another AI Gateway?](https://hebo.ai/blog/260127-hebo-gateway/) (`https://hebo.ai/blog/260127-hebo-gateway/`)
12
12
 
13
- ## Features
13
+ ## 🍌 Features
14
14
 
15
15
  - 🌐 OpenAI-compatible /chat/completions, /embeddings & /models endpoints.
16
16
  - 🔌 Integrate into your existing Hono, Elysia, Next.js & TanStack apps.
@@ -20,13 +20,28 @@ Learn more in our blog post: [Yet Another AI Gateway?](https://hebo.ai/blog/2601
20
20
  - πŸͺ Hook system to customize routing, auth, rate limits, and shape responses.
21
21
  - 🧰 Low-level OpenAI-compatible schema, converters, and middleware helpers.
22
22
 
23
- ## Installation
23
+ ## 📦 Installation
24
24
 
25
25
  ```bash
26
26
  bun install @hebo-ai/gateway
27
27
  ```
28
28
 
29
- ## Quickstart
29
+ ## ☰ Table of Contents
30
+
31
+ - Quickstart
32
+ - [Setup A Gateway Instance](#setup-a-gateway-instance) | [Mount Route Handlers](#mount-route-handlers) | [Call the Gateway](#call-the-gateway)
33
+ - Configuration Reference
34
+ - [Providers](#providers) | [Models](#models) | [Hooks](#hooks) | [Logger](#logger-settings)
35
+ - Framework Support
36
+ - [ElysiaJS](#elysiajs) | [Hono](#hono) | [Next.js](#nextjs) | [TanStack Start](#tanstack-start)
37
+ - Runtime Support
38
+ - [Vercel Edge](#vercel-edge) | [Cloudflare Workers](#cloudflare-workers) | [Deno Deploy](#deno-deploy) | [AWS Lambda](#aws-lambda)
39
+ - OpenAI Extensions
40
+ - [Reasoning](#reasoning)
41
+ - Advanced Usage
42
+ - [Passing Framework State to Hooks](#passing-framework-state-to-hooks) | [Selective Route Mounting](#selective-route-mounting) | [Low-level Schemas & Converters](#low-level-schemas--converters)
43
+
44
+ ## 🚀 Quickstart
30
45
 
31
46
  ### Setup A Gateway Instance
32
47
 
@@ -115,7 +130,7 @@ const { text } = await generateText({
115
130
  console.log(text);
116
131
  ```
117
132
 
118
- ## Configuration Reference
133
+ ## βš™οΈ Configuration Reference
119
134
 
120
135
  ### Providers
121
136
 
@@ -338,7 +353,7 @@ The `ctx` object is **readonly for core fields**. Use return values to override
338
353
  > [!TIP]
339
354
  > To pass data between hooks, use `ctx.state`. It's a per-request mutable bag in which you can stash things like auth info, routing decisions, timers, or trace IDs and read them later again in any of the other hooks.
340
355
 
341
- ## Framework Support
356
+ ## 🧩 Framework Support
342
357
 
343
358
  Hebo Gateway exposes **WinterCG-compatible** handlers that integrate with almost any existing framework.
344
359
 
@@ -366,7 +381,9 @@ export default new Hono().mount("/v1/gateway/", gw.handler);
366
381
  console.log(`πŸ’ Hebo Gateway is running with Hono framework`);
367
382
  ```
368
383
 
369
- ### Next.js (App Router)
384
+ ### Next.js
385
+
386
+ #### App Router
370
387
 
371
388
  `app/api/gateway/[...all]/route.ts`
372
389
 
@@ -380,7 +397,7 @@ const gw = gateway({
380
397
  export const POST = gw.handler, GET = gw.handler;
381
398
  ```
382
399
 
383
- ### Next.js (Pages Router)
400
+ #### Pages Router
384
401
 
385
402
  `pages/api/gateway/[...all].ts`
386
403
 
@@ -420,7 +437,69 @@ export const Route = createFileRoute("/api/$")({
420
437
  });
421
438
  ```
422
439
 
423
- ## OpenAI Extensions
440
+ ## 🌍 Runtime Support
441
+
442
+ Hebo Gateway also works directly with runtime-level `Request -> Response` handlers.
443
+
444
+ ### Vercel Edge
445
+
446
+ `api/gateway.ts`
447
+
448
+ ```ts
449
+ export const runtime = "edge";
450
+
451
+ const gw = gateway({
452
+ // ...
453
+ });
454
+
455
+ export default gw.handler;
456
+ ```
457
+
458
+ ### Cloudflare Workers
459
+
460
+ `src/index.ts`
461
+
462
+ ```ts
463
+ const gw = gateway({
464
+ // ...
465
+ });
466
+
467
+ export default {
468
+ fetch: gw.handler,
469
+ };
470
+ ```
471
+
472
+ ### Deno Deploy
473
+
474
+ `main.ts`
475
+
476
+ ```ts
477
+ import { serve } from "https://deno.land/std/http/server.ts";
478
+
479
+ const gw = gateway({
480
+ // ...
481
+ });
482
+
483
+ serve((request: Request) => gw.handler(request));
484
+ ```
485
+
486
+ ### AWS Lambda
487
+
488
+ `src/lambda.ts`
489
+
490
+ ```ts
491
+ import { awsLambdaEventHandler } from "@hattip/adapter-aws-lambda";
492
+
493
+ const gw = gateway({
494
+ // ...
495
+ });
496
+
497
+ export const handler = awsLambdaEventHandler({
498
+ handler: gw.handler,
499
+ });
500
+ ```
501
+
502
+ ## 🧠 OpenAI Extensions
424
503
 
425
504
  ### Reasoning
426
505
 
@@ -453,7 +532,7 @@ Reasoning output is surfaced as extension to the `completion` object.
453
532
 
454
533
  Most SDKs handle these fields out-of-the-box.
455
534
 
456
- ## Advanced Usage
535
+ ## 🧪 Advanced Usage
457
536
 
458
537
  ### Logger Settings
459
538
 
@@ -1,3 +1,4 @@
1
+ import { convertBase64ToUint8Array } from "@ai-sdk/provider-utils";
1
2
  import { jsonSchema, JsonToSseTransformStream, tool } from "ai";
2
3
  import { GatewayError } from "../../errors/gateway";
3
4
  import { OpenAIError, toOpenAIError } from "../../errors/openai";
@@ -5,6 +6,7 @@ import { toResponse } from "../../utils/response";
5
6
  // --- Request Flow ---
6
7
  export function convertToTextCallOptions(params) {
7
8
  const { messages, tools, tool_choice, temperature, max_tokens, max_completion_tokens, reasoning_effort, reasoning, frequency_penalty, presence_penalty, seed, stop, top_p, ...rest } = params;
9
+ Object.assign(rest, parseReasoningOptions(reasoning_effort, reasoning));
8
10
  return {
9
11
  messages: convertToModelMessages(messages),
10
12
  tools: convertToToolSet(tools),
@@ -17,10 +19,7 @@ export function convertToTextCallOptions(params) {
17
19
  stopSequences: stop ? (Array.isArray(stop) ? stop : [stop]) : undefined,
18
20
  topP: top_p,
19
21
  providerOptions: {
20
- unknown: {
21
- ...rest,
22
- ...parseReasoningOptions(reasoning_effort, reasoning),
23
- },
22
+ unknown: rest,
24
23
  },
25
24
  };
26
25
  }
@@ -113,29 +112,16 @@ export function fromChatCompletionsContent(content) {
113
112
  if (part.type === "image_url") {
114
113
  const url = part.image_url.url;
115
114
  if (url.startsWith("data:")) {
116
- const parts = url.split(",");
117
- const metadata = parts[0];
118
- const base64Data = parts[1];
119
- if (!metadata || !base64Data) {
120
- throw new GatewayError("Invalid data URL: missing metadata or data", 400);
121
- }
122
- const mimeTypePart = metadata.split(":")[1];
123
- if (!mimeTypePart) {
124
- throw new GatewayError("Invalid data URL: missing MIME type part", 400);
125
- }
126
- const mimeType = mimeTypePart.split(";")[0];
127
- if (!mimeType) {
128
- throw new GatewayError("Invalid data URL: missing MIME type", 400);
129
- }
115
+ const { mimeType, base64Data } = parseDataUrl(url);
130
116
  return mimeType.startsWith("image/")
131
117
  ? {
132
118
  type: "image",
133
- image: Buffer.from(base64Data, "base64"),
119
+ image: convertBase64ToUint8Array(base64Data),
134
120
  mediaType: mimeType,
135
121
  }
136
122
  : {
137
123
  type: "file",
138
- data: Buffer.from(base64Data, "base64"),
124
+ data: convertBase64ToUint8Array(base64Data),
139
125
  mediaType: mimeType,
140
126
  };
141
127
  }
@@ -149,12 +135,12 @@ export function fromChatCompletionsContent(content) {
149
135
  return media_type.startsWith("image/")
150
136
  ? {
151
137
  type: "image",
152
- image: Buffer.from(data, "base64"),
138
+ image: convertBase64ToUint8Array(data),
153
139
  mediaType: media_type,
154
140
  }
155
141
  : {
156
142
  type: "file",
157
- data: Buffer.from(data, "base64"),
143
+ data: convertBase64ToUint8Array(data),
158
144
  filename,
159
145
  mediaType: media_type,
160
146
  };
@@ -195,6 +181,20 @@ function parseToolOutput(content) {
195
181
  return { type: "text", value: content };
196
182
  }
197
183
  }
184
+ function parseDataUrl(url) {
185
+ const commaIndex = url.indexOf(",");
186
+ if (commaIndex <= "data:".length || commaIndex === url.length - 1) {
187
+ throw new GatewayError("Invalid data URL: missing metadata or data", 400);
188
+ }
189
+ const metadata = url.slice("data:".length, commaIndex);
190
+ const base64Data = url.slice(commaIndex + 1);
191
+ const semicolonIndex = metadata.indexOf(";");
192
+ const mimeType = (semicolonIndex === -1 ? metadata : metadata.slice(0, semicolonIndex)).trim();
193
+ if (!mimeType) {
194
+ throw new GatewayError("Invalid data URL: missing MIME type", 400);
195
+ }
196
+ return { mimeType, base64Data };
197
+ }
198
198
  function parseReasoningOptions(reasoning_effort, reasoning) {
199
199
  const effort = reasoning?.effort ?? reasoning_effort;
200
200
  const max_tokens = reasoning?.max_tokens;
@@ -285,13 +285,10 @@ export class ChatCompletionsStream extends TransformStream {
285
285
  break;
286
286
  }
287
287
  case "tool-call": {
288
+ const toolCall = toChatCompletionsToolCall(part.toolCallId, part.toolName, part.input, part.providerMetadata);
289
+ toolCall.index = toolCallIndexCounter++;
288
290
  controller.enqueue(createChunk({
289
- tool_calls: [
290
- {
291
- ...toChatCompletionsToolCall(part.toolCallId, part.toolName, part.input, part.providerMetadata),
292
- index: toolCallIndexCounter++,
293
- },
294
- ],
291
+ tool_calls: [toolCall],
295
292
  }));
296
293
  break;
297
294
  }
@@ -336,29 +333,23 @@ export const toChatCompletionsAssistantMessage = (result) => {
336
333
  return message;
337
334
  };
338
335
  export function toChatCompletionsUsage(usage) {
339
- return {
340
- ...(usage.inputTokens !== undefined && {
341
- prompt_tokens: usage.inputTokens,
342
- }),
343
- ...(usage.outputTokens !== undefined && {
344
- completion_tokens: usage.outputTokens,
345
- }),
346
- ...((usage.totalTokens !== undefined ||
347
- usage.inputTokens !== undefined ||
348
- usage.outputTokens !== undefined) && {
349
- total_tokens: usage.totalTokens ?? (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0),
350
- }),
351
- ...(usage.outputTokenDetails?.reasoningTokens !== undefined && {
352
- completion_tokens_details: {
353
- reasoning_tokens: usage.outputTokenDetails.reasoningTokens,
354
- },
355
- }),
356
- ...(usage.inputTokenDetails?.cacheReadTokens !== undefined && {
357
- prompt_tokens_details: {
358
- cached_tokens: usage.inputTokenDetails.cacheReadTokens,
359
- },
360
- }),
361
- };
336
+ const out = {};
337
+ const prompt = usage.inputTokens;
338
+ if (prompt !== undefined)
339
+ out.prompt_tokens = prompt;
340
+ const completion = usage.outputTokens;
341
+ if (completion !== undefined)
342
+ out.completion_tokens = completion;
343
+ if (prompt !== undefined || completion !== undefined || usage.totalTokens !== undefined) {
344
+ out.total_tokens = usage.totalTokens ?? (prompt ?? 0) + (completion ?? 0);
345
+ }
346
+ const reasoning = usage.outputTokenDetails?.reasoningTokens;
347
+ if (reasoning !== undefined)
348
+ out.completion_tokens_details = { reasoning_tokens: reasoning };
349
+ const cached = usage.inputTokenDetails?.cacheReadTokens;
350
+ if (cached !== undefined)
351
+ out.prompt_tokens_details = { cached_tokens: cached };
352
+ return out;
362
353
  }
363
354
  export function toChatCompletionsToolCall(id, name, args, providerMetadata) {
364
355
  const out = {
@@ -3,7 +3,6 @@ import * as z from "zod/mini";
3
3
  import { GatewayError } from "../../errors/gateway";
4
4
  import { winterCgHandler } from "../../lifecycle";
5
5
  import { logger } from "../../logger";
6
- import { forwardParamsMiddleware } from "../../middleware/common";
7
6
  import { modelMiddlewareMatcher } from "../../middleware/matcher";
8
7
  import { resolveProvider } from "../../providers/registry";
9
8
  import { prepareForwardHeaders } from "../../utils/request";
@@ -55,15 +54,9 @@ export const chatCompletions = (config) => {
55
54
  options: textOptions,
56
55
  }, "[chat] AI SDK options");
57
56
  // Build middleware chain (model -> forward params -> provider).
58
- const middleware = [];
59
- for (const m of modelMiddlewareMatcher.forModel(ctx.resolvedModelId))
60
- middleware.push(m);
61
- middleware.push(forwardParamsMiddleware(languageModel.provider));
62
- for (const m of modelMiddlewareMatcher.forProvider(languageModel.provider))
63
- middleware.push(m);
64
57
  const languageModelWithMiddleware = wrapLanguageModel({
65
58
  model: languageModel,
66
- middleware,
59
+ middleware: modelMiddlewareMatcher.for(ctx.resolvedModelId, languageModel.provider),
67
60
  });
68
61
  // Execute request (streaming vs. non-streaming).
69
62
  if (stream) {
@@ -3,7 +3,6 @@ import * as z from "zod/mini";
3
3
  import { GatewayError } from "../../errors/gateway";
4
4
  import { winterCgHandler } from "../../lifecycle";
5
5
  import { logger } from "../../logger";
6
- import { forwardParamsEmbeddingMiddleware } from "../../middleware/common";
7
6
  import { modelMiddlewareMatcher } from "../../middleware/matcher";
8
7
  import { resolveProvider } from "../../providers/registry";
9
8
  import { prepareForwardHeaders } from "../../utils/request";
@@ -52,15 +51,9 @@ export const embeddings = (config) => {
52
51
  const embedOptions = convertToEmbedCallOptions(inputs);
53
52
  logger.trace({ requestId: ctx.request.headers.get("x-request-id"), options: embedOptions }, "[embeddings] AI SDK options");
54
53
  // Build middleware chain (model -> forward params -> provider).
55
- const middleware = [];
56
- for (const m of modelMiddlewareMatcher.forEmbeddingModel(ctx.resolvedModelId))
57
- middleware.push(m);
58
- middleware.push(forwardParamsEmbeddingMiddleware(embeddingModel.provider));
59
- for (const m of modelMiddlewareMatcher.forEmbeddingProvider(embeddingModel.provider))
60
- middleware.push(m);
61
54
  const embeddingModelWithMiddleware = wrapEmbeddingModel({
62
55
  model: embeddingModel,
63
- middleware,
56
+ middleware: modelMiddlewareMatcher.forEmbedding(ctx.resolvedModelId, embeddingModel.provider),
64
57
  });
65
58
  // Execute request.
66
59
  const result = await embedMany({
@@ -1,5 +1,5 @@
1
1
  import type { LogLevel, Logger } from "./index";
2
- export declare const getDefaultLogLevel: () => "debug" | "info";
2
+ export declare const getDefaultLogLevel: () => LogLevel;
3
3
  export declare const createDefaultLogger: (config: {
4
4
  level?: LogLevel;
5
5
  }) => Logger;
@@ -1,6 +1,6 @@
1
1
  import { serializeError } from "serialize-error";
2
- import { isProduction } from "../utils/env";
3
- export const getDefaultLogLevel = () => (isProduction() ? "info" : "debug");
2
+ import { isProduction, isTest } from "../utils/env";
3
+ export const getDefaultLogLevel = () => isTest() ? "silent" : isProduction() ? "info" : "debug";
4
4
  const noop = () => { };
5
5
  const LEVEL = {
6
6
  trace: 5,
@@ -8,6 +8,7 @@ const LEVEL = {
8
8
  info: 20,
9
9
  warn: 30,
10
10
  error: 40,
11
+ silent: 50,
11
12
  };
12
13
  const LEVELS = Object.keys(LEVEL);
13
14
  const isRecord = (value) => typeof value === "object" && value !== null && !(value instanceof Error);
@@ -52,7 +53,7 @@ const buildLogObject = (level, args) => {
52
53
  };
53
54
  const makeLogFn = (level, write) => (...args) => write(JSON.stringify(buildLogObject(level, args)));
54
55
  export const createDefaultLogger = (config) => {
55
- if (config.level === "silent") {
56
+ if (config.level === "silent" || getDefaultLogLevel() === "silent") {
56
57
  return { trace: noop, debug: noop, info: noop, warn: noop, error: noop };
57
58
  }
58
59
  const threshold = LEVEL[config.level ?? getDefaultLogLevel()];
@@ -1,3 +1,4 @@
1
+ import { isTest } from "../utils/env";
1
2
  const KEY = Symbol.for("@hebo/logger");
2
3
  const g = globalThis;
3
4
  g[KEY] ??= {
@@ -10,6 +11,8 @@ g[KEY] ??= {
10
11
  export let logger = g[KEY];
11
12
  export const isLogger = (input) => typeof input === "object" && input !== null && "info" in input;
12
13
  export function isLoggerDisabled(input) {
14
+ if (isTest())
15
+ return true;
13
16
  if (input === null)
14
17
  return true;
15
18
  if (!input || typeof input !== "object" || "info" in input)
@@ -16,8 +16,16 @@ function snakeToCamel(key) {
16
16
  }
17
17
  return out;
18
18
  }
19
+ function hasUppercase(s) {
20
+ for (let i = 0; i < s.length; i++) {
21
+ const c = s[i];
22
+ if (c >= "A" && c <= "Z")
23
+ return true;
24
+ }
25
+ return false;
26
+ }
19
27
  function camelToSnake(key) {
20
- if (!/[A-Z]/.test(key))
28
+ if (!hasUppercase(key))
21
29
  return key;
22
30
  let out = "";
23
31
  for (let i = 0; i < key.length; i++) {
@@ -26,53 +34,31 @@ function camelToSnake(key) {
26
34
  }
27
35
  return out;
28
36
  }
29
- function camelizeKeysDeep(value) {
30
- if (value === null || typeof value !== "object")
31
- return value;
32
- if (Array.isArray(value)) {
33
- return value.map((v) => camelizeKeysDeep(v));
34
- }
35
- const out = {};
36
- for (const [k, v] of Object.entries(value)) {
37
- if (v === undefined || v === null)
38
- continue;
39
- out[snakeToCamel(k)] = camelizeKeysDeep(v);
40
- }
41
- return out;
42
- }
43
- function snakizeKeysDeep(value) {
37
+ function remapDeep(value, mapKey) {
44
38
  if (value === null || typeof value !== "object")
45
39
  return value;
46
40
  if (Array.isArray(value)) {
47
- return value.map((v) => snakizeKeysDeep(v));
41
+ return value.map((v) => remapDeep(v, mapKey));
48
42
  }
49
43
  const out = {};
50
- for (const [k, v] of Object.entries(value)) {
51
- if (v === undefined || v === null)
52
- continue;
53
- out[camelToSnake(k)] = snakizeKeysDeep(v);
44
+ for (const key of Object.keys(value)) {
45
+ out[mapKey(key)] = remapDeep(value[key], mapKey);
54
46
  }
55
47
  return out;
56
48
  }
57
- function processOptions(providerOptions, providerName) {
58
- if (providerOptions[providerName]) {
59
- providerOptions[providerName] = camelizeKeysDeep(providerOptions[providerName]);
60
- }
61
- const target = (providerOptions[providerName] ??= {});
62
- for (const key in providerOptions) {
63
- if (key === providerName)
49
+ function processOptions(options, providerName) {
50
+ const target = (options[providerName] = remapDeep(options[providerName] ?? {}, snakeToCamel));
51
+ for (const namespace in options) {
52
+ if (namespace === providerName)
64
53
  continue;
65
- const source = camelizeKeysDeep(providerOptions[key]);
66
- for (const k in source) {
67
- target[k] = source[k];
68
- }
69
- if (key === "unknown")
70
- delete providerOptions[key];
54
+ Object.assign(target, remapDeep(options[namespace], snakeToCamel));
55
+ if (namespace === "unknown")
56
+ delete options[namespace];
71
57
  }
72
58
  }
73
- function processMetadata(providerMetadata) {
74
- for (const key in providerMetadata) {
75
- providerMetadata[key] = snakizeKeysDeep(providerMetadata[key]);
59
+ function processMetadata(metadata) {
60
+ for (const namespace in metadata) {
61
+ metadata[namespace] = remapDeep(metadata[namespace], camelToSnake);
76
62
  }
77
63
  }
78
64
  /**
@@ -1,19 +1,27 @@
1
1
  import type { EmbeddingModelMiddleware, LanguageModelMiddleware } from "ai";
2
2
  import type { ModelId } from "../models/types";
3
3
  import type { ProviderId } from "../providers/types";
4
- type MiddlewareEntry = {
5
- language?: LanguageModelMiddleware | LanguageModelMiddleware[];
6
- embedding?: EmbeddingModelMiddleware | EmbeddingModelMiddleware[];
4
+ type MiddlewareEntries = {
5
+ language?: LanguageModelMiddleware[];
6
+ embedding?: EmbeddingModelMiddleware[];
7
7
  };
8
+ type ModelMiddleware = LanguageModelMiddleware | EmbeddingModelMiddleware;
8
9
  declare class ModelMiddlewareMatcher {
9
10
  private model;
10
11
  private provider;
11
- useForModel(patterns: ModelId | readonly ModelId[], entry: MiddlewareEntry): void;
12
- useForProvider(patterns: ProviderId | readonly ProviderId[], entry: MiddlewareEntry): void;
13
- forModel(modelId: ModelId): LanguageModelMiddleware[];
14
- forProvider(providerId: ProviderId): LanguageModelMiddleware[];
15
- forEmbeddingModel(modelId: ModelId): EmbeddingModelMiddleware[];
16
- forEmbeddingProvider(providerId: ProviderId): EmbeddingModelMiddleware[];
12
+ private static readonly MAX_CACHE;
13
+ private cache;
14
+ useForModel(patterns: ModelId | readonly ModelId[], entry: MiddlewareEntries): void;
15
+ useForProvider(patterns: ProviderId | readonly ProviderId[], entry: MiddlewareEntries): void;
16
+ for(modelId: ModelId, providerId: ProviderId): LanguageModelMiddleware[];
17
+ forEmbedding(modelId: ModelId, providerId: ProviderId): EmbeddingModelMiddleware[];
18
+ resolve(options: {
19
+ kind: "text" | "embedding";
20
+ modelId?: ModelId;
21
+ providerId?: ProviderId;
22
+ forward?: ModelMiddleware | (() => ModelMiddleware);
23
+ }): ModelMiddleware[];
24
+ private collect;
17
25
  }
18
26
  export declare const modelMiddlewareMatcher: ModelMiddlewareMatcher;
19
27
  export type { ModelMiddlewareMatcher };