@hebo-ai/gateway 0.8.1 → 0.8.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -147,26 +147,31 @@ import { withCanonicalIdsForGroq } from "@hebo-ai/gateway/providers/groq";
147
147
 
148
148
  If an adapter is not yet provided, you can create your own by wrapping the provider instance with the `withCanonicalIds` helper and defining your custom canonicalization mapping & rules.
149
149
 
150
+ For Azure, use `createAzure` from `@ai-sdk/azure` directly. Name each [Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/endpoints) deployment after its Hebo canonical ID (e.g. `anthropic/claude-sonnet-4.5`).
151
+
152
+ For other providers, use `withCanonicalIds` with an explicit `mapping`:
153
+
150
154
  ```ts
151
- import { createAzure } from "@ai-sdk/openai";
155
+ import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
152
156
  import { gateway, withCanonicalIds } from "@hebo-ai/gateway";
153
157
 
154
- const azure = withCanonicalIds(
155
- createAzure({
156
- resourceName: process.env["AZURE_RESOURCE_NAME"],
157
- apiKey: process.env["AZURE_API_KEY"],
158
+ const myProvider = withCanonicalIds(
159
+ createOpenAICompatible({
160
+ name: "my-provider",
161
+ baseURL: "https://api.my-provider.com/v1",
162
+ apiKey: process.env["MY_PROVIDER_API_KEY"],
158
163
  }),
159
164
  {
160
165
  mapping: {
161
- "openai/gpt-4.1-mini": "your-gpt-4.1-mini-deployment-name",
162
- "openai/text-embedding-3-small": "your-embeddings-3-small-deployment-name",
166
+ "openai/gpt-4.1-mini": "gpt-4.1-mini-custom",
167
+ "anthropic/claude-sonnet-4.5": "claude-sonnet-4-5",
163
168
  },
164
169
  },
165
170
  );
166
171
 
167
172
  const gw = gateway({
168
173
  providers: {
169
- azure,
174
+ myProvider,
170
175
  },
171
176
  models: {
172
177
  // ...your models pointing at canonical IDs above
@@ -108,7 +108,9 @@ export const getChatRequestAttributes = (body, signalLevel) => {
108
108
  }
109
109
  if (signalLevel !== "required") {
110
110
  Object.assign(attrs, {
111
- // FUTURE: add reasoning info
111
+ "gen_ai.request.reasoning.enabled": body.reasoning?.enabled,
112
+ "gen_ai.request.reasoning.effort": body.reasoning?.effort,
113
+ "gen_ai.request.reasoning.max_tokens": body.reasoning?.max_tokens,
112
114
  "gen_ai.request.stream": body.stream,
113
115
  "gen_ai.request.service_tier": body.service_tier,
114
116
  "gen_ai.request.frequency_penalty": body.frequency_penalty,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hebo-ai/gateway",
3
- "version": "0.8.1",
3
+ "version": "0.8.2",
4
4
  "description": "AI gateway as a framework. For full control over models, routing & lifecycle. OpenAI-compatible /chat/completions, /embeddings & /models.",
5
5
  "keywords": [
6
6
  "ai",