@llmgateway/ai-sdk-provider 1.0.0 → 1.0.1
This diff shows the changes between publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- package/README.md +1 -146
- package/dist/index.js +183 -175
- package/dist/index.js.map +1 -1
- package/dist/{index.cjs → index.mjs} +176 -184
- package/dist/index.mjs.map +1 -0
- package/dist/internal/index.js +176 -169
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/{index.cjs → index.mjs} +170 -177
- package/dist/internal/index.mjs.map +1 -0
- package/package.json +1 -2
- package/dist/index.cjs.map +0 -1
- package/dist/internal/index.cjs.map +0 -1
- package/dist/{index.d.cts → index.d.mts} +0 -0
- package/dist/internal/{index.d.cts → index.d.mts} +0 -0
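The `index.cjs → index.mjs` and `index.d.cts → index.d.mts` renames indicate that 1.0.1 ships its secondary build output as ES modules instead of CommonJS. As a hedged sketch of what that means for consumers (assuming the public entry point is otherwise unchanged; the corresponding `package.json` field edit is the one-line change listed above, which this diff does not expand):

```typescript
// The named import matches the examples in the README diff that follows;
// with dist/ now containing only .js/.mjs bundles, it resolves to an ES
// module build. (Sketch only: the actual "exports"/"main" wiring lives in
// the unexpanded package.json change.)
import { createLLMGateway } from '@llmgateway/ai-sdk-provider';

const llmgateway = createLLMGateway({ apiKey: 'your-api-key' });
console.log(typeof llmgateway); // "function" — the provider is called as a model factory
```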
package/README.md
CHANGED
@@ -39,149 +39,4 @@ const { text } = await generateText({
 
 ## Supported models
 
-
-
-You can find the latest list of tool-capable models supported by LLMGateway [here](https://llmgateway.io/models?order=newest&supported_parameters=tools). (Note: This list may contain models that are not compatible with the AI SDK.)
-
-## Passing Extra Body to LLMGateway
-
-There are 3 ways to pass extra body to LLMGateway:
-
-1. Via the `providerOptions.llmgateway` property:
-
-```typescript
-import { createLLMGateway } from '@llmgateway/ai-sdk-provider';
-import { streamText } from 'ai';
-
-const llmgateway = createLLMGateway({ apiKey: 'your-api-key' });
-const model = llmgateway('anthropic/claude-3.7-sonnet:thinking');
-await streamText({
-  model,
-  messages: [{ role: 'user', content: 'Hello' }],
-  providerOptions: {
-    llmgateway: {
-      reasoning: {
-        max_tokens: 10,
-      },
-    },
-  },
-});
-```
-
-2. Via the `extraBody` property in the model settings:
-
-```typescript
-import { createLLMGateway } from '@llmgateway/ai-sdk-provider';
-import { streamText } from 'ai';
-
-const llmgateway = createLLMGateway({ apiKey: 'your-api-key' });
-const model = llmgateway('anthropic/claude-3.7-sonnet:thinking', {
-  extraBody: {
-    reasoning: {
-      max_tokens: 10,
-    },
-  },
-});
-await streamText({
-  model,
-  messages: [{ role: 'user', content: 'Hello' }],
-});
-```
-
-3. Via the `extraBody` property in the model factory:
-
-```typescript
-import { createLLMGateway } from '@llmgateway/ai-sdk-provider';
-import { streamText } from 'ai';
-
-const llmgateway = createLLMGateway({
-  apiKey: 'your-api-key',
-  extraBody: {
-    reasoning: {
-      max_tokens: 10,
-    },
-  },
-});
-const model = llmgateway('anthropic/claude-3.7-sonnet:thinking');
-await streamText({
-  model,
-  messages: [{ role: 'user', content: 'Hello' }],
-});
-```
-
-## Anthropic Prompt Caching
-
-You can include Anthropic-specific options directly in your messages when using functions like `streamText`. The LLMGateway provider will automatically convert these messages to the correct format internally.
-
-### Basic Usage
-
-```typescript
-import { createLLMGateway } from '@llmgateway/ai-sdk-provider';
-import { streamText } from 'ai';
-
-const llmgateway = createLLMGateway({ apiKey: 'your-api-key' });
-const model = llmgateway('anthropic/<supported-caching-model>');
-
-await streamText({
-  model,
-  messages: [
-    {
-      role: 'system',
-      content:
-        'You are a podcast summary assistant. You are detail oriented and critical about the content.',
-    },
-    {
-      role: 'user',
-      content: [
-        {
-          type: 'text',
-          text: 'Given the text body below:',
-        },
-        {
-          type: 'text',
-          text: `<LARGE BODY OF TEXT>`,
-          providerOptions: {
-            llmgateway: {
-              cacheControl: { type: 'ephemeral' },
-            },
-          },
-        },
-        {
-          type: 'text',
-          text: 'List the speakers?',
-        },
-      ],
-    },
-  ],
-});
-```
-
-## Use Cases
-
-### Usage Accounting
-
-The provider supports [LLMGateway usage accounting](https://llmgateway.io/docs/use-cases/usage-accounting), which allows you to track token usage details directly in your API responses, without making additional API calls.
-
-```typescript
-// Enable usage accounting
-const model = llmgateway('openai/gpt-3.5-turbo', {
-  usage: {
-    include: true,
-  },
-});
-
-// Access usage accounting data
-const result = await generateText({
-  model,
-  prompt: 'Hello, how are you today?',
-});
-
-// Provider-specific usage details (available in providerMetadata)
-if (result.providerMetadata?.llmgateway?.usage) {
-  console.log('Cost:', result.providerMetadata.llmgateway.usage.cost);
-  console.log(
-    'Total Tokens:',
-    result.providerMetadata.llmgateway.usage.totalTokens,
-  );
-}
-```
+You can find the latest list of models supported by LLMGateway [here](https://llmgateway.io/models).
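For orientation, the hunk context above shows the quickstart that survives in the trimmed README (`const { text } = await generateText({`). Below is a minimal end-to-end sketch assembled from the removed 1.0.0 examples, assuming the factory API is unchanged in 1.0.1; the model id is illustrative, not taken from the diff:

```typescript
import { createLLMGateway } from '@llmgateway/ai-sdk-provider';
import { generateText } from 'ai';

// Factory call copied from the removed 1.0.0 examples.
const llmgateway = createLLMGateway({ apiKey: 'your-api-key' });

// Illustrative model id; the new README points to
// https://llmgateway.io/models for the current list.
const { text } = await generateText({
  model: llmgateway('openai/gpt-4o'),
  prompt: 'Hello, how are you today?',
});

console.log(text);
```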