@providerprotocol/ai 0.0.17 → 0.0.19
This diff shows changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +294 -114
- package/dist/anthropic/index.d.ts +1 -1
- package/dist/anthropic/index.js +5 -3
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-MOU4U3PO.js → chunk-5FEAOEXV.js} +4 -68
- package/dist/chunk-5FEAOEXV.js.map +1 -0
- package/dist/chunk-DZQHVGNV.js +71 -0
- package/dist/chunk-DZQHVGNV.js.map +1 -0
- package/dist/chunk-SKY2JLA7.js +59 -0
- package/dist/chunk-SKY2JLA7.js.map +1 -0
- package/dist/{chunk-SVYROCLD.js → chunk-UMKWXGO3.js} +1 -1
- package/dist/chunk-UMKWXGO3.js.map +1 -0
- package/dist/chunk-WAKD3OO5.js +224 -0
- package/dist/chunk-WAKD3OO5.js.map +1 -0
- package/dist/content-DEl3z_W2.d.ts +276 -0
- package/dist/google/index.d.ts +3 -1
- package/dist/google/index.js +122 -4
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +2 -2
- package/dist/http/index.js +2 -1
- package/dist/image-Dhq-Yuq4.d.ts +456 -0
- package/dist/index.d.ts +59 -1460
- package/dist/index.js +89 -267
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +1 -1
- package/dist/ollama/index.js +5 -3
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +47 -20
- package/dist/openai/index.js +309 -4
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +1 -1
- package/dist/openrouter/index.js +5 -3
- package/dist/openrouter/index.js.map +1 -1
- package/dist/{provider-D5MO3-pS.d.ts → provider-BBMBZuGn.d.ts} +11 -11
- package/dist/proxy/index.d.ts +652 -0
- package/dist/proxy/index.js +565 -0
- package/dist/proxy/index.js.map +1 -0
- package/dist/{retry-DZ4Sqmxp.d.ts → retry-DR7YRJDz.d.ts} +1 -1
- package/dist/stream-DRHy6q1a.d.ts +1013 -0
- package/dist/xai/index.d.ts +29 -1
- package/dist/xai/index.js +118 -4
- package/dist/xai/index.js.map +1 -1
- package/package.json +6 -1
- package/dist/chunk-MOU4U3PO.js.map +0 -1
- package/dist/chunk-SVYROCLD.js.map +0 -1
package/README.md
CHANGED
@@ -1,117 +1,130 @@
 # @providerprotocol/ai
 
-
-
-## Install
+A unified TypeScript SDK for AI inference across multiple providers. One API for LLMs, embeddings, and image generation.
 
 ```bash
 bun add @providerprotocol/ai
 ```
 
-##
+## Quick Start
 
 ```typescript
 import { llm } from '@providerprotocol/ai';
 import { anthropic } from '@providerprotocol/ai/anthropic';
-import { openai } from '@providerprotocol/ai/openai';
-import { google } from '@providerprotocol/ai/google';
-import { ollama } from '@providerprotocol/ai/ollama';
-import { openrouter } from '@providerprotocol/ai/openrouter';
-import { xai } from '@providerprotocol/ai/xai';
 
-// Simple generation
 const claude = llm({ model: anthropic('claude-sonnet-4-20250514') });
 const turn = await claude.generate('Hello!');
 console.log(turn.response.text);
+```
+
+## Providers
+
+| Provider | Import | LLM | Embedding | Image |
+|----------|--------|:---:|:---------:|:-----:|
+| Anthropic | `@providerprotocol/ai/anthropic` | ✓ | | |
+| OpenAI | `@providerprotocol/ai/openai` | ✓ | ✓ | ✓ |
+| Google | `@providerprotocol/ai/google` | ✓ | ✓ | ✓ |
+| xAI | `@providerprotocol/ai/xai` | ✓ | | ✓ |
+| Ollama | `@providerprotocol/ai/ollama` | ✓ | ✓ | |
+| OpenRouter | `@providerprotocol/ai/openrouter` | ✓ | ✓ | |
+
+API keys are loaded automatically from environment variables (`ANTHROPIC_API_KEY`, `OPENAI_API_KEY`, etc.).
+
+## LLM
 
-
+### Streaming
+
+```typescript
 const stream = claude.stream('Count to 5');
 for await (const event of stream) {
-  if (event.type === 'text_delta')
+  if (event.type === 'text_delta') {
+    process.stdout.write(event.delta.text);
+  }
 }
+const turn = await stream.turn;
+```
+
+### Multi-turn Conversations
+
+```typescript
+const history: Message[] = [];
 
-// Multi-turn
-const history = [];
 const t1 = await claude.generate(history, 'My name is Alice');
 history.push(...t1.messages);
+
 const t2 = await claude.generate(history, 'What is my name?');
+// Response: "Your name is Alice"
+```
+
+### Tools
 
-
+```typescript
 const turn = await claude.generate({
   tools: [{
    name: 'getWeather',
    description: 'Get weather for a location',
-    parameters: {
-
+    parameters: {
+      type: 'object',
+      properties: { location: { type: 'string' } },
+      required: ['location'],
+    },
+    run: async ({ location }) => ({ temp: 72, conditions: 'sunny' }),
  }],
-}, '
+}, 'What is the weather in Tokyo?');
+```
+
+### Structured Output
 
-
-
+```typescript
+import { llm } from '@providerprotocol/ai';
+import { openai } from '@providerprotocol/ai/openai';
+
+const extractor = llm({
   model: openai('gpt-4o'),
   structure: {
     type: 'object',
-    properties: {
+    properties: {
+      name: { type: 'string' },
+      age: { type: 'number' },
+    },
+    required: ['name', 'age'],
   },
-})
+});
+
+const turn = await extractor.generate('John is 30 years old');
 console.log(turn.data); // { name: 'John', age: 30 }
 ```
 
-
+### Multimodal Input
 
-
+```typescript
+import { Image } from '@providerprotocol/ai';
+
+const img = await Image.fromPath('./photo.png');
+const turn = await claude.generate([img, 'What is in this image?']);
+```
+
+## Embeddings
 
 ```typescript
 import { embedding } from '@providerprotocol/ai';
 import { openai } from '@providerprotocol/ai/openai';
-import { google } from '@providerprotocol/ai/google';
-import { ollama } from '@providerprotocol/ai/ollama';
-import { openrouter } from '@providerprotocol/ai/openrouter';
 
-// Single text embedding
 const embedder = embedding({ model: openai('text-embedding-3-small') });
-const result = await embedder.embed('Hello world');
-console.log(result.embeddings[0].vector); // [0.123, -0.456, ...]
-console.log(result.embeddings[0].dimensions); // 1536
 
-//
+// Single or batch
+const result = await embedder.embed('Hello world');
 const batch = await embedder.embed(['doc1', 'doc2', 'doc3']);
-console.log(batch.embeddings.length); // 3
 
-//
-
-  model: openai('text-embedding-3-small'),
-  params: { dimensions: 256 },
-});
-
-// Google with task type optimization
-const googleEmbed = embedding({
-  model: google('text-embedding-004'),
-  params: {
-    taskType: 'RETRIEVAL_DOCUMENT',
-    title: 'Important Document',
-  },
-});
-
-// Ollama local embeddings
-const localEmbed = embedding({
-  model: ollama('qwen3-embedding:4b'),
-});
-
-// OpenRouter (access multiple providers)
-const routerEmbed = embedding({
-  model: openrouter('openai/text-embedding-3-small'),
-});
+console.log(result.embeddings[0].vector); // [0.123, -0.456, ...]
+console.log(result.embeddings[0].dimensions); // 1536
 ```
 
-### Chunked
+### Chunked Processing
 
-For large
+For large datasets with progress tracking:
 
 ```typescript
-const embedder = embedding({ model: openai('text-embedding-3-small') });
-const documents = Array.from({ length: 1000 }, (_, i) => `Document ${i}`);
-
 const stream = embedder.embed(documents, {
   chunked: true,
   batchSize: 100,
@@ -120,91 +133,258 @@ const stream = embedder.embed(documents, {
 
 for await (const progress of stream) {
   console.log(`${progress.percent.toFixed(1)}% complete`);
-  console.log(`Processed ${progress.completed} of ${progress.total}`);
 }
 
-const
-console.log(`Total embeddings: ${finalResult.embeddings.length}`);
+const result = await stream.result;
 ```
 
-
-
-Each provider supports its native parameters passed through unchanged:
+## Image Generation
 
 ```typescript
-
-
-  model: openai('text-embedding-3-large'),
-  params: { dimensions: 1024, encoding_format: 'float' },
-});
-
-// Google: taskType, title, outputDimensionality
-embedding({
-  model: google('text-embedding-004'),
-  params: {
-    taskType: 'SEMANTIC_SIMILARITY',
-    outputDimensionality: 256,
-  },
-});
+import { image } from '@providerprotocol/ai';
+import { openai } from '@providerprotocol/ai/openai';
 
-
-
-  model: ollama('nomic-embed-text'),
-  params: { truncate: true, keep_alive: '5m' },
-});
+const dalle = image({ model: openai('dall-e-3') });
+const result = await dalle.generate('A sunset over mountains');
 
-
-embedding({
-  model: openrouter('openai/text-embedding-3-small'),
-  params: { dimensions: 512 },
-});
+console.log(result.images[0].image.toBase64());
 ```
 
-
-
-| Provider | Import | LLM | Embedding |
-|----------|--------|-----|-----------|
-| Anthropic | `@providerprotocol/ai/anthropic` | Yes | - |
-| OpenAI | `@providerprotocol/ai/openai` | Yes | Yes |
-| Google | `@providerprotocol/ai/google` | Yes | Yes |
-| Ollama | `@providerprotocol/ai/ollama` | Yes | Yes |
-| OpenRouter | `@providerprotocol/ai/openrouter` | Yes | Yes |
-| xAI (Grok) | `@providerprotocol/ai/xai` | Yes | - |
+### With Parameters
 
-
+```typescript
+const hd = image({
+  model: openai('dall-e-3'),
+  params: { size: '1792x1024', quality: 'hd', style: 'natural' },
+});
+```
 
-
+### Image Editing
 
 ```typescript
-import {
+import { image, Image } from '@providerprotocol/ai';
 
-
-const grok = llm({ model: xai('grok-3-fast') });
+const editor = image({ model: openai('dall-e-2') });
 
-
-const
+const source = await Image.fromPath('./photo.png');
+const mask = await Image.fromPath('./mask.png');
 
-
-
+const result = await editor.edit({
+  image: source,
+  mask,
+  prompt: 'Add a rainbow in the sky',
+});
 ```
 
 ## Configuration
 
 ```typescript
+import { llm } from '@providerprotocol/ai';
+import { openai } from '@providerprotocol/ai/openai';
 import { ExponentialBackoff, RoundRobinKeys } from '@providerprotocol/ai/http';
 
 const instance = llm({
   model: openai('gpt-4o'),
   config: {
-    apiKey: 'sk
+    apiKey: new RoundRobinKeys(['sk-key1', 'sk-key2']),
     timeout: 30000,
     retryStrategy: new ExponentialBackoff({ maxAttempts: 3 }),
   },
-  params: {
-
+  params: {
+    temperature: 0.7,
+    max_tokens: 1000,
+  },
+  system: 'You are a helpful assistant.',
+});
+```
+
+### Key Strategies
+
+```typescript
+import { RoundRobinKeys, WeightedKeys, DynamicKey } from '@providerprotocol/ai/http';
+
+// Cycle through keys evenly
+new RoundRobinKeys(['sk-1', 'sk-2', 'sk-3'])
+
+// Weighted selection (70% key1, 30% key2)
+new WeightedKeys([
+  { key: 'sk-1', weight: 70 },
+  { key: 'sk-2', weight: 30 },
+])
+
+// Dynamic fetching (secrets manager, etc.)
+new DynamicKey(async () => fetchKeyFromVault())
+```
+
+### Retry Strategies
+
+```typescript
+import {
+  ExponentialBackoff,
+  LinearBackoff,
+  NoRetry,
+  TokenBucket,
+  RetryAfterStrategy,
+} from '@providerprotocol/ai/http';
+
+// Exponential: 1s, 2s, 4s... (default)
+new ExponentialBackoff({ maxAttempts: 5, baseDelay: 1000, maxDelay: 30000 })
+
+// Linear: 1s, 2s, 3s...
+new LinearBackoff({ maxAttempts: 3, delay: 1000 })
+
+// Rate limiting with token bucket
+new TokenBucket({ maxTokens: 10, refillRate: 1 })
+
+// Respect server Retry-After headers
+new RetryAfterStrategy({ maxAttempts: 3, fallbackDelay: 5000 })
+
+// No retries
+new NoRetry()
+```
+
+## Tool Execution Control
+
+```typescript
+const turn = await claude.generate({
+  tools: [weatherTool, searchTool],
+  toolStrategy: {
+    maxIterations: 5,
+    onBeforeCall: (tool, params) => {
+      if (tool.name === 'dangerousTool') return false; // Block execution
+      return true;
+    },
+    onAfterCall: (tool, params, result) => {
+      console.log(`${tool.name} returned:`, result);
+    },
+    onError: (tool, params, error) => {
+      console.error(`${tool.name} failed:`, error);
+    },
+  },
+}, 'Search for recent news about AI');
+```
+
+## Thread Management
+
+```typescript
+import { Thread } from '@providerprotocol/ai';
+
+const thread = new Thread();
+
+thread.user('Hello!');
+const turn = await claude.generate(thread.toMessages(), 'How are you?');
+thread.append(turn);
+
+// Serialize for storage
+const json = thread.toJSON();
+localStorage.setItem('conversation', JSON.stringify(json));
+
+// Restore later
+const restored = Thread.fromJSON(JSON.parse(localStorage.getItem('conversation')));
+```
+
+## Error Handling
+
+All errors are normalized to `UPPError` with consistent error codes:
+
+```typescript
+import { UPPError } from '@providerprotocol/ai';
+
+try {
+  await claude.generate('Hello');
+} catch (error) {
+  if (error instanceof UPPError) {
+    switch (error.code) {
+      case 'RATE_LIMITED':
+        // Wait and retry
+        break;
+      case 'CONTEXT_LENGTH_EXCEEDED':
+        // Reduce input size
+        break;
+      case 'AUTHENTICATION_FAILED':
+        // Check API key
+        break;
+      case 'CONTENT_FILTERED':
+        // Content policy violation
+        break;
+    }
+  }
+}
+```
+
+**Error Codes:** `AUTHENTICATION_FAILED`, `RATE_LIMITED`, `CONTEXT_LENGTH_EXCEEDED`, `MODEL_NOT_FOUND`, `INVALID_REQUEST`, `INVALID_RESPONSE`, `CONTENT_FILTERED`, `QUOTA_EXCEEDED`, `PROVIDER_ERROR`, `NETWORK_ERROR`, `TIMEOUT`, `CANCELLED`
+
+## Proxy Server
+
+Build backend proxies to hide API keys from clients.
+
+### Server
+
+```typescript
+import { llm } from '@providerprotocol/ai';
+import { anthropic } from '@providerprotocol/ai/anthropic';
+import { webapi, parseBody, toJSON, toSSE } from '@providerprotocol/ai/proxy';
+
+const claude = llm({ model: anthropic('claude-sonnet-4-20250514') });
+
+Bun.serve({
+  port: 3000,
+  fetch: webapi(async (req) => {
+    const { messages, system, params } = parseBody(await req.json());
+
+    if (params?.stream) {
+      return toSSE(claude.stream(messages, { system }));
+    }
+
+    return toJSON(await claude.generate(messages, { system }));
+  }),
 });
 ```
 
+### Client
+
+```typescript
+import { llm } from '@providerprotocol/ai';
+import { proxy } from '@providerprotocol/ai/proxy';
+
+const claude = llm({ model: proxy('http://localhost:3000') });
+const turn = await claude.generate('Hello!');
+```
+
+## xAI API Modes
+
+xAI supports multiple API compatibility modes:
+
+```typescript
+import { xai } from '@providerprotocol/ai/xai';
+
+// Chat Completions (OpenAI-compatible, default)
+xai('grok-3-fast')
+
+// Responses API (stateful)
+xai('grok-3-fast', { api: 'responses' })
+
+// Messages API (Anthropic-compatible)
+xai('grok-3-fast', { api: 'messages' })
+```
+
+## TypeScript
+
+Full type safety with no `any` types. All provider parameters are typed:
+
+```typescript
+import type {
+  Turn,
+  Message,
+  Tool,
+  UPPError,
+  TokenUsage,
+  StreamEvent,
+  EmbeddingResult,
+  ImageResult,
+} from '@providerprotocol/ai';
+```
+
 ## License
 
 MIT
package/dist/anthropic/index.js
CHANGED
@@ -6,17 +6,19 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-SVYROCLD.js";
+} from "../chunk-UMKWXGO3.js";
 import {
   parseSSEStream
 } from "../chunk-Z7RBRCRN.js";
 import {
-  UPPError,
   doFetch,
   doStreamFetch,
   normalizeHttpError,
   resolveApiKey
-} from "../chunk-MOU4U3PO.js";
+} from "../chunk-5FEAOEXV.js";
+import {
+  UPPError
+} from "../chunk-DZQHVGNV.js";
 
 // src/providers/anthropic/transform.ts
 function transformRequest(request, modelId) {