@providerprotocol/ai 0.0.22 → 0.0.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/README.md +188 -6
  2. package/dist/anthropic/index.d.ts +1 -1
  3. package/dist/anthropic/index.js +95 -36
  4. package/dist/anthropic/index.js.map +1 -1
  5. package/dist/{chunk-7WYBJPJJ.js → chunk-55X3W2MN.js} +4 -3
  6. package/dist/chunk-55X3W2MN.js.map +1 -0
  7. package/dist/{chunk-M4BMM5IB.js → chunk-6AZVUI6H.js} +20 -4
  8. package/dist/chunk-6AZVUI6H.js.map +1 -0
  9. package/dist/chunk-73IIE3QT.js +120 -0
  10. package/dist/chunk-73IIE3QT.js.map +1 -0
  11. package/dist/{chunk-RFWLEFAB.js → chunk-QNJO7DSD.js} +61 -16
  12. package/dist/chunk-QNJO7DSD.js.map +1 -0
  13. package/dist/{chunk-RS7C25LS.js → chunk-SBCATNHA.js} +9 -5
  14. package/dist/chunk-SBCATNHA.js.map +1 -0
  15. package/dist/{chunk-NWS5IKNR.js → chunk-TOJCZMVU.js} +3 -12
  16. package/dist/chunk-TOJCZMVU.js.map +1 -0
  17. package/dist/{chunk-I2VHCGQE.js → chunk-Z6DKC37J.js} +6 -5
  18. package/dist/chunk-Z6DKC37J.js.map +1 -0
  19. package/dist/google/index.d.ts +36 -4
  20. package/dist/google/index.js +98 -53
  21. package/dist/google/index.js.map +1 -1
  22. package/dist/http/index.d.ts +2 -2
  23. package/dist/http/index.js +4 -4
  24. package/dist/index.d.ts +8 -6
  25. package/dist/index.js +92 -122
  26. package/dist/index.js.map +1 -1
  27. package/dist/ollama/index.d.ts +5 -2
  28. package/dist/ollama/index.js +47 -36
  29. package/dist/ollama/index.js.map +1 -1
  30. package/dist/openai/index.d.ts +1 -1
  31. package/dist/openai/index.js +117 -56
  32. package/dist/openai/index.js.map +1 -1
  33. package/dist/openrouter/index.d.ts +1 -1
  34. package/dist/openrouter/index.js +58 -53
  35. package/dist/openrouter/index.js.map +1 -1
  36. package/dist/{provider-DWEAzeM5.d.ts → provider-x4RocsnK.d.ts} +199 -54
  37. package/dist/proxy/index.d.ts +2 -2
  38. package/dist/proxy/index.js +11 -9
  39. package/dist/proxy/index.js.map +1 -1
  40. package/dist/{retry-DmPmqZL6.d.ts → retry-DTfjXXPh.d.ts} +1 -1
  41. package/dist/{stream-DbkLOIbJ.d.ts → stream-ITNFNnO4.d.ts} +95 -38
  42. package/dist/xai/index.d.ts +1 -1
  43. package/dist/xai/index.js +221 -97
  44. package/dist/xai/index.js.map +1 -1
  45. package/package.json +1 -1
  46. package/dist/chunk-7WYBJPJJ.js.map +0 -1
  47. package/dist/chunk-I2VHCGQE.js.map +0 -1
  48. package/dist/chunk-M4BMM5IB.js.map +0 -1
  49. package/dist/chunk-NWS5IKNR.js.map +0 -1
  50. package/dist/chunk-RFWLEFAB.js.map +0 -1
  51. package/dist/chunk-RS7C25LS.js.map +0 -1
package/README.md CHANGED
@@ -44,6 +44,31 @@ for await (const event of stream) {
  const turn = await stream.turn;
  ```
 
+ **Stream Control:**
+
+ ```typescript
+ const stream = claude.stream('Write a long story');
+
+ // Abort the stream at any time
+ setTimeout(() => stream.abort(), 5000);
+
+ for await (const event of stream) {
+   // Process events until abort
+ }
+ ```
+
+ **Stream Events:**
+
+ | Event | Description |
+ |-------|-------------|
+ | `text_delta` | Incremental text output |
+ | `reasoning_delta` | Incremental reasoning/thinking output |
+ | `tool_call_delta` | Tool call arguments being streamed |
+ | `tool_execution_start` | Tool execution has started |
+ | `tool_execution_end` | Tool execution has completed |
+ | `message_start` / `message_stop` | Message boundaries |
+ | `content_block_start` / `content_block_stop` | Content block boundaries |
+
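For illustration, a minimal consumer that branches on these event types could look like the sketch below. It assumes the delta payloads carry `text` for text/reasoning deltas and `argumentsJson` for tool-call deltas, and that `claude` is the instance from the earlier examples.

```typescript
const stream = claude.stream('Summarize the quarterly report');

let text = '';
let reasoning = '';
let toolArgsJson = '';

for await (const event of stream) {
  switch (event.type) {
    case 'text_delta':
      text += event.delta?.text ?? '';                   // incremental text output
      break;
    case 'reasoning_delta':
      reasoning += event.delta?.text ?? '';              // incremental thinking output
      break;
    case 'tool_call_delta':
      toolArgsJson += event.delta?.argumentsJson ?? '';  // partial tool-call JSON
      break;
    case 'message_stop':
      break;                                             // message boundary reached
  }
}

const turn = await stream.turn; // the final turn is still available after streaming
```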
  ### Multi-turn Conversations
 
  ```typescript
@@ -104,6 +129,60 @@ const img = await Image.fromPath('./photo.png');
  const turn = await claude.generate([img, 'What is in this image?']);
  ```
 
+ ## Anthropic Beta Features
+
+ Anthropic provides beta features through the `betas` export. Enable them at the model level:
+
+ ```typescript
+ import { anthropic, betas } from '@providerprotocol/ai/anthropic';
+ import { llm } from '@providerprotocol/ai';
+
+ // Native structured outputs with guaranteed JSON schema conformance
+ const model = llm({
+   model: anthropic('claude-sonnet-4-20250514', {
+     betas: [betas.structuredOutputs],
+   }),
+   structure: {
+     type: 'object',
+     properties: { answer: { type: 'string' } },
+     required: ['answer'],
+   },
+ });
+
+ // Extended thinking with interleaved tool calls
+ const thinker = llm({
+   model: anthropic('claude-sonnet-4-20250514', {
+     betas: [betas.interleavedThinking],
+   }),
+   params: {
+     thinking: { type: 'enabled', budget_tokens: 10000 },
+   },
+ });
+ ```
+
+ **Available Beta Features:**
+
+ | Beta | Description |
+ |------|-------------|
+ | `structuredOutputs` | Guaranteed JSON schema conformance for responses |
+ | `interleavedThinking` | Claude can think between tool calls |
+ | `devFullThinking` | Developer mode for full thinking visibility |
+ | `effort` | Control response thoroughness vs efficiency (Opus 4.5) |
+ | `computerUse` | Mouse, keyboard, screenshot control (Claude 4) |
+ | `codeExecution` | Python/Bash sandbox execution |
+ | `tokenEfficientTools` | Up to 70% token reduction for tool calls |
+ | `fineGrainedToolStreaming` | Stream tool args without buffering |
+ | `output128k` | 128K token output length |
+ | `context1m` | 1 million token context window (Sonnet 4) |
+ | `promptCaching` | Reduced latency and costs via caching |
+ | `extendedCacheTtl` | 1-hour cache TTL (vs 5-minute default) |
+ | `advancedToolUse` | Tool Search, Programmatic Tool Calling |
+ | `mcpClient` | Connect to remote MCP servers |
+ | `filesApi` | Upload and manage files |
+ | `pdfs` | PDF document support |
+ | `messageBatches` | Async batch processing at 50% cost |
+ | `skills` | Agent Skills (PowerPoint, Excel, Word, PDF) |
+
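Several betas can be enabled on the same model reference. A short sketch combining entries from the table above (availability varies by model):

```typescript
import { anthropic, betas } from '@providerprotocol/ai/anthropic';
import { llm } from '@providerprotocol/ai';

// Prompt caching together with the 1M-token context window (Sonnet 4)
const longContext = llm({
  model: anthropic('claude-sonnet-4-20250514', {
    betas: [betas.promptCaching, betas.context1m],
  }),
});
```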
  ## Embeddings
 
  ```typescript
@@ -198,6 +277,51 @@ const instance = llm({
  });
  ```
 
+ ### System Prompts
+
+ System prompts can be a simple string or a provider-specific array for advanced features:
+
+ ```typescript
+ // Simple string (all providers)
+ const simple = llm({
+   model: anthropic('claude-sonnet-4-20250514'),
+   system: 'You are a helpful assistant.',
+ });
+
+ // Anthropic cache_control format
+ import { anthropic, betas } from '@providerprotocol/ai/anthropic';
+
+ const cached = llm({
+   model: anthropic('claude-sonnet-4-20250514', {
+     betas: [betas.promptCaching],
+   }),
+   system: [
+     { type: 'text', text: 'Large context document...', cache_control: { type: 'ephemeral' } },
+     { type: 'text', text: 'Instructions...' },
+   ],
+ });
+ ```
+
+ ### Provider Config Options
+
+ ```typescript
+ interface ProviderConfig {
+   apiKey?: string | (() => Promise<string>) | KeyStrategy; // API key, async getter, or strategy
+   baseUrl?: string;                   // Custom API endpoint
+   timeout?: number;                   // Per-attempt timeout (ms)
+   retryStrategy?: RetryStrategy;      // Retry behavior
+   headers?: Record<string, string>;   // Custom headers (merged with provider defaults)
+   fetch?: typeof fetch;               // Custom fetch implementation
+   apiVersion?: string;                // API version override
+   retryAfterMaxSeconds?: number;      // Cap for Retry-After header (default: 3600)
+ }
+ ```
+
+ **Notes:**
+ - `timeout` applies per attempt; total time can exceed this with retries
+ - `headers` are merged with model-level headers (explicit config takes precedence)
+ - `retryAfterMaxSeconds` prevents honoring excessively long Retry-After values
+
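A sketch of these options in use, assuming (as with `betas`) that the config fields are accepted in the provider factory's options object; `loadKeyFromVault` is a hypothetical helper:

```typescript
import { anthropic } from '@providerprotocol/ai/anthropic';

// Hypothetical helper that fetches a secret asynchronously
declare function loadKeyFromVault(name: string): Promise<string>;

const provider = anthropic('claude-sonnet-4-20250514', {
  apiKey: async () => loadKeyFromVault('anthropic'),   // async key getter instead of a plain string
  timeout: 30_000,                                      // per attempt; total time can grow with retries
  headers: { 'x-request-source': 'docs-example' },      // merged with provider defaults
});
```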
  ### Key Strategies
 
  ```typescript
@@ -227,8 +351,13 @@ import {
  RetryAfterStrategy,
  } from '@providerprotocol/ai/http';
 
- // Exponential: 1s, 2s, 4s... (default)
- new ExponentialBackoff({ maxAttempts: 5, baseDelay: 1000, maxDelay: 30000 })
+ // Exponential: 1s, 2s, 4s...
+ new ExponentialBackoff({
+   maxAttempts: 5,
+   baseDelay: 1000,
+   maxDelay: 30000,
+   jitter: true, // Randomize delays to prevent thundering herd (default: true)
+ })
 
  // Linear: 1s, 2s, 3s...
  new LinearBackoff({ maxAttempts: 3, delay: 1000 })
@@ -243,6 +372,8 @@ new RetryAfterStrategy({ maxAttempts: 3, fallbackDelay: 5000 })
  new NoRetry()
  ```
 
+ **Retryable Errors:** `RATE_LIMITED`, `NETWORK_ERROR`, `TIMEOUT`, `PROVIDER_ERROR`
+
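A strategy instance is typically wired in through the `retryStrategy` field of the provider config shown earlier; a sketch, under the same assumption about where that config object is passed:

```typescript
import { anthropic } from '@providerprotocol/ai/anthropic';
import { ExponentialBackoff } from '@providerprotocol/ai/http';

const resilient = anthropic('claude-sonnet-4-20250514', {
  // Retries apply to RATE_LIMITED, NETWORK_ERROR, TIMEOUT, and PROVIDER_ERROR
  retryStrategy: new ExponentialBackoff({
    maxAttempts: 5,
    baseDelay: 1000,  // 1s, 2s, 4s, ... capped at maxDelay
    maxDelay: 30000,
    jitter: true,
  }),
});
```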
  ## Tool Execution Control
 
  ```typescript
@@ -294,6 +425,12 @@ try {
    await claude.generate('Hello');
  } catch (error) {
    if (error instanceof UPPError) {
+     console.log(error.code);       // 'RATE_LIMITED'
+     console.log(error.provider);   // 'anthropic'
+     console.log(error.modality);   // 'llm'
+     console.log(error.statusCode); // 429
+     console.log(error.cause);      // Original error (if any)
+
      switch (error.code) {
        case 'RATE_LIMITED':
          // Wait and retry
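Putting the pieces together, a small retry wrapper keyed on `error.code` might look like the sketch below; the helper name and fixed backoff are illustrative, not part of the package.

```typescript
import { UPPError } from '@providerprotocol/ai';

const RETRYABLE = new Set(['RATE_LIMITED', 'NETWORK_ERROR', 'TIMEOUT', 'PROVIDER_ERROR']);

// Hypothetical helper: retry a call when the UPPError code is retryable.
async function withRetries<T>(fn: () => Promise<T>, attempts = 3): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await fn();
    } catch (error) {
      if (!(error instanceof UPPError) || !RETRYABLE.has(error.code) || attempt >= attempts) {
        throw error;
      }
      await new Promise((resolve) => setTimeout(resolve, 1000 * attempt)); // simple linear backoff
    }
  }
}

const turn = await withRetries(() => claude.generate('Hello'));
```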
@@ -385,7 +522,7 @@ Server adapters for Express, Fastify, and Nuxt/H3:
 
  ```typescript
  // Express
- import { express as expressAdapter } from '@providerprotocol/ai/proxy/server';
+ import { express as expressAdapter, parseBody } from '@providerprotocol/ai/proxy';
  app.post('/ai', authMiddleware, async (req, res) => {
    const { messages, system, params } = parseBody(req.body);
    if (params?.stream) {
@@ -396,7 +533,7 @@ app.post('/ai', authMiddleware, async (req, res) => {
  });
 
  // Fastify
- import { fastify as fastifyAdapter } from '@providerprotocol/ai/proxy/server';
+ import { fastify as fastifyAdapter, parseBody } from '@providerprotocol/ai/proxy';
  app.post('/ai', async (request, reply) => {
    const { messages, system, params } = parseBody(request.body);
    if (params?.stream) {
@@ -406,7 +543,7 @@ app.post('/ai', async (request, reply) => {
  });
 
  // Nuxt/H3 (server/api/ai.post.ts)
- import { h3 as h3Adapter } from '@providerprotocol/ai/proxy/server';
+ import { h3 as h3Adapter, parseBody } from '@providerprotocol/ai/proxy';
  export default defineEventHandler(async (event) => {
    const { messages, system, params } = parseBody(await readBody(event));
    if (params?.stream) {
@@ -441,23 +578,68 @@ xai('grok-3-fast', { api: 'responses' })
  xai('grok-3-fast', { api: 'messages' })
  ```
 
+ ## Alternative Import Style
+
+ Use the `ai` namespace for a grouped import style:
+
+ ```typescript
+ import { ai } from '@providerprotocol/ai';
+ import { openai } from '@providerprotocol/ai/openai';
+
+ const model = ai.llm({ model: openai('gpt-4o') });
+ const embedder = ai.embedding({ model: openai('text-embedding-3-small') });
+ const dalle = ai.image({ model: openai('dall-e-3') });
+ ```
+
  ## TypeScript
 
  Full type safety with no `any` types. All provider parameters are typed:
 
  ```typescript
  import type {
+   // Core types
    Turn,
    Message,
    Tool,
-   UPPError,
    TokenUsage,
+
+   // Streaming
    StreamEvent,
+   StreamResult,
+
+   // Modality results
    EmbeddingResult,
    ImageResult,
+
+   // Errors
+   UPPError,
+   ErrorCode,
+
+   // Configuration
+   ProviderConfig,
+   KeyStrategy,
+   RetryStrategy,
+   LLMCapabilities,
  } from '@providerprotocol/ai';
  ```
 
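These types also support small reusable helpers; for example, a typed collector for text deltas (a sketch that assumes `StreamEvent` exposes the `type` and `delta.text` fields used in the streaming section):

```typescript
import type { StreamEvent } from '@providerprotocol/ai';

// Accumulate only text deltas from any stream of events.
async function collectText(events: AsyncIterable<StreamEvent>): Promise<string> {
  let text = '';
  for await (const event of events) {
    if (event.type === 'text_delta' && typeof event.delta?.text === 'string') {
      text += event.delta.text;
    }
  }
  return text;
}
```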
+ ### Custom Providers
+
+ Build custom providers with `createProvider`:
+
+ ```typescript
+ import { createProvider } from '@providerprotocol/ai';
+
+ const myProvider = createProvider({
+   name: 'my-provider',
+   version: '1.0.0',
+   handlers: {
+     llm: myLLMHandler,
+     embedding: myEmbeddingHandler,
+   },
+ });
+ ```
+
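Once created, a custom provider is used like the bundled ones, assuming the returned value is callable as a model factory the way `anthropic()` and `openai()` are:

```typescript
import { llm } from '@providerprotocol/ai';

// Assumption: the provider returned by createProvider() acts as a model factory.
const model = llm({ model: myProvider('my-model-id') });
const turn = await model.generate('Hello from a custom provider');
```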
  ## License
 
  MIT
package/dist/anthropic/index.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { g as Provider } from '../provider-DWEAzeM5.js';
1
+ import { g as Provider } from '../provider-x4RocsnK.js';
2
2
 
3
3
  /**
4
4
  * @fileoverview Anthropic API type definitions.
package/dist/anthropic/index.js CHANGED
@@ -1,6 +1,9 @@
1
1
  import {
2
2
  parseJsonResponse
3
- } from "../chunk-I2VHCGQE.js";
3
+ } from "../chunk-Z6DKC37J.js";
4
+ import {
5
+ StreamEventType
6
+ } from "../chunk-73IIE3QT.js";
4
7
  import {
5
8
  AssistantMessage,
6
9
  createProvider,
@@ -8,20 +11,22 @@ import {
8
11
  isAssistantMessage,
9
12
  isToolResultMessage,
10
13
  isUserMessage
11
- } from "../chunk-M4BMM5IB.js";
14
+ } from "../chunk-6AZVUI6H.js";
12
15
  import {
13
16
  parseSSEStream
14
- } from "../chunk-NWS5IKNR.js";
17
+ } from "../chunk-TOJCZMVU.js";
15
18
  import {
16
19
  resolveApiKey
17
- } from "../chunk-7WYBJPJJ.js";
20
+ } from "../chunk-55X3W2MN.js";
18
21
  import {
22
+ ErrorCode,
23
+ ModalityType,
19
24
  UPPError,
20
25
  doFetch,
21
26
  doStreamFetch,
22
27
  normalizeHttpError,
23
28
  toError
24
- } from "../chunk-RFWLEFAB.js";
29
+ } from "../chunk-QNJO7DSD.js";
25
30
 
26
31
  // src/providers/anthropic/types.ts
27
32
  var betas = {
@@ -206,9 +211,9 @@ function normalizeSystem(system) {
206
211
  if (!Array.isArray(system)) {
207
212
  throw new UPPError(
208
213
  "System prompt must be a string or an array of text blocks",
209
- "INVALID_REQUEST",
214
+ ErrorCode.InvalidRequest,
210
215
  "anthropic",
211
- "llm"
216
+ ModalityType.LLM
212
217
  );
213
218
  }
214
219
  const blocks = [];
@@ -216,26 +221,26 @@ function normalizeSystem(system) {
216
221
  if (!block || typeof block !== "object") {
217
222
  throw new UPPError(
218
223
  'System prompt array must contain objects with type "text"',
219
- "INVALID_REQUEST",
224
+ ErrorCode.InvalidRequest,
220
225
  "anthropic",
221
- "llm"
226
+ ModalityType.LLM
222
227
  );
223
228
  }
224
229
  const candidate = block;
225
230
  if (candidate.type !== "text" || typeof candidate.text !== "string") {
226
231
  throw new UPPError(
227
232
  'Anthropic system blocks must be of type "text" with a string text field',
228
- "INVALID_REQUEST",
233
+ ErrorCode.InvalidRequest,
229
234
  "anthropic",
230
- "llm"
235
+ ModalityType.LLM
231
236
  );
232
237
  }
233
238
  if (candidate.cache_control !== void 0 && !isValidCacheControl(candidate.cache_control)) {
234
239
  throw new UPPError(
235
240
  "Invalid cache_control for Anthropic system prompt",
236
- "INVALID_REQUEST",
241
+ ErrorCode.InvalidRequest,
237
242
  "anthropic",
238
- "llm"
243
+ ModalityType.LLM
239
244
  );
240
245
  }
241
246
  blocks.push(block);
@@ -272,9 +277,26 @@ function transformMessage(message) {
272
277
  }
273
278
  if (isAssistantMessage(message)) {
274
279
  const validContent = filterValidContent(message.content);
275
- const content = validContent.map(
276
- (block, index, arr) => transformContentBlock(block, index === arr.length - 1 && !message.toolCalls?.length ? cacheControl : void 0)
277
- );
280
+ const content = [];
281
+ const anthropicMeta = message.metadata?.anthropic;
282
+ const thinkingSignatures = anthropicMeta?.thinkingSignatures;
283
+ let reasoningIndex = 0;
284
+ for (let i = 0; i < validContent.length; i++) {
285
+ const block = validContent[i];
286
+ const isLastNonToolBlock = i === validContent.length - 1 && !message.toolCalls?.length;
287
+ if (block.type === "reasoning") {
288
+ const signatureFromArray = thinkingSignatures?.[reasoningIndex];
289
+ const signature = Array.isArray(thinkingSignatures) ? typeof signatureFromArray === "string" ? signatureFromArray : void 0 : anthropicMeta?.thinkingSignature;
290
+ reasoningIndex += 1;
291
+ content.push({
292
+ type: "thinking",
293
+ thinking: block.text,
294
+ ...signature ? { signature } : {}
295
+ });
296
+ } else {
297
+ content.push(transformContentBlock(block, isLastNonToolBlock ? cacheControl : void 0));
298
+ }
299
+ }
278
300
  if (message.toolCalls) {
279
301
  for (let i = 0; i < message.toolCalls.length; i++) {
280
302
  const call = message.toolCalls[i];
@@ -377,11 +399,20 @@ function transformTool(tool) {
377
399
  };
378
400
  }
379
401
  function transformResponse(data, useNativeStructuredOutput = false) {
402
+ const reasoningContent = [];
380
403
  const textContent = [];
381
404
  const toolCalls = [];
382
405
  let structuredData;
406
+ let thinkingSignature;
407
+ const thinkingSignatures = [];
383
408
  for (const block of data.content) {
384
- if (block.type === "text") {
409
+ if (block.type === "thinking") {
410
+ reasoningContent.push({ type: "reasoning", text: block.thinking });
411
+ if (block.signature) {
412
+ thinkingSignature = block.signature;
413
+ }
414
+ thinkingSignatures.push(block.signature ?? null);
415
+ } else if (block.type === "text") {
385
416
  textContent.push({ type: "text", text: block.text });
386
417
  if (useNativeStructuredOutput && structuredData === void 0) {
387
418
  try {
@@ -414,8 +445,10 @@ ${block.content.content}\`\`\`
414
445
  }
415
446
  }
416
447
  }
448
+ const allContent = [...reasoningContent, ...textContent];
449
+ const hasThinkingSignatures = thinkingSignatures.some((signature) => signature);
417
450
  const message = new AssistantMessage(
418
- textContent,
451
+ allContent,
419
452
  toolCalls.length > 0 ? toolCalls : void 0,
420
453
  {
421
454
  id: data.id,
@@ -423,7 +456,9 @@ ${block.content.content}\`\`\`
423
456
  anthropic: {
424
457
  stop_reason: data.stop_reason,
425
458
  stop_sequence: data.stop_sequence,
426
- model: data.model
459
+ model: data.model,
460
+ thinkingSignature,
461
+ ...hasThinkingSignatures ? { thinkingSignatures } : {}
427
462
  }
428
463
  }
429
464
  }
@@ -463,10 +498,12 @@ function transformStreamEvent(event, state) {
463
498
  state.inputTokens = event.message.usage.input_tokens;
464
499
  state.cacheReadTokens = event.message.usage.cache_read_input_tokens ?? 0;
465
500
  state.cacheWriteTokens = event.message.usage.cache_creation_input_tokens ?? 0;
466
- events.push({ type: "message_start", index: 0, delta: {} });
501
+ events.push({ type: StreamEventType.MessageStart, index: 0, delta: {} });
467
502
  break;
468
503
  case "content_block_start":
469
- if (event.content_block.type === "text") {
504
+ if (event.content_block.type === "thinking") {
505
+ state.content[event.index] = { type: "thinking", thinking: "" };
506
+ } else if (event.content_block.type === "text") {
470
507
  state.content[event.index] = { type: "text", text: "" };
471
508
  } else if (event.content_block.type === "tool_use") {
472
509
  state.content[event.index] = {
@@ -497,7 +534,7 @@ function transformStreamEvent(event, state) {
497
534
  fileContent: resultBlock.content?.content ?? ""
498
535
  };
499
536
  }
500
- events.push({ type: "content_block_start", index: event.index, delta: {} });
537
+ events.push({ type: StreamEventType.ContentBlockStart, index: event.index, delta: {} });
501
538
  break;
502
539
  case "content_block_delta": {
503
540
  const delta = event.delta;
@@ -506,7 +543,7 @@ function transformStreamEvent(event, state) {
506
543
  state.content[event.index].text = (state.content[event.index].text ?? "") + delta.text;
507
544
  }
508
545
  events.push({
509
- type: "text_delta",
546
+ type: StreamEventType.TextDelta,
510
547
  index: event.index,
511
548
  delta: { text: delta.text }
512
549
  });
@@ -517,7 +554,7 @@ function transformStreamEvent(event, state) {
517
554
  state.content[event.index].input = (state.content[event.index].input ?? "") + delta.partial_json;
518
555
  }
519
556
  events.push({
520
- type: "tool_call_delta",
557
+ type: StreamEventType.ToolCallDelta,
521
558
  index: event.index,
522
559
  delta: {
523
560
  argumentsJson: delta.partial_json,
@@ -528,24 +565,33 @@ function transformStreamEvent(event, state) {
528
565
  break;
529
566
  }
530
567
  if (delta.type === "thinking_delta") {
568
+ if (state.content[event.index]) {
569
+ state.content[event.index].thinking = (state.content[event.index].thinking ?? "") + delta.thinking;
570
+ }
531
571
  events.push({
532
- type: "reasoning_delta",
572
+ type: StreamEventType.ReasoningDelta,
533
573
  index: event.index,
534
574
  delta: { text: delta.thinking }
535
575
  });
536
576
  break;
537
577
  }
578
+ if (delta.type === "signature_delta") {
579
+ if (state.content[event.index]) {
580
+ state.content[event.index].signature = delta.signature;
581
+ }
582
+ break;
583
+ }
538
584
  break;
539
585
  }
540
586
  case "content_block_stop":
541
- events.push({ type: "content_block_stop", index: event.index, delta: {} });
587
+ events.push({ type: StreamEventType.ContentBlockStop, index: event.index, delta: {} });
542
588
  break;
543
589
  case "message_delta":
544
590
  state.stopReason = event.delta.stop_reason;
545
591
  state.outputTokens = event.usage.output_tokens;
546
592
  return [];
547
593
  case "message_stop":
548
- events.push({ type: "message_stop", index: 0, delta: {} });
594
+ events.push({ type: StreamEventType.MessageStop, index: 0, delta: {} });
549
595
  break;
550
596
  case "ping":
551
597
  case "error":
@@ -556,12 +602,21 @@ function transformStreamEvent(event, state) {
556
602
  return events;
557
603
  }
558
604
  function buildResponseFromState(state, useNativeStructuredOutput = false) {
605
+ const reasoningContent = [];
559
606
  const textContent = [];
560
607
  const toolCalls = [];
561
608
  let structuredData;
609
+ let thinkingSignature;
610
+ const thinkingSignatures = [];
562
611
  for (const block of state.content) {
563
612
  if (!block) continue;
564
- if (block.type === "text" && block.text) {
613
+ if (block.type === "thinking" && block.thinking) {
614
+ reasoningContent.push({ type: "reasoning", text: block.thinking });
615
+ if (block.signature) {
616
+ thinkingSignature = block.signature;
617
+ }
618
+ thinkingSignatures.push(block.signature ?? null);
619
+ } else if (block.type === "text" && block.text) {
565
620
  textContent.push({ type: "text", text: block.text });
566
621
  if (useNativeStructuredOutput && structuredData === void 0) {
567
622
  try {
@@ -597,16 +652,20 @@ ${block.fileContent}\`\`\`
597
652
  ` });
598
653
  }
599
654
  }
655
+ const allContent = [...reasoningContent, ...textContent];
656
+ const hasThinkingSignatures = thinkingSignatures.some((signature) => signature);
600
657
  const messageId = state.messageId || generateId();
601
658
  const message = new AssistantMessage(
602
- textContent,
659
+ allContent,
603
660
  toolCalls.length > 0 ? toolCalls : void 0,
604
661
  {
605
662
  id: messageId,
606
663
  metadata: {
607
664
  anthropic: {
608
665
  stop_reason: state.stopReason,
609
- model: state.model
666
+ model: state.model,
667
+ thinkingSignature,
668
+ ...hasThinkingSignatures ? { thinkingSignatures } : {}
610
669
  }
611
670
  }
612
671
  }
@@ -657,9 +716,9 @@ function createLLMHandler() {
657
716
  if (!providerRef) {
658
717
  throw new UPPError(
659
718
  "Provider reference not set. Handler must be used with createProvider().",
660
- "INVALID_REQUEST",
719
+ ErrorCode.InvalidRequest,
661
720
  "anthropic",
662
- "llm"
721
+ ModalityType.LLM
663
722
  );
664
723
  }
665
724
  const model = {
@@ -764,9 +823,9 @@ function createLLMHandler() {
764
823
  if (!response.body) {
765
824
  const error = new UPPError(
766
825
  "No response body for streaming request",
767
- "PROVIDER_ERROR",
826
+ ErrorCode.ProviderError,
768
827
  "anthropic",
769
- "llm"
828
+ ModalityType.LLM
770
829
  );
771
830
  responseReject(error);
772
831
  throw error;
@@ -777,9 +836,9 @@ function createLLMHandler() {
777
836
  if (event.type === "error") {
778
837
  const error = new UPPError(
779
838
  event.error.message,
780
- "PROVIDER_ERROR",
839
+ ErrorCode.ProviderError,
781
840
  "anthropic",
782
- "llm"
841
+ ModalityType.LLM
783
842
  );
784
843
  responseReject(error);
785
844
  throw error;