@openai/agents-extensions 0.3.7 → 0.3.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/aiSdk.d.ts CHANGED
@@ -1,5 +1,18 @@
1
1
  import type { LanguageModelV2, LanguageModelV2ToolChoice } from '@ai-sdk/provider';
2
2
  import { Model, ModelRequest, ResponseStreamEvent, Usage, ModelSettingsToolChoice } from '@openai/agents';
3
+ type LanguageModelV3Compatible = {
4
+ specificationVersion: string;
5
+ provider: string;
6
+ modelId: string;
7
+ supportedUrls: any;
8
+ doGenerate: (options: any) => PromiseLike<any> | any;
9
+ doStream: (options: any) => PromiseLike<{
10
+ stream: AsyncIterable<any>;
11
+ }> | {
12
+ stream: AsyncIterable<any>;
13
+ } | any;
14
+ };
15
+ type LanguageModelCompatible = LanguageModelV2 | LanguageModelV3Compatible;
3
16
  /**
4
17
  * Wraps a model from the AI SDK that adheres to the LanguageModelV2 spec to be used as a model
5
18
  * in the OpenAI Agents SDK to use other models.
@@ -26,25 +39,12 @@ import { Model, ModelRequest, ResponseStreamEvent, Usage, ModelSettingsToolChoic
26
39
  */
27
40
  export declare class AiSdkModel implements Model {
28
41
  #private;
29
- constructor(model: LanguageModelV2);
42
+ constructor(model: LanguageModelCompatible);
30
43
  getResponse(request: ModelRequest): Promise<{
31
44
  readonly responseId: any;
32
45
  readonly usage: Usage;
33
46
  readonly output: import("@openai/agents").AgentOutputItem[];
34
- readonly providerData: {
35
- content: Array<import("@ai-sdk/provider").LanguageModelV2Content>;
36
- finishReason: import("@ai-sdk/provider").LanguageModelV2FinishReason;
37
- usage: import("@ai-sdk/provider").LanguageModelV2Usage;
38
- providerMetadata?: import("@ai-sdk/provider").SharedV2ProviderMetadata;
39
- request?: {
40
- body?: unknown;
41
- };
42
- response?: import("@ai-sdk/provider").LanguageModelV2ResponseMetadata & {
43
- headers?: import("@ai-sdk/provider").SharedV2Headers;
44
- body?: unknown;
45
- };
46
- warnings: Array<import("@ai-sdk/provider").LanguageModelV2CallWarning>;
47
- };
47
+ readonly providerData: any;
48
48
  }>;
49
49
  getStreamedResponse(request: ModelRequest): AsyncIterable<ResponseStreamEvent>;
50
50
  }
@@ -72,6 +72,7 @@ export declare class AiSdkModel implements Model {
72
72
  * @param model - The Vercel AI SDK model to wrap.
73
73
  * @returns The wrapped model.
74
74
  */
75
- export declare function aisdk(model: LanguageModelV2): AiSdkModel;
75
+ export declare function aisdk(model: LanguageModelCompatible): AiSdkModel;
76
76
  export declare function parseArguments(args: string | undefined | null): any;
77
77
  export declare function toolChoiceToLanguageV2Format(toolChoice: ModelSettingsToolChoice | undefined): LanguageModelV2ToolChoice | undefined;
78
+ export {};
package/dist/aiSdk.js CHANGED
@@ -10,6 +10,26 @@ exports.toolChoiceToLanguageV2Format = toolChoiceToLanguageV2Format;
10
10
  const agents_1 = require("@openai/agents");
11
11
  const utils_1 = require("@openai/agents/utils");
12
12
  const utils_2 = require("@openai/agents/utils");
13
+ function getSpecVersion(model) {
14
+ const spec = model?.specificationVersion;
15
+ if (!spec) {
16
+ // Default to v2 for backward compatibility with older AI SDK model wrappers.
17
+ return 'v2';
18
+ }
19
+ if (spec === 'v2') {
20
+ return 'v2';
21
+ }
22
+ if (typeof spec === 'string' && spec.toLowerCase().startsWith('v3')) {
23
+ return 'v3';
24
+ }
25
+ return 'unknown';
26
+ }
27
+ function ensureSupportedModel(model) {
28
+ const spec = getSpecVersion(model);
29
+ if (spec === 'unknown') {
30
+ throw new agents_1.UserError(`Unsupported AI SDK specificationVersion: ${String(model?.specificationVersion)}. Only v2 and v3 are supported.`);
31
+ }
32
+ }
13
33
  /**
14
34
  * @internal
15
35
  * Converts a list of model items to a list of language model V2 messages.
@@ -18,23 +38,52 @@ const utils_2 = require("@openai/agents/utils");
18
38
  * @param items - The items to convert.
19
39
  * @returns The list of language model V2 messages.
20
40
  */
21
- function itemsToLanguageV2Messages(model, items) {
41
+ function itemsToLanguageV2Messages(model, items, modelSettings) {
22
42
  const messages = [];
23
43
  let currentAssistantMessage;
44
+ let pendingReasonerReasoning;
45
+ const flushPendingReasonerReasoningToMessages = () => {
46
+ if (!(shouldIncludeReasoningContent(model, modelSettings) &&
47
+ pendingReasonerReasoning)) {
48
+ return;
49
+ }
50
+ const reasoningPart = {
51
+ type: 'reasoning',
52
+ text: pendingReasonerReasoning.text,
53
+ providerOptions: pendingReasonerReasoning.providerOptions,
54
+ };
55
+ if (currentAssistantMessage &&
56
+ Array.isArray(currentAssistantMessage.content) &&
57
+ currentAssistantMessage.role === 'assistant') {
58
+ currentAssistantMessage.content.unshift(reasoningPart);
59
+ currentAssistantMessage.providerOptions = {
60
+ ...pendingReasonerReasoning.providerOptions,
61
+ ...currentAssistantMessage.providerOptions,
62
+ };
63
+ }
64
+ else {
65
+ messages.push({
66
+ role: 'assistant',
67
+ content: [reasoningPart],
68
+ providerOptions: pendingReasonerReasoning.providerOptions,
69
+ });
70
+ }
71
+ pendingReasonerReasoning = undefined;
72
+ };
24
73
  for (const item of items) {
25
74
  if (item.type === 'message' || typeof item.type === 'undefined') {
26
75
  const { role, content, providerData } = item;
27
76
  if (role === 'system') {
77
+ flushPendingReasonerReasoningToMessages();
28
78
  messages.push({
29
79
  role: 'system',
30
80
  content: content,
31
- providerOptions: {
32
- ...(providerData ?? {}),
33
- },
81
+ providerOptions: toProviderOptions(providerData, model),
34
82
  });
35
83
  continue;
36
84
  }
37
85
  if (role === 'user') {
86
+ flushPendingReasonerReasoningToMessages();
38
87
  messages.push({
39
88
  role,
40
89
  content: typeof content === 'string'
@@ -45,9 +94,7 @@ function itemsToLanguageV2Messages(model, items) {
45
94
  return {
46
95
  type: 'text',
47
96
  text: c.text,
48
- providerOptions: {
49
- ...(contentProviderData ?? {}),
50
- },
97
+ providerOptions: toProviderOptions(contentProviderData, model),
51
98
  };
52
99
  }
53
100
  if (c.type === 'input_image') {
@@ -64,9 +111,7 @@ function itemsToLanguageV2Messages(model, items) {
64
111
  type: 'file',
65
112
  data: url,
66
113
  mediaType: 'image/*',
67
- providerOptions: {
68
- ...(contentProviderData ?? {}),
69
- },
114
+ providerOptions: toProviderOptions(contentProviderData, model),
70
115
  };
71
116
  }
72
117
  if (c.type === 'input_file') {
@@ -74,9 +119,7 @@ function itemsToLanguageV2Messages(model, items) {
74
119
  }
75
120
  throw new agents_1.UserError(`Unknown content type: ${c.type}`);
76
121
  }),
77
- providerOptions: {
78
- ...(providerData ?? {}),
79
- },
122
+ providerOptions: toProviderOptions(providerData, model),
80
123
  });
81
124
  continue;
82
125
  }
@@ -85,23 +128,39 @@ function itemsToLanguageV2Messages(model, items) {
85
128
  messages.push(currentAssistantMessage);
86
129
  currentAssistantMessage = undefined;
87
130
  }
131
+ const assistantProviderOptions = toProviderOptions(providerData, model);
132
+ const assistantContent = content
133
+ .filter((c) => c.type === 'output_text')
134
+ .map((c) => {
135
+ const { providerData: contentProviderData } = c;
136
+ return {
137
+ type: 'text',
138
+ text: c.text,
139
+ providerOptions: toProviderOptions(contentProviderData, model),
140
+ };
141
+ });
142
+ if (shouldIncludeReasoningContent(model, modelSettings) &&
143
+ pendingReasonerReasoning) {
144
+ assistantContent.unshift({
145
+ type: 'reasoning',
146
+ text: pendingReasonerReasoning.text,
147
+ providerOptions: pendingReasonerReasoning.providerOptions,
148
+ });
149
+ messages.push({
150
+ role,
151
+ content: assistantContent,
152
+ providerOptions: {
153
+ ...pendingReasonerReasoning.providerOptions,
154
+ ...assistantProviderOptions,
155
+ },
156
+ });
157
+ pendingReasonerReasoning = undefined;
158
+ continue;
159
+ }
88
160
  messages.push({
89
161
  role,
90
- content: content
91
- .filter((c) => c.type === 'output_text')
92
- .map((c) => {
93
- const { providerData: contentProviderData } = c;
94
- return {
95
- type: 'text',
96
- text: c.text,
97
- providerOptions: {
98
- ...(contentProviderData ?? {}),
99
- },
100
- };
101
- }),
102
- providerOptions: {
103
- ...(providerData ?? {}),
104
- },
162
+ content: assistantContent,
163
+ providerOptions: assistantProviderOptions,
105
164
  });
106
165
  continue;
107
166
  }
@@ -113,27 +172,38 @@ function itemsToLanguageV2Messages(model, items) {
113
172
  currentAssistantMessage = {
114
173
  role: 'assistant',
115
174
  content: [],
116
- providerOptions: {
117
- ...(item.providerData ?? {}),
118
- },
175
+ providerOptions: toProviderOptions(item.providerData, model),
119
176
  };
120
177
  }
121
178
  if (Array.isArray(currentAssistantMessage.content) &&
122
179
  currentAssistantMessage.role === 'assistant') {
180
+ // Reasoner models (e.g., DeepSeek Reasoner) require reasoning_content on tool-call messages.
181
+ if (shouldIncludeReasoningContent(model, modelSettings) &&
182
+ pendingReasonerReasoning) {
183
+ currentAssistantMessage.content.push({
184
+ type: 'reasoning',
185
+ text: pendingReasonerReasoning.text,
186
+ providerOptions: pendingReasonerReasoning.providerOptions,
187
+ });
188
+ currentAssistantMessage.providerOptions = {
189
+ ...pendingReasonerReasoning.providerOptions,
190
+ ...currentAssistantMessage.providerOptions,
191
+ };
192
+ pendingReasonerReasoning = undefined;
193
+ }
123
194
  const content = {
124
195
  type: 'tool-call',
125
196
  toolCallId: item.callId,
126
197
  toolName: item.name,
127
198
  input: parseArguments(item.arguments),
128
- providerOptions: {
129
- ...(item.providerData ?? {}),
130
- },
199
+ providerOptions: toProviderOptions(item.providerData, model),
131
200
  };
132
201
  currentAssistantMessage.content.push(content);
133
202
  }
134
203
  continue;
135
204
  }
136
205
  else if (item.type === 'function_call_result') {
206
+ flushPendingReasonerReasoningToMessages();
137
207
  if (currentAssistantMessage) {
138
208
  messages.push(currentAssistantMessage);
139
209
  currentAssistantMessage = undefined;
@@ -143,16 +213,12 @@ function itemsToLanguageV2Messages(model, items) {
143
213
  toolCallId: item.callId,
144
214
  toolName: item.name,
145
215
  output: convertToAiSdkOutput(item.output),
146
- providerOptions: {
147
- ...(item.providerData ?? {}),
148
- },
216
+ providerOptions: toProviderOptions(item.providerData, model),
149
217
  };
150
218
  messages.push({
151
219
  role: 'tool',
152
220
  content: [toolResult],
153
- providerOptions: {
154
- ...(item.providerData ?? {}),
155
- },
221
+ providerOptions: toProviderOptions(item.providerData, model),
156
222
  });
157
223
  continue;
158
224
  }
@@ -180,22 +246,29 @@ function itemsToLanguageV2Messages(model, items) {
180
246
  if (item.type === 'reasoning' &&
181
247
  item.content.length > 0 &&
182
248
  typeof item.content[0].text === 'string') {
249
+ // Only forward provider data when it targets this model so signatures stay scoped correctly.
250
+ if (shouldIncludeReasoningContent(model, modelSettings)) {
251
+ pendingReasonerReasoning = {
252
+ text: item.content[0].text,
253
+ providerOptions: toProviderOptions(item.providerData, model),
254
+ };
255
+ continue;
256
+ }
183
257
  messages.push({
184
258
  role: 'assistant',
185
259
  content: [
186
260
  {
187
261
  type: 'reasoning',
188
262
  text: item.content[0].text,
189
- providerOptions: { ...(item.providerData ?? {}) },
263
+ providerOptions: toProviderOptions(item.providerData, model),
190
264
  },
191
265
  ],
192
- providerOptions: {
193
- ...(item.providerData ?? {}),
194
- },
266
+ providerOptions: toProviderOptions(item.providerData, model),
195
267
  });
196
268
  continue;
197
269
  }
198
270
  if (item.type === 'unknown') {
271
+ flushPendingReasonerReasoningToMessages();
199
272
  messages.push({ ...(item.providerData ?? {}) });
200
273
  continue;
201
274
  }
@@ -205,6 +278,7 @@ function itemsToLanguageV2Messages(model, items) {
205
278
  const itemType = item;
206
279
  throw new agents_1.UserError(`Unknown item type: ${itemType}`);
207
280
  }
281
+ flushPendingReasonerReasoningToMessages();
208
282
  if (currentAssistantMessage) {
209
283
  messages.push(currentAssistantMessage);
210
284
  }
@@ -390,6 +464,122 @@ function convertStructuredOutputsToAiSdkOutput(outputs) {
390
464
  function isRecord(value) {
391
465
  return typeof value === 'object' && value !== null;
392
466
  }
467
+ function getModelIdentifier(model) {
468
+ return `${model.provider}:${model.modelId}`;
469
+ }
470
+ function isProviderDataForModel(providerData, model) {
471
+ const providerDataModel = providerData.model;
472
+ if (typeof providerDataModel !== 'string') {
473
+ return true;
474
+ }
475
+ const target = getModelIdentifier(model).toLowerCase();
476
+ const pdLower = providerDataModel.toLowerCase();
477
+ return (pdLower === target ||
478
+ pdLower === model.modelId.toLowerCase() ||
479
+ pdLower === model.provider.toLowerCase());
480
+ }
481
+ function isGeminiModel(model) {
482
+ const target = getModelIdentifier(model).toLowerCase();
483
+ return (target.includes('gemini') || model.modelId.toLowerCase().includes('gemini'));
484
+ }
485
+ function isDeepSeekModel(model) {
486
+ const target = getModelIdentifier(model).toLowerCase();
487
+ return (target.includes('deepseek') ||
488
+ model.modelId.toLowerCase().includes('deepseek') ||
489
+ model.provider.toLowerCase().includes('deepseek'));
490
+ }
491
+ function shouldIncludeReasoningContent(model, modelSettings) {
492
+ const target = getModelIdentifier(model).toLowerCase();
493
+ const modelIdLower = model.modelId.toLowerCase();
494
+ // DeepSeek models require reasoning_content to be sent alongside tool calls when
495
+ // either the dedicated reasoner model is used or thinking mode is explicitly enabled.
496
+ const isDeepSeekReasoner = target.includes('deepseek-reasoner') ||
497
+ modelIdLower.includes('deepseek-reasoner');
498
+ if (isDeepSeekReasoner) {
499
+ return true;
500
+ }
501
+ if (!isDeepSeekModel(model)) {
502
+ return false;
503
+ }
504
+ return hasEnabledDeepSeekThinking(modelSettings?.providerData);
505
+ }
506
+ function hasEnabledDeepSeekThinking(providerData) {
507
+ if (!isRecord(providerData)) {
508
+ return false;
509
+ }
510
+ const thinkingOption = [
511
+ providerData.thinking,
512
+ providerData.deepseek?.thinking,
513
+ providerData.providerOptions?.thinking,
514
+ providerData.providerOptions?.deepseek?.thinking,
515
+ ].find((value) => value !== undefined);
516
+ return isThinkingEnabled(thinkingOption);
517
+ }
518
+ function isThinkingEnabled(option) {
519
+ if (option === undefined || option === null) {
520
+ return false;
521
+ }
522
+ if (option === true) {
523
+ return true;
524
+ }
525
+ if (typeof option === 'string') {
526
+ return option.toLowerCase() === 'enabled';
527
+ }
528
+ if (isRecord(option)) {
529
+ const type = option.type ?? option.mode ?? option.status;
530
+ if (typeof type === 'string') {
531
+ return type.toLowerCase() === 'enabled';
532
+ }
533
+ }
534
+ return false;
535
+ }
536
+ function toProviderOptions(providerData, model) {
537
+ if (!isRecord(providerData)) {
538
+ return {};
539
+ }
540
+ if (!isProviderDataForModel(providerData, model)) {
541
+ return {};
542
+ }
543
+ const options = { ...providerData };
544
+ delete options.model;
545
+ delete options.responseId;
546
+ delete options.response_id;
547
+ if (isGeminiModel(model)) {
548
+ const googleFields = isRecord(options.google) ? { ...options.google } : {};
549
+ const thoughtSignature = googleFields.thoughtSignature ??
550
+ googleFields.thought_signature ??
551
+ options.thoughtSignature ??
552
+ options.thought_signature;
553
+ if (thoughtSignature) {
554
+ googleFields.thoughtSignature = thoughtSignature;
555
+ }
556
+ if (Object.keys(googleFields).length > 0) {
557
+ options.google = googleFields;
558
+ }
559
+ delete options.thoughtSignature;
560
+ delete options.thought_signature;
561
+ }
562
+ return options;
563
+ }
564
+ function buildBaseProviderData(model, responseId) {
565
+ const base = { model: getModelIdentifier(model) };
566
+ if (responseId) {
567
+ base.responseId = responseId;
568
+ }
569
+ return base;
570
+ }
571
+ function mergeProviderData(base, ...sources) {
572
+ const merged = {};
573
+ if (isRecord(base)) {
574
+ Object.assign(merged, base);
575
+ }
576
+ for (const src of sources) {
577
+ if (isRecord(src)) {
578
+ Object.assign(merged, src);
579
+ }
580
+ }
581
+ return Object.keys(merged).length > 0 ? merged : undefined;
582
+ }
393
583
  function getImageInlineMediaType(source) {
394
584
  if (typeof source.mediaType === 'string' && source.mediaType.length > 0) {
395
585
  return source.mediaType;
@@ -485,6 +675,7 @@ class AiSdkModel {
485
675
  #model;
486
676
  #logger = (0, agents_1.getLogger)('openai-agents:extensions:ai-sdk');
487
677
  constructor(model) {
678
+ ensureSupportedModel(model);
488
679
  this.#model = model;
489
680
  }
490
681
  async getResponse(request) {
@@ -502,7 +693,7 @@ class AiSdkModel {
502
693
  content: [{ type: 'text', text: request.input }],
503
694
  },
504
695
  ]
505
- : itemsToLanguageV2Messages(this.#model, request.input);
696
+ : itemsToLanguageV2Messages(this.#model, request.input, request.modelSettings);
506
697
  if (request.systemInstructions) {
507
698
  input = [
508
699
  {
@@ -543,8 +734,10 @@ class AiSdkModel {
543
734
  this.#logger.debug('Request:', JSON.stringify(aiSdkRequest, null, 2));
544
735
  }
545
736
  const result = await this.#model.doGenerate(aiSdkRequest);
737
+ const baseProviderData = buildBaseProviderData(this.#model, result.response?.id);
546
738
  const output = [];
547
739
  const resultContent = result.content ?? [];
740
+ // Emit reasoning before tool calls so Anthropic thinking signatures propagate into the next turn.
548
741
  // Extract and add reasoning items FIRST (required by Anthropic: thinking blocks must precede tool_use blocks)
549
742
  const reasoningParts = resultContent.filter((c) => c && c.type === 'reasoning');
550
743
  for (const reasoningPart of reasoningParts) {
@@ -554,7 +747,7 @@ class AiSdkModel {
554
747
  content: [{ type: 'input_text', text: reasoningText }],
555
748
  rawContent: [{ type: 'reasoning_text', text: reasoningText }],
556
749
  // Preserve provider-specific metadata (including signature for Anthropic extended thinking)
557
- providerData: reasoningPart.providerMetadata ?? undefined,
750
+ providerData: mergeProviderData(baseProviderData, reasoningPart.providerMetadata),
558
751
  });
559
752
  }
560
753
  const toolCalls = resultContent.filter((c) => c && c.type === 'tool-call');
@@ -586,8 +779,8 @@ class AiSdkModel {
586
779
  name: toolCall.toolName,
587
780
  arguments: toolCallArguments,
588
781
  status: 'completed',
589
- providerData: toolCall.providerMetadata ??
590
- (hasToolCalls ? result.providerMetadata : undefined),
782
+ providerData: mergeProviderData(baseProviderData, toolCall.providerMetadata ??
783
+ (hasToolCalls ? result.providerMetadata : undefined)),
591
784
  });
592
785
  }
593
786
  // Some of other platforms may return both tool calls and text.
@@ -602,7 +795,7 @@ class AiSdkModel {
602
795
  content: [{ type: 'output_text', text: textItem.text }],
603
796
  role: 'assistant',
604
797
  status: 'completed',
605
- providerData: result.providerMetadata,
798
+ providerData: mergeProviderData(baseProviderData, result.providerMetadata),
606
799
  });
607
800
  }
608
801
  }
@@ -612,18 +805,10 @@ class AiSdkModel {
612
805
  const response = {
613
806
  responseId: result.response?.id ?? 'FAKE_ID',
614
807
  usage: new agents_1.Usage({
615
- inputTokens: Number.isNaN(result.usage?.inputTokens)
616
- ? 0
617
- : (result.usage?.inputTokens ?? 0),
618
- outputTokens: Number.isNaN(result.usage?.outputTokens)
619
- ? 0
620
- : (result.usage?.outputTokens ?? 0),
621
- totalTokens: (Number.isNaN(result.usage?.inputTokens)
622
- ? 0
623
- : (result.usage?.inputTokens ?? 0)) +
624
- (Number.isNaN(result.usage?.outputTokens)
625
- ? 0
626
- : (result.usage?.outputTokens ?? 0)) || 0,
808
+ inputTokens: extractTokenCount(result.usage, 'inputTokens'),
809
+ outputTokens: extractTokenCount(result.usage, 'outputTokens'),
810
+ totalTokens: extractTokenCount(result.usage, 'inputTokens') +
811
+ extractTokenCount(result.usage, 'outputTokens') || 0,
627
812
  }),
628
813
  output,
629
814
  providerData: result,
@@ -711,7 +896,7 @@ class AiSdkModel {
711
896
  content: [{ type: 'text', text: request.input }],
712
897
  },
713
898
  ]
714
- : itemsToLanguageV2Messages(this.#model, request.input);
899
+ : itemsToLanguageV2Messages(this.#model, request.input, request.modelSettings);
715
900
  if (request.systemInstructions) {
716
901
  input = [
717
902
  {
@@ -731,6 +916,7 @@ class AiSdkModel {
731
916
  const responseFormat = getResponseFormat(request.outputType);
732
917
  const aiSdkRequest = {
733
918
  tools,
919
+ toolChoice: toolChoiceToLanguageV2Format(request.modelSettings.toolChoice),
734
920
  prompt: input,
735
921
  temperature: request.modelSettings.temperature,
736
922
  topP: request.modelSettings.topP,
@@ -748,13 +934,15 @@ class AiSdkModel {
748
934
  this.#logger.debug('Request (streamed):', JSON.stringify(aiSdkRequest, null, 2));
749
935
  }
750
936
  const { stream } = await this.#model.doStream(aiSdkRequest);
937
+ const baseProviderData = buildBaseProviderData(this.#model);
751
938
  let started = false;
752
939
  let responseId;
753
940
  let usagePromptTokens = 0;
754
941
  let usageCompletionTokens = 0;
755
942
  const functionCalls = {};
756
943
  let textOutput;
757
- // State for tracking reasoning blocks (for Anthropic extended thinking)
944
+ // State for tracking reasoning blocks (for Anthropic extended thinking):
945
+ // Track reasoning deltas so we can preserve Anthropic signatures even when text is redacted.
758
946
  const reasoningBlocks = {};
759
947
  for await (const part of stream) {
760
948
  if (!started) {
@@ -810,9 +998,7 @@ class AiSdkModel {
810
998
  name: part.toolName,
811
999
  arguments: part.input ?? '',
812
1000
  status: 'completed',
813
- ...(part.providerMetadata
814
- ? { providerData: part.providerMetadata }
815
- : {}),
1001
+ providerData: mergeProviderData(baseProviderData, part.providerMetadata),
816
1002
  };
817
1003
  }
818
1004
  break;
@@ -824,12 +1010,8 @@ class AiSdkModel {
824
1010
  break;
825
1011
  }
826
1012
  case 'finish': {
827
- usagePromptTokens = Number.isNaN(part.usage?.inputTokens)
828
- ? 0
829
- : (part.usage?.inputTokens ?? 0);
830
- usageCompletionTokens = Number.isNaN(part.usage?.outputTokens)
831
- ? 0
832
- : (part.usage?.outputTokens ?? 0);
1013
+ usagePromptTokens = extractTokenCount(part.usage, 'inputTokens');
1014
+ usageCompletionTokens = extractTokenCount(part.usage, 'outputTokens');
833
1015
  break;
834
1016
  }
835
1017
  case 'error': {
@@ -850,7 +1032,7 @@ class AiSdkModel {
850
1032
  content: [{ type: 'input_text', text: reasoningBlock.text }],
851
1033
  rawContent: [{ type: 'reasoning_text', text: reasoningBlock.text }],
852
1034
  // Preserve provider-specific metadata (including signature for Anthropic extended thinking)
853
- providerData: reasoningBlock.providerMetadata ?? undefined,
1035
+ providerData: mergeProviderData(baseProviderData, reasoningBlock.providerMetadata, responseId ? { responseId } : undefined),
854
1036
  });
855
1037
  }
856
1038
  }
@@ -860,10 +1042,14 @@ class AiSdkModel {
860
1042
  role: 'assistant',
861
1043
  content: [textOutput],
862
1044
  status: 'completed',
1045
+ providerData: mergeProviderData(baseProviderData, responseId ? { responseId } : undefined),
863
1046
  });
864
1047
  }
865
1048
  for (const fc of Object.values(functionCalls)) {
866
- outputs.push(fc);
1049
+ outputs.push({
1050
+ ...fc,
1051
+ providerData: mergeProviderData(baseProviderData, fc.providerData, responseId ? { responseId } : undefined),
1052
+ });
867
1053
  }
868
1054
  const finalEvent = {
869
1055
  type: 'response_done',
@@ -970,6 +1156,19 @@ exports.AiSdkModel = AiSdkModel;
970
1156
  function aisdk(model) {
971
1157
  return new AiSdkModel(model);
972
1158
  }
1159
+ function extractTokenCount(usage, key) {
1160
+ const val = usage?.[key];
1161
+ if (typeof val === 'number') {
1162
+ return Number.isNaN(val) ? 0 : val;
1163
+ }
1164
+ // Handle Google AI SDK object format ({ total: number, ... })
1165
+ if (typeof val === 'object' &&
1166
+ val !== null &&
1167
+ typeof val.total === 'number') {
1168
+ return val.total;
1169
+ }
1170
+ return 0;
1171
+ }
973
1172
  function parseArguments(args) {
974
1173
  if (!args) {
975
1174
  return {};