@ai-sdk/anthropic 2.0.0-canary.1 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -47,13 +47,15 @@ var anthropicFailedResponseHandler = (0, import_provider_utils.createJsonErrorRe
 
 // src/anthropic-prepare-tools.ts
 var import_provider = require("@ai-sdk/provider");
-function prepareTools(mode) {
-  var _a;
-  const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+function prepareTools({
+  tools,
+  toolChoice
+}) {
+  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
   const betas = /* @__PURE__ */ new Set();
   if (tools == null) {
-    return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+    return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
   }
   const anthropicTools2 = [];
   for (const tool of tools) {
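
Note: the hunk above removes the v1 "mode" wrapper. prepareTools now takes { tools, toolChoice } directly and returns camelCase toolChoice instead of the snake_case wire field tool_choice. A minimal sketch of the new shape, with illustrative types rather than the exact @ai-sdk/provider definitions:

    interface PrepareToolsArgs {
      tools?: Array<{ name: string; description?: string }>;
      toolChoice?:
        | { type: "auto" }
        | { type: "required" }
        | { type: "none" }
        | { type: "tool"; toolName: string };
    }

    function prepareToolsSketch({ tools, toolChoice }: PrepareToolsArgs) {
      // Empty tool arrays normalize to undefined, mirroring the diff above.
      const normalizedTools = tools?.length ? tools : undefined;
      if (normalizedTools == null) {
        // The result uses camelCase toolChoice; v1 returned wire-format tool_choice.
        return { tools: undefined, toolChoice: undefined };
      }
      return { tools: normalizedTools, toolChoice };
    }

    // v2 call sites pass tools and toolChoice directly instead of a v1 mode object:
    prepareToolsSketch({
      tools: [{ name: "weather" }],
      toolChoice: { type: "auto" },
    });
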
@@ -125,11 +127,10 @@ function prepareTools(mode) {
         break;
       }
     }
-  const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
     return {
       tools: anthropicTools2,
-      tool_choice: void 0,
+      toolChoice: void 0,
       toolWarnings,
       betas
     };
@@ -139,30 +140,30 @@ function prepareTools(mode) {
     case "auto":
       return {
         tools: anthropicTools2,
-        tool_choice: { type: "auto" },
+        toolChoice: { type: "auto" },
         toolWarnings,
         betas
       };
     case "required":
       return {
         tools: anthropicTools2,
-        tool_choice: { type: "any" },
+        toolChoice: { type: "any" },
         toolWarnings,
         betas
       };
     case "none":
-      return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+      return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
     case "tool":
       return {
         tools: anthropicTools2,
-        tool_choice: { type: "tool", name: toolChoice.toolName },
+        toolChoice: { type: "tool", name: toolChoice.toolName },
         toolWarnings,
         betas
       };
     default: {
       const _exhaustiveCheck = type;
       throw new import_provider.UnsupportedFunctionalityError({
-        functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+        functionality: `tool choice type: ${_exhaustiveCheck}`
       });
     }
   }
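
Note: the switch above maps the provider-agnostic tool choice onto Anthropic's wire values; "required" becomes Anthropic's "any", and "none" drops the tools entirely. A standalone sketch of that mapping (types are illustrative):

    type ToolChoice =
      | { type: "auto" }
      | { type: "required" }
      | { type: "none" }
      | { type: "tool"; toolName: string };

    // Anthropic's wire format only knows "auto", "any", and "tool".
    function mapToolChoice(
      choice: ToolChoice
    ): { type: "auto" } | { type: "any" } | { type: "tool"; name: string } | undefined {
      switch (choice.type) {
        case "auto":
          return { type: "auto" };
        case "required":
          return { type: "any" }; // "required" maps onto Anthropic's "any"
        case "none":
          return undefined; // the diff drops the tools entirely for "none"
        case "tool":
          return { type: "tool", name: choice.toolName };
      }
    }
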
@@ -176,7 +177,7 @@ function convertToAnthropicMessagesPrompt({
   sendReasoning,
   warnings
 }) {
-  var _a, _b, _c, _d;
+  var _a, _b, _c;
   const betas = /* @__PURE__ */ new Set();
   const blocks = groupIntoBlocks(prompt);
   let system = void 0;
@@ -198,10 +199,10 @@ function convertToAnthropicMessagesPrompt({
           functionality: "Multiple system messages that are separated by user/assistant messages"
         });
       }
-      system = block.messages.map(({ content, providerMetadata }) => ({
+      system = block.messages.map(({ content, providerOptions }) => ({
         type: "text",
         text: content,
-        cache_control: getCacheControl(providerMetadata)
+        cache_control: getCacheControl(providerOptions)
       }));
       break;
     }
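
Note: providerMetadata becomes providerOptions here, the v2 name for per-message provider options; getCacheControl still reads the Anthropic cache-control entry from it. A hedged sketch of a system message carrying such a hint under the new key (the nested anthropic.cacheControl shape is an assumption inferred from getCacheControl, not spelled out in this diff):

    const systemMessage = {
      role: "system" as const,
      content: "You are a helpful assistant.",
      // v2 key is providerOptions (was providerMetadata in v1):
      providerOptions: {
        anthropic: {
          cacheControl: { type: "ephemeral" },
        },
      },
    };
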
@@ -214,7 +215,7 @@ function convertToAnthropicMessagesPrompt({
         for (let j = 0; j < content.length; j++) {
           const part = content[j];
           const isLastPart = j === content.length - 1;
-          const cacheControl = (_a = getCacheControl(part.providerMetadata)) != null ? _a : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+          const cacheControl = (_a = getCacheControl(part.providerOptions)) != null ? _a : isLastPart ? getCacheControl(message.providerOptions) : void 0;
           switch (part.type) {
             case "text": {
               anthropicContent.push({
@@ -224,42 +225,39 @@ function convertToAnthropicMessagesPrompt({
               });
               break;
             }
-            case "image": {
-              anthropicContent.push({
-                type: "image",
-                source: part.image instanceof URL ? {
-                  type: "url",
-                  url: part.image.toString()
-                } : {
-                  type: "base64",
-                  media_type: (_b = part.mimeType) != null ? _b : "image/jpeg",
-                  data: (0, import_provider_utils2.convertUint8ArrayToBase64)(part.image)
-                },
-                cache_control: cacheControl
-              });
-              break;
-            }
             case "file": {
-              if (part.data instanceof URL) {
-                throw new import_provider2.UnsupportedFunctionalityError({
-                  functionality: "Image URLs in user messages"
+              if (part.mediaType.startsWith("image/")) {
+                anthropicContent.push({
+                  type: "image",
+                  source: part.data instanceof URL ? {
+                    type: "url",
+                    url: part.data.toString()
+                  } : {
+                    type: "base64",
+                    media_type: part.mediaType === "image/*" ? "image/jpeg" : part.mediaType,
+                    data: (0, import_provider_utils2.convertToBase64)(part.data)
+                  },
+                  cache_control: cacheControl
                 });
-              }
-              if (part.mimeType !== "application/pdf") {
+              } else if (part.mediaType === "application/pdf") {
+                betas.add("pdfs-2024-09-25");
+                anthropicContent.push({
+                  type: "document",
+                  source: part.data instanceof URL ? {
+                    type: "url",
+                    url: part.data.toString()
+                  } : {
+                    type: "base64",
+                    media_type: "application/pdf",
+                    data: (0, import_provider_utils2.convertToBase64)(part.data)
+                  },
+                  cache_control: cacheControl
+                });
+              } else {
                 throw new import_provider2.UnsupportedFunctionalityError({
-                  functionality: "Non-PDF files in user messages"
+                  functionality: `media type: ${part.mediaType}`
                 });
               }
-              betas.add("pdfs-2024-09-25");
-              anthropicContent.push({
-                type: "document",
-                source: {
-                  type: "base64",
-                  media_type: "application/pdf",
-                  data: part.data
-                },
-                cache_control: cacheControl
-              });
               break;
             }
           }
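
Note: this hunk folds the v1 "image" part into a single "file" part keyed by mediaType: image media types become Anthropic image blocks (URL or base64), application/pdf becomes a document block that may now also be URL-sourced, and any other media type is rejected. An illustrative dispatch sketch mirroring that branching (the FilePart type is a simplification):

    interface FilePart {
      type: "file";
      mediaType: string; // e.g. "image/png", "application/pdf"
      data: URL | Uint8Array;
    }

    function describeFilePart(part: FilePart): string {
      if (part.mediaType.startsWith("image/")) {
        // The wildcard "image/*" falls back to image/jpeg, as in the diff above.
        const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
        return `image block (${mediaType}, ${part.data instanceof URL ? "url" : "base64"})`;
      } else if (part.mediaType === "application/pdf") {
        // PDFs map to a document block and require the pdfs-2024-09-25 beta.
        return `document block (${part.data instanceof URL ? "url" : "base64"})`;
      }
      throw new Error(`media type: ${part.mediaType}`);
    }
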
@@ -270,7 +268,7 @@ function convertToAnthropicMessagesPrompt({
         for (let i2 = 0; i2 < content.length; i2++) {
           const part = content[i2];
           const isLastPart = i2 === content.length - 1;
-          const cacheControl = (_c = getCacheControl(part.providerMetadata)) != null ? _c : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+          const cacheControl = (_b = getCacheControl(part.providerOptions)) != null ? _b : isLastPart ? getCacheControl(message.providerOptions) : void 0;
           const toolResultContent = part.content != null ? part.content.map((part2) => {
             var _a2;
             switch (part2.type) {
@@ -285,7 +283,7 @@ function convertToAnthropicMessagesPrompt({
                 type: "image",
                 source: {
                   type: "base64",
-                  media_type: (_a2 = part2.mimeType) != null ? _a2 : "image/jpeg",
+                  media_type: (_a2 = part2.mediaType) != null ? _a2 : "image/jpeg",
                   data: part2.data
                 },
                 cache_control: void 0
@@ -320,7 +318,7 @@ function convertToAnthropicMessagesPrompt({
         for (let k = 0; k < content.length; k++) {
           const part = content[k];
           const isLastContentPart = k === content.length - 1;
-          const cacheControl = (_d = getCacheControl(part.providerMetadata)) != null ? _d : isLastContentPart ? getCacheControl(message.providerMetadata) : void 0;
+          const cacheControl = (_c = getCacheControl(part.providerOptions)) != null ? _c : isLastContentPart ? getCacheControl(message.providerOptions) : void 0;
           switch (part.type) {
             case "text": {
               anthropicContent.push({
@@ -377,7 +375,7 @@ function convertToAnthropicMessagesPrompt({
     }
     default: {
       const _exhaustiveCheck = type;
-      throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+      throw new Error(`content type: ${_exhaustiveCheck}`);
     }
   }
 }
@@ -452,21 +450,23 @@ function mapAnthropicStopReason(finishReason) {
 var AnthropicMessagesLanguageModel = class {
   constructor(modelId, settings, config) {
     this.specificationVersion = "v2";
-    this.defaultObjectGenerationMode = "tool";
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
   }
+  supportsUrl(url) {
+    return url.protocol === "https:";
+  }
   get provider() {
     return this.config.provider;
   }
-  get supportsImageUrls() {
-    return this.config.supportsImageUrls;
+  async getSupportedUrls() {
+    var _a, _b, _c;
+    return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
   }
   async getArgs({
-    mode,
     prompt,
-    maxTokens = 4096,
+    maxOutputTokens = 4096,
     // 4096: max model output tokens TODO update default in v5
     temperature,
     topP,
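
Note: two model-level changes land above: the v1 supportsImageUrls getter gives way to an async getSupportedUrls() that delegates to the config and defaults to {}, and the maxTokens call option is renamed maxOutputTokens. The return value is a map from media-type patterns to URL regexps; the concrete patterns below are an assumption for illustration, not taken from this diff:

    // Hypothetical config implementation; the return shape is a map from
    // media-type patterns to regexps of URLs the API accepts directly.
    async function getSupportedUrls(): Promise<Record<string, RegExp[]>> {
      return {
        "image/*": [/^https:\/\/.+$/],
        "application/pdf": [/^https:\/\/.+$/],
      };
    }
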
@@ -476,10 +476,11 @@ var AnthropicMessagesLanguageModel = class {
     stopSequences,
     responseFormat,
     seed,
-    providerMetadata: providerOptions
+    tools,
+    toolChoice,
+    providerOptions
   }) {
     var _a, _b, _c;
-    const type = mode.type;
     const warnings = [];
     if (frequencyPenalty != null) {
       warnings.push({
@@ -522,7 +523,7 @@ var AnthropicMessagesLanguageModel = class {
       // model id:
       model: this.modelId,
       // standardized settings:
-      max_tokens: maxTokens,
+      max_tokens: maxOutputTokens,
       temperature,
       top_k: topK,
       top_p: topP,
@@ -565,44 +566,23 @@ var AnthropicMessagesLanguageModel = class {
           details: "topP is not supported when thinking is enabled"
         });
       }
-      baseArgs.max_tokens = maxTokens + thinkingBudget;
-    }
-    switch (type) {
-      case "regular": {
-        const {
-          tools,
-          tool_choice,
-          toolWarnings,
-          betas: toolsBetas
-        } = prepareTools(mode);
-        return {
-          args: { ...baseArgs, tools, tool_choice },
-          warnings: [...warnings, ...toolWarnings],
-          betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
-        };
-      }
-      case "object-json": {
-        throw new import_provider3.UnsupportedFunctionalityError({
-          functionality: "json-mode object generation"
-        });
-      }
-      case "object-tool": {
-        const { name, description, parameters } = mode.tool;
-        return {
-          args: {
-            ...baseArgs,
-            tools: [{ name, description, input_schema: parameters }],
-            tool_choice: { type: "tool", name }
-          },
-          warnings,
-          betas: messagesBetas
-        };
-      }
-      default: {
-        const _exhaustiveCheck = type;
-        throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
-      }
+      baseArgs.max_tokens = maxOutputTokens + thinkingBudget;
     }
+    const {
+      tools: anthropicTools2,
+      toolChoice: anthropicToolChoice,
+      toolWarnings,
+      betas: toolsBetas
+    } = prepareTools({ tools, toolChoice });
+    return {
+      args: {
+        ...baseArgs,
+        tools: anthropicTools2,
+        tool_choice: anthropicToolChoice
+      },
+      warnings: [...warnings, ...toolWarnings],
+      betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
+    };
   }
   async getHeaders({
     betas,
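
Note: with the "regular" / "object-json" / "object-tool" mode switch deleted, getArgs now feeds tools and toolChoice straight into prepareTools on every call, and translates the camelCase result back to the snake_case tool_choice wire field only when assembling the request body. A condensed sketch of that tail (illustrative types):

    // Condensed, illustrative tail of the new getArgs (not the real provider types).
    function assembleArgs(
      baseArgs: Record<string, unknown>,
      warnings: unknown[],
      messagesBetas: Set<string>,
      prepared: {
        tools: unknown[] | undefined;
        toolChoice: unknown;
        toolWarnings: unknown[];
        betas: Set<string>;
      }
    ) {
      return {
        args: {
          ...baseArgs,
          tools: prepared.tools,
          tool_choice: prepared.toolChoice, // snake_case only at the wire boundary
        },
        warnings: [...warnings, ...prepared.toolWarnings],
        betas: new Set([...messagesBetas, ...prepared.betas]),
      };
    }
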
@@ -640,56 +620,59 @@ var AnthropicMessagesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
-    let text = "";
-    for (const content of response.content) {
-      if (content.type === "text") {
-        text += content.text;
-      }
-    }
-    let toolCalls = void 0;
-    if (response.content.some((content) => content.type === "tool_use")) {
-      toolCalls = [];
-      for (const content of response.content) {
-        if (content.type === "tool_use") {
-          toolCalls.push({
+    const content = [];
+    for (const part of response.content) {
+      switch (part.type) {
+        case "text": {
+          content.push({ type: "text", text: part.text });
+          break;
+        }
+        case "thinking": {
+          content.push({
+            type: "reasoning",
+            reasoningType: "text",
+            text: part.thinking
+          });
+          content.push({
+            type: "reasoning",
+            reasoningType: "signature",
+            signature: part.signature
+          });
+          break;
+        }
+        case "redacted_thinking": {
+          content.push({
+            type: "reasoning",
+            reasoningType: "redacted",
+            data: part.data
+          });
+          break;
+        }
+        case "tool_use": {
+          content.push({
+            type: "tool-call",
             toolCallType: "function",
-            toolCallId: content.id,
-            toolName: content.name,
-            args: JSON.stringify(content.input)
+            toolCallId: part.id,
+            toolName: part.name,
+            args: JSON.stringify(part.input)
           });
+          break;
         }
       }
     }
-    const reasoning = response.content.filter(
-      (content) => content.type === "redacted_thinking" || content.type === "thinking"
-    ).map(
-      (content) => content.type === "thinking" ? {
-        type: "text",
-        text: content.thinking,
-        signature: content.signature
-      } : {
-        type: "redacted",
-        data: content.data
-      }
-    );
     return {
-      text,
-      reasoning: reasoning.length > 0 ? reasoning : void 0,
-      toolCalls,
+      content,
       finishReason: mapAnthropicStopReason(response.stop_reason),
       usage: {
-        promptTokens: response.usage.input_tokens,
-        completionTokens: response.usage.output_tokens
-      },
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: {
-        headers: responseHeaders,
-        body: rawResponse
+        inputTokens: response.usage.input_tokens,
+        outputTokens: response.usage.output_tokens
       },
+      request: { body: args },
       response: {
         id: (_a = response.id) != null ? _a : void 0,
-        modelId: (_b = response.model) != null ? _b : void 0
+        modelId: (_b = response.model) != null ? _b : void 0,
+        headers: responseHeaders,
+        body: rawResponse
       },
       warnings,
       providerMetadata: {
@@ -697,8 +680,7 @@ var AnthropicMessagesLanguageModel = class {
           cacheCreationInputTokens: (_c = response.usage.cache_creation_input_tokens) != null ? _c : null,
           cacheReadInputTokens: (_d = response.usage.cache_read_input_tokens) != null ? _d : null
         }
-      },
-      request: { body: JSON.stringify(args) }
+      }
     };
   }
   async doStream(options) {
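
Note: the two hunks above rewrite doGenerate's result: the separate text / reasoning / toolCalls fields become one ordered content array, usage is renamed to inputTokens / outputTokens, headers and the raw body move under response, and request.body is now the args object itself rather than a JSON string. A sketch of consuming the new content array (the part union is abbreviated from the diff):

    type ContentPart =
      | { type: "text"; text: string }
      | { type: "reasoning"; reasoningType: "text"; text: string }
      | { type: "reasoning"; reasoningType: "signature"; signature: string }
      | { type: "reasoning"; reasoningType: "redacted"; data: string }
      | { type: "tool-call"; toolCallId: string; toolName: string; args: string };

    // v1 exposed result.text directly; v2 consumers derive it from the array.
    function collectText(content: ContentPart[]): string {
      return content
        .filter((part): part is Extract<ContentPart, { type: "text" }> => part.type === "text")
        .map((part) => part.text)
        .join("");
    }
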
@@ -715,11 +697,10 @@ var AnthropicMessagesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const { messages: rawPrompt, ...rawSettings } = args;
     let finishReason = "unknown";
     const usage = {
-      promptTokens: Number.NaN,
-      completionTokens: Number.NaN
+      inputTokens: void 0,
+      outputTokens: void 0
     };
     const toolCallContentBlocks = {};
     let providerMetadata = void 0;
@@ -727,6 +708,9 @@ var AnthropicMessagesLanguageModel = class {
     return {
       stream: response.pipeThrough(
         new TransformStream({
+          start(controller) {
+            controller.enqueue({ type: "stream-start", warnings });
+          },
           transform(chunk, controller) {
             var _a, _b, _c, _d;
             if (!chunk.success) {
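
Note: the start callback added above means every v2 stream now begins with a stream-start part that carries the call warnings before any model output. A sketch of a consumer peeling those warnings off the front of the stream (part shapes are illustrative and abbreviated):

    type StreamPart =
      | { type: "stream-start"; warnings: unknown[] }
      | { type: "text"; text: string };

    async function readLeadingWarnings(stream: ReadableStream<StreamPart>) {
      const reader = stream.getReader();
      const first = await reader.read();
      if (!first.done && first.value.type === "stream-start") {
        console.warn("call warnings:", first.value.warnings);
      }
      reader.releaseLock();
    }
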
@@ -748,7 +732,8 @@ var AnthropicMessagesLanguageModel = class {
               }
               case "redacted_thinking": {
                 controller.enqueue({
-                  type: "redacted-reasoning",
+                  type: "reasoning",
+                  reasoningType: "redacted",
                   data: value.content_block.data
                 });
                 return;
@@ -789,22 +774,24 @@ var AnthropicMessagesLanguageModel = class {
             switch (deltaType) {
               case "text_delta": {
                 controller.enqueue({
-                  type: "text-delta",
-                  textDelta: value.delta.text
+                  type: "text",
+                  text: value.delta.text
                 });
                 return;
               }
               case "thinking_delta": {
                 controller.enqueue({
                   type: "reasoning",
-                  textDelta: value.delta.thinking
+                  reasoningType: "text",
+                  text: value.delta.thinking
                 });
                 return;
               }
               case "signature_delta": {
                 if (blockType === "thinking") {
                   controller.enqueue({
-                    type: "reasoning-signature",
+                    type: "reasoning",
+                    reasoningType: "signature",
                     signature: value.delta.signature
                   });
                 }
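
Note: the delta handling above renames the v1 stream parts: text-delta with textDelta becomes { type: "text", text }, and the thinking, signature, and redacted variants collapse into { type: "reasoning" } discriminated by reasoningType. A sketch of the resulting union and a consumer switch (abbreviated; names follow the diff):

    type V2StreamPart =
      | { type: "text"; text: string }
      | { type: "reasoning"; reasoningType: "text"; text: string }
      | { type: "reasoning"; reasoningType: "signature"; signature: string }
      | { type: "reasoning"; reasoningType: "redacted"; data: string };

    function render(part: V2StreamPart): string {
      switch (part.type) {
        case "text":
          return part.text;
        case "reasoning":
          // reasoningType distinguishes what used to be three distinct part types.
          return part.reasoningType === "text" ? part.text : "";
      }
    }
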
@@ -831,8 +818,8 @@ var AnthropicMessagesLanguageModel = class {
               }
             }
             case "message_start": {
-              usage.promptTokens = value.message.usage.input_tokens;
-              usage.completionTokens = value.message.usage.output_tokens;
+              usage.inputTokens = value.message.usage.input_tokens;
+              usage.outputTokens = value.message.usage.output_tokens;
               providerMetadata = {
                 anthropic: {
                   cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null,
@@ -847,7 +834,7 @@ var AnthropicMessagesLanguageModel = class {
               return;
             }
             case "message_delta": {
-              usage.completionTokens = value.usage.output_tokens;
+              usage.outputTokens = value.usage.output_tokens;
               finishReason = mapAnthropicStopReason(value.delta.stop_reason);
               return;
             }
@@ -872,10 +859,8 @@ var AnthropicMessagesLanguageModel = class {
           }
         })
       ),
-      rawCall: { rawPrompt, rawSettings },
-      rawResponse: { headers: responseHeaders },
-      warnings,
-      request: { body: JSON.stringify(body) }
+      request: { body },
+      response: { headers: responseHeaders }
     };
   }
 };
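
Note: as in doGenerate, the doStream result above drops rawCall / rawResponse: request.body is now the args object (no longer pre-stringified), response headers move under response, and warnings travel in the stream-start part rather than on the result. A sketch of the reshaped result from the consumer's side (field names follow the diff; types are illustrative):

    interface DoStreamResult {
      stream: ReadableStream<unknown>;
      request: { body: unknown }; // the raw args object, not a JSON string
      response: { headers?: Record<string, string> };
    }

    function logRequestBody(result: DoStreamResult) {
      // v1 callers could log request.body verbatim; v2 callers stringify it themselves.
      console.log(JSON.stringify(result.request.body));
    }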