@ai-sdk/anthropic 2.0.0-canary.1 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -31,13 +31,15 @@ var anthropicFailedResponseHandler = createJsonErrorResponseHandler({
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- function prepareTools(mode) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ function prepareTools({
+ tools,
+ toolChoice
+ }) {
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  const betas = /* @__PURE__ */ new Set();
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+ return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
  }
  const anthropicTools2 = [];
  for (const tool of tools) {
@@ -109,11 +111,10 @@ function prepareTools(mode) {
  break;
  }
  }
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return {
  tools: anthropicTools2,
- tool_choice: void 0,
+ toolChoice: void 0,
  toolWarnings,
  betas
  };
@@ -123,30 +124,30 @@ function prepareTools(mode) {
  case "auto":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "auto" },
+ toolChoice: { type: "auto" },
  toolWarnings,
  betas
  };
  case "required":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "any" },
+ toolChoice: { type: "any" },
  toolWarnings,
  betas
  };
  case "none":
- return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+ return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
  case "tool":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "tool", name: toolChoice.toolName },
+ toolChoice: { type: "tool", name: toolChoice.toolName },
  toolWarnings,
  betas
  };
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
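
Note: prepareTools now takes the tools and toolChoice call options directly instead of the old mode object, and its result uses the camelCase toolChoice key (getArgs maps it back to Anthropic's tool_choice later in this diff). A minimal sketch of the mapping; myTools and the tool name are illustrative placeholders, not part of the package:

// Sketch only; myTools and "get_weather" are made up.
prepareTools({ tools: myTools, toolChoice: { type: "auto" } });
// -> { tools: [...], toolChoice: { type: "auto" }, toolWarnings, betas }
prepareTools({ tools: myTools, toolChoice: { type: "required" } });
// -> toolChoice: { type: "any" }
prepareTools({ tools: myTools, toolChoice: { type: "none" } });
// -> { tools: undefined, toolChoice: undefined, toolWarnings, betas }
prepareTools({ tools: myTools, toolChoice: { type: "tool", toolName: "get_weather" } });
// -> toolChoice: { type: "tool", name: "get_weather" }
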
@@ -156,13 +157,13 @@ function prepareTools(mode) {
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToAnthropicMessagesPrompt({
  prompt,
  sendReasoning,
  warnings
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  const betas = /* @__PURE__ */ new Set();
  const blocks = groupIntoBlocks(prompt);
  let system = void 0;
@@ -184,10 +185,10 @@ function convertToAnthropicMessagesPrompt({
  functionality: "Multiple system messages that are separated by user/assistant messages"
  });
  }
- system = block.messages.map(({ content, providerMetadata }) => ({
+ system = block.messages.map(({ content, providerOptions }) => ({
  type: "text",
  text: content,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  }));
  break;
  }
@@ -200,7 +201,7 @@ function convertToAnthropicMessagesPrompt({
  for (let j = 0; j < content.length; j++) {
  const part = content[j];
  const isLastPart = j === content.length - 1;
- const cacheControl = (_a = getCacheControl(part.providerMetadata)) != null ? _a : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_a = getCacheControl(part.providerOptions)) != null ? _a : isLastPart ? getCacheControl(message.providerOptions) : void 0;
  switch (part.type) {
  case "text": {
  anthropicContent.push({
@@ -210,42 +211,39 @@ function convertToAnthropicMessagesPrompt({
  });
  break;
  }
- case "image": {
- anthropicContent.push({
- type: "image",
- source: part.image instanceof URL ? {
- type: "url",
- url: part.image.toString()
- } : {
- type: "base64",
- media_type: (_b = part.mimeType) != null ? _b : "image/jpeg",
- data: convertUint8ArrayToBase64(part.image)
- },
- cache_control: cacheControl
- });
- break;
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError2({
- functionality: "Image URLs in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ anthropicContent.push({
+ type: "image",
+ source: part.data instanceof URL ? {
+ type: "url",
+ url: part.data.toString()
+ } : {
+ type: "base64",
+ media_type: part.mediaType === "image/*" ? "image/jpeg" : part.mediaType,
+ data: convertToBase64(part.data)
+ },
+ cache_control: cacheControl
  });
- }
- if (part.mimeType !== "application/pdf") {
+ } else if (part.mediaType === "application/pdf") {
+ betas.add("pdfs-2024-09-25");
+ anthropicContent.push({
+ type: "document",
+ source: part.data instanceof URL ? {
+ type: "url",
+ url: part.data.toString()
+ } : {
+ type: "base64",
+ media_type: "application/pdf",
+ data: convertToBase64(part.data)
+ },
+ cache_control: cacheControl
+ });
+ } else {
  throw new UnsupportedFunctionalityError2({
- functionality: "Non-PDF files in user messages"
+ functionality: `media type: ${part.mediaType}`
  });
  }
- betas.add("pdfs-2024-09-25");
- anthropicContent.push({
- type: "document",
- source: {
- type: "base64",
- media_type: "application/pdf",
- data: part.data
- },
- cache_control: cacheControl
- });
  break;
  }
  }
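
Note: the dedicated "image" content part is gone; images and PDFs both arrive as "file" parts carrying a mediaType and either a URL or binary data. A sketch of the user-message parts the converter now accepts; the concrete values are invented for illustration:

// Illustrative parts only; the URL and bytes are made-up values.
const imagePart = {
  type: "file",
  mediaType: "image/png", // "image/*" falls back to "image/jpeg"
  data: new URL("https://example.com/cat.png") // URL -> url source, bytes -> base64 source
};
const pdfPart = {
  type: "file",
  mediaType: "application/pdf", // adds the "pdfs-2024-09-25" beta
  data: new Uint8Array([0x25, 0x50, 0x44, 0x46]) // bytes are encoded via convertToBase64
};
// Any other mediaType throws UnsupportedFunctionalityError2 (`media type: ...`).
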
@@ -256,7 +254,7 @@ function convertToAnthropicMessagesPrompt({
  for (let i2 = 0; i2 < content.length; i2++) {
  const part = content[i2];
  const isLastPart = i2 === content.length - 1;
- const cacheControl = (_c = getCacheControl(part.providerMetadata)) != null ? _c : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_b = getCacheControl(part.providerOptions)) != null ? _b : isLastPart ? getCacheControl(message.providerOptions) : void 0;
  const toolResultContent = part.content != null ? part.content.map((part2) => {
  var _a2;
  switch (part2.type) {
@@ -271,7 +269,7 @@ function convertToAnthropicMessagesPrompt({
  type: "image",
  source: {
  type: "base64",
- media_type: (_a2 = part2.mimeType) != null ? _a2 : "image/jpeg",
+ media_type: (_a2 = part2.mediaType) != null ? _a2 : "image/jpeg",
  data: part2.data
  },
  cache_control: void 0
@@ -306,7 +304,7 @@ function convertToAnthropicMessagesPrompt({
  for (let k = 0; k < content.length; k++) {
  const part = content[k];
  const isLastContentPart = k === content.length - 1;
- const cacheControl = (_d = getCacheControl(part.providerMetadata)) != null ? _d : isLastContentPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_c = getCacheControl(part.providerOptions)) != null ? _c : isLastContentPart ? getCacheControl(message.providerOptions) : void 0;
  switch (part.type) {
  case "text": {
  anthropicContent.push({
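
Note: throughout the prompt conversion, cache control is now read from providerOptions rather than providerMetadata, with the part-level value taking precedence and the message-level value applying only to the last part. A sketch of a message that would produce a cache_control entry; the anthropic.cacheControl shape is assumed from the provider's documented options, not shown in this diff:

// Hypothetical message; getCacheControl reads the anthropic provider options.
const message = {
  role: "user",
  content: [{ type: "text", text: "Summarize the attached report." }],
  providerOptions: {
    anthropic: { cacheControl: { type: "ephemeral" } } // assumed option shape
  }
};
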
@@ -363,7 +361,7 @@ function convertToAnthropicMessagesPrompt({
  }
  default: {
  const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ throw new Error(`content type: ${_exhaustiveCheck}`);
  }
  }
  }
@@ -438,21 +436,23 @@ function mapAnthropicStopReason(finishReason) {
  var AnthropicMessagesLanguageModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "tool";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
+ supportsUrl(url) {
+ return url.protocol === "https:";
+ }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return this.config.supportsImageUrls;
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
  }
  async getArgs({
- mode,
  prompt,
- maxTokens = 4096,
+ maxOutputTokens = 4096,
  // 4096: max model output tokens TODO update default in v5
  temperature,
  topP,
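
Note: the supportsImageUrls getter is replaced by supportsUrl(url) (https only) and an async getSupportedUrls() that defers to the config and falls back to an empty object. The returned shape is presumably a map of media-type patterns to URL regexps (an assumption based on the v2 specification; the config shape beyond getSupportedUrls is likewise assumed):

// Illustrative only; every config value here is a placeholder.
const model = new AnthropicMessagesLanguageModel("claude-3-7-sonnet-latest", {}, {
  provider: "anthropic.messages",
  fetch: globalThis.fetch,
  getSupportedUrls: async () => ({
    "image/*": [/^https:\/\/.+$/] // assumed shape: media type -> URL patterns
  })
});
// await model.getSupportedUrls() -> the map above, or {} when the hook is absent
// model.supportsUrl(new URL("https://example.com/a.png")) -> true (https only)
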
@@ -462,10 +462,11 @@ var AnthropicMessagesLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata: providerOptions
+ tools,
+ toolChoice,
+ providerOptions
  }) {
  var _a, _b, _c;
- const type = mode.type;
  const warnings = [];
  if (frequencyPenalty != null) {
  warnings.push({
@@ -508,7 +509,7 @@ var AnthropicMessagesLanguageModel = class {
  // model id:
  model: this.modelId,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_k: topK,
  top_p: topP,
@@ -551,44 +552,23 @@ var AnthropicMessagesLanguageModel = class {
  details: "topP is not supported when thinking is enabled"
  });
  }
- baseArgs.max_tokens = maxTokens + thinkingBudget;
- }
- switch (type) {
- case "regular": {
- const {
- tools,
- tool_choice,
- toolWarnings,
- betas: toolsBetas
- } = prepareTools(mode);
- return {
- args: { ...baseArgs, tools, tool_choice },
- warnings: [...warnings, ...toolWarnings],
- betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
- };
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError3({
- functionality: "json-mode object generation"
- });
- }
- case "object-tool": {
- const { name, description, parameters } = mode.tool;
- return {
- args: {
- ...baseArgs,
- tools: [{ name, description, input_schema: parameters }],
- tool_choice: { type: "tool", name }
- },
- warnings,
- betas: messagesBetas
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
+ baseArgs.max_tokens = maxOutputTokens + thinkingBudget;
  }
+ const {
+ tools: anthropicTools2,
+ toolChoice: anthropicToolChoice,
+ toolWarnings,
+ betas: toolsBetas
+ } = prepareTools({ tools, toolChoice });
+ return {
+ args: {
+ ...baseArgs,
+ tools: anthropicTools2,
+ tool_choice: anthropicToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings],
+ betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
+ };
  }
  async getHeaders({
  betas,
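
Note: the regular / object-json / object-tool mode switch is removed. getArgs now receives tools, toolChoice, maxOutputTokens (formerly maxTokens), and providerOptions (formerly providerMetadata) directly, and always routes tools through prepareTools. A sketch of the new call-options shape; the values are illustrative and the thinking option shape is assumed from the provider's documented options:

// Illustrative call; prompt and tools stand in for real values.
const { args, warnings, betas } = await model.getArgs({
  prompt,
  maxOutputTokens: 1024,
  temperature: 0.3,
  tools,
  toolChoice: { type: "auto" },
  providerOptions: {
    anthropic: { thinking: { type: "enabled", budgetTokens: 2048 } } // assumed shape
  }
});
// args.max_tokens === 1024 (plus the thinking budget when thinking is enabled)
// args.tool_choice comes from the camelCase toolChoice returned by prepareTools
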
@@ -626,56 +606,59 @@ var AnthropicMessagesLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
- let text = "";
- for (const content of response.content) {
- if (content.type === "text") {
- text += content.text;
- }
- }
- let toolCalls = void 0;
- if (response.content.some((content) => content.type === "tool_use")) {
- toolCalls = [];
- for (const content of response.content) {
- if (content.type === "tool_use") {
- toolCalls.push({
+ const content = [];
+ for (const part of response.content) {
+ switch (part.type) {
+ case "text": {
+ content.push({ type: "text", text: part.text });
+ break;
+ }
+ case "thinking": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.thinking
+ });
+ content.push({
+ type: "reasoning",
+ reasoningType: "signature",
+ signature: part.signature
+ });
+ break;
+ }
+ case "redacted_thinking": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "redacted",
+ data: part.data
+ });
+ break;
+ }
+ case "tool_use": {
+ content.push({
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: content.id,
- toolName: content.name,
- args: JSON.stringify(content.input)
+ toolCallId: part.id,
+ toolName: part.name,
+ args: JSON.stringify(part.input)
  });
+ break;
  }
  }
  }
- const reasoning = response.content.filter(
- (content) => content.type === "redacted_thinking" || content.type === "thinking"
- ).map(
- (content) => content.type === "thinking" ? {
- type: "text",
- text: content.thinking,
- signature: content.signature
- } : {
- type: "redacted",
- data: content.data
- }
- );
  return {
- text,
- reasoning: reasoning.length > 0 ? reasoning : void 0,
- toolCalls,
+ content,
  finishReason: mapAnthropicStopReason(response.stop_reason),
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body: args },
  response: {
  id: (_a = response.id) != null ? _a : void 0,
- modelId: (_b = response.model) != null ? _b : void 0
+ modelId: (_b = response.model) != null ? _b : void 0,
+ headers: responseHeaders,
+ body: rawResponse
  },
  warnings,
  providerMetadata: {
@@ -683,8 +666,7 @@ var AnthropicMessagesLanguageModel = class {
  cacheCreationInputTokens: (_c = response.usage.cache_creation_input_tokens) != null ? _c : null,
  cacheReadInputTokens: (_d = response.usage.cache_read_input_tokens) != null ? _d : null
  }
- },
- request: { body: JSON.stringify(args) }
+ }
  };
  }
  async doStream(options) {
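
Note: doGenerate no longer returns separate text / reasoning / toolCalls fields. The result is a single ordered content array, usage switches to inputTokens/outputTokens, the request body is the parsed args object (no longer JSON.stringify-ed), and headers/body move under response. An illustrative result; all values below are made up:

// Illustrative result only; ids, token counts, and texts are invented.
const exampleResult = {
  content: [
    { type: "reasoning", reasoningType: "text", text: "Check the forecast first." },
    { type: "reasoning", reasoningType: "signature", signature: "sig_abc" },
    { type: "text", text: "It will be sunny." },
    {
      type: "tool-call",
      toolCallType: "function",
      toolCallId: "toolu_123",
      toolName: "get_weather",
      args: '{"city":"Berlin"}'
    }
  ],
  finishReason: "tool-calls",
  usage: { inputTokens: 120, outputTokens: 45 },
  request: { body: { /* the parsed request args */ } },
  response: { id: "msg_123", modelId: "claude-3-7-sonnet-20250219", headers: {}, body: {} },
  warnings: [],
  providerMetadata: {
    anthropic: { cacheCreationInputTokens: null, cacheReadInputTokens: null }
  }
};
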
@@ -701,11 +683,10 @@ var AnthropicMessagesLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
  const usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  const toolCallContentBlocks = {};
  let providerMetadata = void 0;
@@ -713,6 +694,9 @@ var AnthropicMessagesLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d;
  if (!chunk.success) {
@@ -734,7 +718,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  case "redacted_thinking": {
  controller.enqueue({
- type: "redacted-reasoning",
+ type: "reasoning",
+ reasoningType: "redacted",
  data: value.content_block.data
  });
  return;
@@ -775,22 +760,24 @@ var AnthropicMessagesLanguageModel = class {
  switch (deltaType) {
  case "text_delta": {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta.text
+ type: "text",
+ text: value.delta.text
  });
  return;
  }
  case "thinking_delta": {
  controller.enqueue({
  type: "reasoning",
- textDelta: value.delta.thinking
+ reasoningType: "text",
+ text: value.delta.thinking
  });
  return;
  }
  case "signature_delta": {
  if (blockType === "thinking") {
  controller.enqueue({
- type: "reasoning-signature",
+ type: "reasoning",
+ reasoningType: "signature",
  signature: value.delta.signature
  });
  }
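
Note: the streaming part names change to match. The stream now opens with a stream-start part carrying the warnings, text deltas are emitted as { type: "text", text } instead of { type: "text-delta", textDelta }, and thinking, signature, and redacted deltas all become reasoning parts distinguished by reasoningType. An illustrative sequence of parts; the texts and signature are invented:

// Illustrative stream parts only.
const exampleStreamParts = [
  { type: "stream-start", warnings: [] },
  { type: "reasoning", reasoningType: "text", text: "Working through the question..." },
  { type: "reasoning", reasoningType: "signature", signature: "sig_abc" },
  { type: "reasoning", reasoningType: "redacted", data: "opaque-redacted-payload" },
  { type: "text", text: "Here is the answer." }
];
// Removed part names: "text-delta", "reasoning-signature", "redacted-reasoning".
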
@@ -817,8 +804,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  }
  case "message_start": {
- usage.promptTokens = value.message.usage.input_tokens;
- usage.completionTokens = value.message.usage.output_tokens;
+ usage.inputTokens = value.message.usage.input_tokens;
+ usage.outputTokens = value.message.usage.output_tokens;
  providerMetadata = {
  anthropic: {
  cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null,
@@ -833,7 +820,7 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "message_delta": {
- usage.completionTokens = value.usage.output_tokens;
+ usage.outputTokens = value.usage.output_tokens;
  finishReason = mapAnthropicStopReason(value.delta.stop_reason);
  return;
  }
@@ -858,10 +845,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };