@ai-sdk/anthropic 2.0.0-canary.1 → 2.0.0-canary.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -40,13 +40,15 @@ var anthropicFailedResponseHandler = createJsonErrorResponseHandler({
  import {
  UnsupportedFunctionalityError
  } from "@ai-sdk/provider";
- function prepareTools(mode) {
- var _a;
- const tools = ((_a = mode.tools) == null ? void 0 : _a.length) ? mode.tools : void 0;
+ function prepareTools({
+ tools,
+ toolChoice
+ }) {
+ tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
  const betas = /* @__PURE__ */ new Set();
  if (tools == null) {
- return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+ return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
  }
  const anthropicTools2 = [];
  for (const tool of tools) {
@@ -118,11 +120,10 @@ function prepareTools(mode) {
  break;
  }
  }
- const toolChoice = mode.toolChoice;
  if (toolChoice == null) {
  return {
  tools: anthropicTools2,
- tool_choice: void 0,
+ toolChoice: void 0,
  toolWarnings,
  betas
  };
@@ -132,30 +133,30 @@ function prepareTools(mode) {
  case "auto":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "auto" },
+ toolChoice: { type: "auto" },
  toolWarnings,
  betas
  };
  case "required":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "any" },
+ toolChoice: { type: "any" },
  toolWarnings,
  betas
  };
  case "none":
- return { tools: void 0, tool_choice: void 0, toolWarnings, betas };
+ return { tools: void 0, toolChoice: void 0, toolWarnings, betas };
  case "tool":
  return {
  tools: anthropicTools2,
- tool_choice: { type: "tool", name: toolChoice.toolName },
+ toolChoice: { type: "tool", name: toolChoice.toolName },
  toolWarnings,
  betas
  };
  default: {
  const _exhaustiveCheck = type;
  throw new UnsupportedFunctionalityError({
- functionality: `Unsupported tool choice type: ${_exhaustiveCheck}`
+ functionality: `tool choice type: ${_exhaustiveCheck}`
  });
  }
  }
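
Taken together, the prepareTools hunks above replace the `mode`-based signature with a destructured `{ tools, toolChoice }` parameter and rename the returned `tool_choice` key to `toolChoice`. A minimal TypeScript sketch of the resulting toolChoice mapping, for illustration only (the "weather" tool name is made up):

// Sketch only: how the rewritten prepareTools maps the standardized toolChoice
// values onto Anthropic's tool choice payloads, now returned under the
// camelCase key `toolChoice` instead of `tool_choice`.
const toolChoiceMapping = {
  auto: { type: "auto" },                  // "auto" passes through
  required: { type: "any" },               // "required" becomes Anthropic's "any"
  tool: { type: "tool", name: "weather" }, // hypothetical toolName "weather"
  none: undefined                          // "none" drops the tools entirely
};

console.log(toolChoiceMapping.required); // { type: "any" }
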
@@ -165,13 +166,13 @@ function prepareTools(mode) {
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
- import { convertUint8ArrayToBase64 } from "@ai-sdk/provider-utils";
+ import { convertToBase64 } from "@ai-sdk/provider-utils";
  function convertToAnthropicMessagesPrompt({
  prompt,
  sendReasoning,
  warnings
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c;
  const betas = /* @__PURE__ */ new Set();
  const blocks = groupIntoBlocks(prompt);
  let system = void 0;
@@ -193,10 +194,10 @@ function convertToAnthropicMessagesPrompt({
  functionality: "Multiple system messages that are separated by user/assistant messages"
  });
  }
- system = block.messages.map(({ content, providerMetadata }) => ({
+ system = block.messages.map(({ content, providerOptions }) => ({
  type: "text",
  text: content,
- cache_control: getCacheControl(providerMetadata)
+ cache_control: getCacheControl(providerOptions)
  }));
  break;
  }
@@ -209,7 +210,7 @@ function convertToAnthropicMessagesPrompt({
  for (let j = 0; j < content.length; j++) {
  const part = content[j];
  const isLastPart = j === content.length - 1;
- const cacheControl = (_a = getCacheControl(part.providerMetadata)) != null ? _a : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_a = getCacheControl(part.providerOptions)) != null ? _a : isLastPart ? getCacheControl(message.providerOptions) : void 0;
  switch (part.type) {
  case "text": {
  anthropicContent.push({
@@ -219,42 +220,39 @@ function convertToAnthropicMessagesPrompt({
  });
  break;
  }
- case "image": {
- anthropicContent.push({
- type: "image",
- source: part.image instanceof URL ? {
- type: "url",
- url: part.image.toString()
- } : {
- type: "base64",
- media_type: (_b = part.mimeType) != null ? _b : "image/jpeg",
- data: convertUint8ArrayToBase64(part.image)
- },
- cache_control: cacheControl
- });
- break;
- }
  case "file": {
- if (part.data instanceof URL) {
- throw new UnsupportedFunctionalityError2({
- functionality: "Image URLs in user messages"
+ if (part.mediaType.startsWith("image/")) {
+ anthropicContent.push({
+ type: "image",
+ source: part.data instanceof URL ? {
+ type: "url",
+ url: part.data.toString()
+ } : {
+ type: "base64",
+ media_type: part.mediaType === "image/*" ? "image/jpeg" : part.mediaType,
+ data: convertToBase64(part.data)
+ },
+ cache_control: cacheControl
  });
- }
- if (part.mimeType !== "application/pdf") {
+ } else if (part.mediaType === "application/pdf") {
+ betas.add("pdfs-2024-09-25");
+ anthropicContent.push({
+ type: "document",
+ source: part.data instanceof URL ? {
+ type: "url",
+ url: part.data.toString()
+ } : {
+ type: "base64",
+ media_type: "application/pdf",
+ data: convertToBase64(part.data)
+ },
+ cache_control: cacheControl
+ });
+ } else {
  throw new UnsupportedFunctionalityError2({
- functionality: "Non-PDF files in user messages"
+ functionality: `media type: ${part.mediaType}`
  });
  }
- betas.add("pdfs-2024-09-25");
- anthropicContent.push({
- type: "document",
- source: {
- type: "base64",
- media_type: "application/pdf",
- data: part.data
- },
- cache_control: cacheControl
- });
  break;
  }
  }
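
The separate `image` part handling above is folded into the `file` case: parts are now routed by `mediaType`, with `image/*` mapped to Anthropic `image` blocks (URL or base64 source), `application/pdf` mapped to `document` blocks behind the `pdfs-2024-09-25` beta, and any other media type rejected. A rough sketch of the content blocks this produces, using made-up example URLs:

// Sketch only: Anthropic content blocks for two hypothetical file parts.
// URL data now flows through as a { type: "url" } source instead of throwing.
const imageBlock = {
  type: "image",
  source: { type: "url", url: "https://example.com/photo.png" }, // hypothetical URL
  cache_control: undefined
};

const pdfBlock = {
  type: "document",
  source: { type: "url", url: "https://example.com/report.pdf" }, // hypothetical URL
  cache_control: undefined
};

// A part with, say, mediaType "audio/mpeg" would instead throw
// UnsupportedFunctionalityError with functionality `media type: audio/mpeg`.
console.log(imageBlock, pdfBlock);
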
@@ -265,7 +263,7 @@ function convertToAnthropicMessagesPrompt({
  for (let i2 = 0; i2 < content.length; i2++) {
  const part = content[i2];
  const isLastPart = i2 === content.length - 1;
- const cacheControl = (_c = getCacheControl(part.providerMetadata)) != null ? _c : isLastPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_b = getCacheControl(part.providerOptions)) != null ? _b : isLastPart ? getCacheControl(message.providerOptions) : void 0;
  const toolResultContent = part.content != null ? part.content.map((part2) => {
  var _a2;
  switch (part2.type) {
@@ -280,7 +278,7 @@ function convertToAnthropicMessagesPrompt({
  type: "image",
  source: {
  type: "base64",
- media_type: (_a2 = part2.mimeType) != null ? _a2 : "image/jpeg",
+ media_type: (_a2 = part2.mediaType) != null ? _a2 : "image/jpeg",
  data: part2.data
  },
  cache_control: void 0
@@ -315,7 +313,7 @@ function convertToAnthropicMessagesPrompt({
  for (let k = 0; k < content.length; k++) {
  const part = content[k];
  const isLastContentPart = k === content.length - 1;
- const cacheControl = (_d = getCacheControl(part.providerMetadata)) != null ? _d : isLastContentPart ? getCacheControl(message.providerMetadata) : void 0;
+ const cacheControl = (_c = getCacheControl(part.providerOptions)) != null ? _c : isLastContentPart ? getCacheControl(message.providerOptions) : void 0;
  switch (part.type) {
  case "text": {
  anthropicContent.push({
@@ -372,7 +370,7 @@ function convertToAnthropicMessagesPrompt({
  }
  default: {
  const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
+ throw new Error(`content type: ${_exhaustiveCheck}`);
  }
  }
  }
@@ -447,21 +445,23 @@ function mapAnthropicStopReason(finishReason) {
  var AnthropicMessagesLanguageModel = class {
  constructor(modelId, settings, config) {
  this.specificationVersion = "v2";
- this.defaultObjectGenerationMode = "tool";
  this.modelId = modelId;
  this.settings = settings;
  this.config = config;
  }
+ supportsUrl(url) {
+ return url.protocol === "https:";
+ }
  get provider() {
  return this.config.provider;
  }
- get supportsImageUrls() {
- return this.config.supportsImageUrls;
+ async getSupportedUrls() {
+ var _a, _b, _c;
+ return (_c = (_b = (_a = this.config).getSupportedUrls) == null ? void 0 : _b.call(_a)) != null ? _c : {};
  }
  async getArgs({
- mode,
  prompt,
- maxTokens = 4096,
+ maxOutputTokens = 4096,
  // 4096: max model output tokens TODO update default in v5
  temperature,
  topP,
@@ -471,10 +471,11 @@ var AnthropicMessagesLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata: providerOptions
+ tools,
+ toolChoice,
+ providerOptions
  }) {
  var _a, _b, _c;
- const type = mode.type;
  const warnings = [];
  if (frequencyPenalty != null) {
  warnings.push({
@@ -517,7 +518,7 @@ var AnthropicMessagesLanguageModel = class {
  // model id:
  model: this.modelId,
  // standardized settings:
- max_tokens: maxTokens,
+ max_tokens: maxOutputTokens,
  temperature,
  top_k: topK,
  top_p: topP,
@@ -560,44 +561,23 @@ var AnthropicMessagesLanguageModel = class {
  details: "topP is not supported when thinking is enabled"
  });
  }
- baseArgs.max_tokens = maxTokens + thinkingBudget;
- }
- switch (type) {
- case "regular": {
- const {
- tools,
- tool_choice,
- toolWarnings,
- betas: toolsBetas
- } = prepareTools(mode);
- return {
- args: { ...baseArgs, tools, tool_choice },
- warnings: [...warnings, ...toolWarnings],
- betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
- };
- }
- case "object-json": {
- throw new UnsupportedFunctionalityError3({
- functionality: "json-mode object generation"
- });
- }
- case "object-tool": {
- const { name, description, parameters } = mode.tool;
- return {
- args: {
- ...baseArgs,
- tools: [{ name, description, input_schema: parameters }],
- tool_choice: { type: "tool", name }
- },
- warnings,
- betas: messagesBetas
- };
- }
- default: {
- const _exhaustiveCheck = type;
- throw new Error(`Unsupported type: ${_exhaustiveCheck}`);
- }
+ baseArgs.max_tokens = maxOutputTokens + thinkingBudget;
  }
+ const {
+ tools: anthropicTools2,
+ toolChoice: anthropicToolChoice,
+ toolWarnings,
+ betas: toolsBetas
+ } = prepareTools({ tools, toolChoice });
+ return {
+ args: {
+ ...baseArgs,
+ tools: anthropicTools2,
+ tool_choice: anthropicToolChoice
+ },
+ warnings: [...warnings, ...toolWarnings],
+ betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
+ };
  }
  async getHeaders({
  betas,
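
With the `mode` switch gone, `getArgs` always follows a single path: `tools` and `toolChoice` arrive as top-level call options, `maxOutputTokens` replaces `maxTokens`, `providerOptions` replaces `providerMetadata`, and the prepared tools are merged into the request body. A hedged sketch of the option names this code now reads (the prompt and values are illustrative, not taken from the package):

// Sketch only: the renamed v2-style call options consumed by getArgs.
const callOptions = {
  prompt: [
    { role: "user", content: [{ type: "text", text: "What's the weather in Berlin?" }] }
  ],
  maxOutputTokens: 1024,           // previously maxTokens
  temperature: 0.2,
  tools: [],                       // forwarded to prepareTools({ tools, toolChoice })
  toolChoice: { type: "auto" },
  providerOptions: {}              // previously providerMetadata
};

console.log(Object.keys(callOptions));
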
@@ -635,56 +615,59 @@ var AnthropicMessagesLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
- let text = "";
- for (const content of response.content) {
- if (content.type === "text") {
- text += content.text;
- }
- }
- let toolCalls = void 0;
- if (response.content.some((content) => content.type === "tool_use")) {
- toolCalls = [];
- for (const content of response.content) {
- if (content.type === "tool_use") {
- toolCalls.push({
+ const content = [];
+ for (const part of response.content) {
+ switch (part.type) {
+ case "text": {
+ content.push({ type: "text", text: part.text });
+ break;
+ }
+ case "thinking": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "text",
+ text: part.thinking
+ });
+ content.push({
+ type: "reasoning",
+ reasoningType: "signature",
+ signature: part.signature
+ });
+ break;
+ }
+ case "redacted_thinking": {
+ content.push({
+ type: "reasoning",
+ reasoningType: "redacted",
+ data: part.data
+ });
+ break;
+ }
+ case "tool_use": {
+ content.push({
+ type: "tool-call",
  toolCallType: "function",
- toolCallId: content.id,
- toolName: content.name,
- args: JSON.stringify(content.input)
+ toolCallId: part.id,
+ toolName: part.name,
+ args: JSON.stringify(part.input)
  });
+ break;
  }
  }
  }
- const reasoning = response.content.filter(
- (content) => content.type === "redacted_thinking" || content.type === "thinking"
- ).map(
- (content) => content.type === "thinking" ? {
- type: "text",
- text: content.thinking,
- signature: content.signature
- } : {
- type: "redacted",
- data: content.data
- }
- );
  return {
- text,
- reasoning: reasoning.length > 0 ? reasoning : void 0,
- toolCalls,
+ content,
  finishReason: mapAnthropicStopReason(response.stop_reason),
  usage: {
- promptTokens: response.usage.input_tokens,
- completionTokens: response.usage.output_tokens
- },
- rawCall: { rawPrompt, rawSettings },
- rawResponse: {
- headers: responseHeaders,
- body: rawResponse
+ inputTokens: response.usage.input_tokens,
+ outputTokens: response.usage.output_tokens
  },
+ request: { body: args },
  response: {
  id: (_a = response.id) != null ? _a : void 0,
- modelId: (_b = response.model) != null ? _b : void 0
+ modelId: (_b = response.model) != null ? _b : void 0,
+ headers: responseHeaders,
+ body: rawResponse
  },
  warnings,
  providerMetadata: {
@@ -692,8 +675,7 @@ var AnthropicMessagesLanguageModel = class {
  cacheCreationInputTokens: (_c = response.usage.cache_creation_input_tokens) != null ? _c : null,
  cacheReadInputTokens: (_d = response.usage.cache_read_input_tokens) != null ? _d : null
  }
- },
- request: { body: JSON.stringify(args) }
+ }
  };
  }
  async doStream(options) {
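
As the doGenerate rework above shows, the result no longer carries separate `text`, `reasoning`, and `toolCalls` fields: every part of the Anthropic response is pushed into one ordered `content` array, usage is reported as `inputTokens`/`outputTokens`, and the raw request/response move under `request.body` and `response`. A sketch of the content parts for a hypothetical response that thinks, answers, and calls a tool (ids, text, and tool names are invented):

// Sketch only: the part shapes assembled by the new doGenerate.
const content = [
  { type: "reasoning", reasoningType: "text", text: "Check the forecast first." },
  { type: "reasoning", reasoningType: "signature", signature: "sig_abc" },   // illustrative value
  { type: "text", text: "It will rain in Berlin." },
  {
    type: "tool-call",
    toolCallType: "function",
    toolCallId: "toolu_123",       // hypothetical id
    toolName: "weather",           // hypothetical tool
    args: JSON.stringify({ city: "Berlin" })
  }
];

const usage = { inputTokens: 12, outputTokens: 34 }; // previously promptTokens/completionTokens
console.log(content.length, usage);
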
@@ -710,11 +692,10 @@ var AnthropicMessagesLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- const { messages: rawPrompt, ...rawSettings } = args;
  let finishReason = "unknown";
  const usage = {
- promptTokens: Number.NaN,
- completionTokens: Number.NaN
+ inputTokens: void 0,
+ outputTokens: void 0
  };
  const toolCallContentBlocks = {};
  let providerMetadata = void 0;
@@ -722,6 +703,9 @@ var AnthropicMessagesLanguageModel = class {
  return {
  stream: response.pipeThrough(
  new TransformStream({
+ start(controller) {
+ controller.enqueue({ type: "stream-start", warnings });
+ },
  transform(chunk, controller) {
  var _a, _b, _c, _d;
  if (!chunk.success) {
@@ -743,7 +727,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  case "redacted_thinking": {
  controller.enqueue({
- type: "redacted-reasoning",
+ type: "reasoning",
+ reasoningType: "redacted",
  data: value.content_block.data
  });
  return;
@@ -784,22 +769,24 @@ var AnthropicMessagesLanguageModel = class {
  switch (deltaType) {
  case "text_delta": {
  controller.enqueue({
- type: "text-delta",
- textDelta: value.delta.text
+ type: "text",
+ text: value.delta.text
  });
  return;
  }
  case "thinking_delta": {
  controller.enqueue({
  type: "reasoning",
- textDelta: value.delta.thinking
+ reasoningType: "text",
+ text: value.delta.thinking
  });
  return;
  }
  case "signature_delta": {
  if (blockType === "thinking") {
  controller.enqueue({
- type: "reasoning-signature",
+ type: "reasoning",
+ reasoningType: "signature",
  signature: value.delta.signature
  });
  }
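
On the streaming side, the transform now enqueues a `stream-start` part carrying the warnings before any deltas, and the delta parts are renamed: `text-delta`/`textDelta` becomes `{ type: "text", text }`, while thinking and signature deltas both become `reasoning` parts distinguished by `reasoningType`. A sketch of the part sequence a consumer might observe (all values are illustrative):

// Sketch only: the order and shape of parts emitted by the reworked stream.
const observedParts = [
  { type: "stream-start", warnings: [] },                                   // enqueued in start()
  { type: "reasoning", reasoningType: "text", text: "Thinking about it." }, // was reasoning + textDelta
  { type: "reasoning", reasoningType: "signature", signature: "sig_abc" },  // was reasoning-signature
  { type: "text", text: "Hello." }                                          // was text-delta + textDelta
];

for (const part of observedParts) {
  console.log(part.type);
}
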
@@ -826,8 +813,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  }
  case "message_start": {
- usage.promptTokens = value.message.usage.input_tokens;
- usage.completionTokens = value.message.usage.output_tokens;
+ usage.inputTokens = value.message.usage.input_tokens;
+ usage.outputTokens = value.message.usage.output_tokens;
  providerMetadata = {
  anthropic: {
  cacheCreationInputTokens: (_a = value.message.usage.cache_creation_input_tokens) != null ? _a : null,
@@ -842,7 +829,7 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "message_delta": {
- usage.completionTokens = value.usage.output_tokens;
+ usage.outputTokens = value.usage.output_tokens;
  finishReason = mapAnthropicStopReason(value.delta.stop_reason);
  return;
  }
@@ -867,10 +854,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  })
  ),
- rawCall: { rawPrompt, rawSettings },
- rawResponse: { headers: responseHeaders },
- warnings,
- request: { body: JSON.stringify(body) }
+ request: { body },
+ response: { headers: responseHeaders }
  };
  }
  };
@@ -1163,7 +1148,9 @@ function createAnthropic(options = {}) {
  baseURL,
  headers: getHeaders,
  fetch: options.fetch,
- supportsImageUrls: true
+ getSupportedUrls: async () => ({
+ "image/*": [/^https?:\/\/.*$/]
+ })
  });
  const provider = function(modelId, settings) {
  if (new.target) {
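
At the provider level, the boolean `supportsImageUrls` flag is replaced by a `getSupportedUrls` factory that maps media-type patterns to URL regexes; the model's new `getSupportedUrls()` method simply forwards this config. A sketch of how such a map could be consulted, mirroring the default added here (the matching helper is a hypothetical illustration, not part of the package):

// Sketch only: checking a URL against the media-type/regex map that
// getSupportedUrls now returns (default: http(s) URLs for image/*).
const supportedUrls: Record<string, RegExp[]> = {
  "image/*": [/^https?:\/\/.*$/]
};

function isUrlSupported(mediaType: string, url: string): boolean {
  const patterns = Object.entries(supportedUrls)
    .filter(([pattern]) =>
      pattern === mediaType ||
      (pattern.endsWith("/*") && mediaType.startsWith(pattern.slice(0, -1))))
    .flatMap(([, regexes]) => regexes);
  return patterns.some((re) => re.test(url));
}

console.log(isUrlSupported("image/png", "https://example.com/cat.png")); // true
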