@ai-sdk/anthropic 3.0.0-beta.43 → 3.0.0-beta.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -610,7 +610,16 @@ var anthropicProviderOptions = z3.object({
  version: z3.string().optional()
  })
  ).optional()
- }).optional()
+ }).optional(),
+ /**
+ * Whether to enable tool streaming (and structured output streaming).
+ *
+ * When set to false, the model will return all tool calls and results
+ * at once after a delay.
+ *
+ * @default true
+ */
+ toolStreaming: z3.boolean().optional()
  });

  // src/anthropic-prepare-tools.ts
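The toolStreaming flag added above lives in anthropicProviderOptions, so it is set per call through provider options rather than on the provider or model instance. Below is a minimal sketch of disabling it, assuming the standard AI SDK providerOptions pattern; the model id, tool, and prompt are illustrative and not taken from this diff.

// Sketch only: turn off fine-grained tool streaming for a single streamText call.
import { anthropic } from '@ai-sdk/anthropic';
import { streamText, tool } from 'ai';
import { z } from 'zod';

const result = streamText({
  model: anthropic('claude-sonnet-4-5'), // illustrative model id
  prompt: 'What is the weather in Berlin?',
  tools: {
    weather: tool({
      description: 'Get the weather for a city',
      inputSchema: z.object({ city: z.string() }),
      execute: async ({ city }) => ({ city, tempC: 21 }),
    }),
  },
  providerOptions: {
    anthropic: {
      // defaults to true; when false, tool calls arrive in one piece
      // after a delay instead of as streamed input deltas
      toolStreaming: false,
    },
  },
});

for await (const part of result.fullStream) {
  console.log(part);
}

As the hunks below show, the option only affects streaming requests: when stream is true and toolStreaming is not false, the provider adds the "fine-grained-tool-streaming-2025-05-14" beta to the set of betas used for the request headers.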
@@ -1884,9 +1893,10 @@ var AnthropicMessagesLanguageModel = class {
  seed,
  tools,
  toolChoice,
- providerOptions
+ providerOptions,
+ stream
  }) {
- var _a, _b, _c, _d;
+ var _a, _b, _c, _d, _e;
  const warnings = [];
  if (frequencyPenalty != null) {
  warnings.push({
@@ -1913,12 +1923,6 @@ var AnthropicMessagesLanguageModel = class {
  setting: "responseFormat",
  details: "JSON response format requires a schema. The response format is ignored."
  });
- } else if (tools != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "tools",
- details: "JSON response format does not support tools. The provided tools are ignored."
- });
  }
  }
  const jsonResponseTool = (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null ? {
@@ -2042,6 +2046,9 @@ var AnthropicMessagesLanguageModel = class {
  });
  }
  }
+ if (stream && ((_e = anthropicOptions == null ? void 0 : anthropicOptions.toolStreaming) != null ? _e : true)) {
+ betas.add("fine-grained-tool-streaming-2025-05-14");
+ }
  const {
  tools: anthropicTools2,
  toolChoice: anthropicToolChoice,
@@ -2049,8 +2056,8 @@ var AnthropicMessagesLanguageModel = class {
  betas: toolsBetas
  } = await prepareTools(
  jsonResponseTool != null ? {
- tools: [jsonResponseTool],
- toolChoice: { type: "tool", toolName: jsonResponseTool.name },
+ tools: [...tools != null ? tools : [], jsonResponseTool],
+ toolChoice: { type: "required" },
  disableParallelToolUse: true,
  cacheControlValidator
  } : {
@@ -2065,7 +2072,9 @@ var AnthropicMessagesLanguageModel = class {
  args: {
  ...baseArgs,
  tools: anthropicTools2,
- tool_choice: anthropicToolChoice
+ tool_choice: anthropicToolChoice,
+ stream: stream === true ? true : void 0
+ // do not send when not streaming
  },
  warnings: [...warnings, ...toolWarnings, ...cacheWarnings],
  betas: /* @__PURE__ */ new Set([...betas, ...toolsBetas]),
@@ -2115,7 +2124,10 @@ var AnthropicMessagesLanguageModel = class {
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h;
- const { args, warnings, betas, usesJsonResponseTool } = await this.getArgs(options);
+ const { args, warnings, betas, usesJsonResponseTool } = await this.getArgs({
+ ...options,
+ stream: false
+ });
  const citationDocuments = this.extractCitationDocuments(options.prompt);
  const {
  responseHeaders,
@@ -2134,6 +2146,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  const content = [];
  const mcpToolCalls = {};
+ let isJsonResponseFromTool = false;
  for (const part of response.content) {
  switch (part.type) {
  case "text": {
@@ -2179,18 +2192,21 @@ var AnthropicMessagesLanguageModel = class {
  break;
  }
  case "tool_use": {
- content.push(
- // when a json response tool is used, the tool call becomes the text:
- usesJsonResponseTool ? {
+ const isJsonResponseTool = usesJsonResponseTool && part.name === "json";
+ if (isJsonResponseTool) {
+ isJsonResponseFromTool = true;
+ content.push({
  type: "text",
  text: JSON.stringify(part.input)
- } : {
+ });
+ } else {
+ content.push({
  type: "tool-call",
  toolCallId: part.id,
  toolName: part.name,
  input: JSON.stringify(part.input)
- }
- );
+ });
+ }
  break;
  }
  case "server_tool_use": {
@@ -2377,7 +2393,7 @@ var AnthropicMessagesLanguageModel = class {
  content,
  finishReason: mapAnthropicStopReason({
  finishReason: response.stop_reason,
- isJsonResponseFromTool: usesJsonResponseTool
+ isJsonResponseFromTool
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
@@ -2412,9 +2428,16 @@ var AnthropicMessagesLanguageModel = class {
  };
  }
  async doStream(options) {
- const { args, warnings, betas, usesJsonResponseTool } = await this.getArgs(options);
+ const {
+ args: body,
+ warnings,
+ betas,
+ usesJsonResponseTool
+ } = await this.getArgs({
+ ...options,
+ stream: true
+ });
  const citationDocuments = this.extractCitationDocuments(options.prompt);
- const body = { ...args, stream: true };
  const { responseHeaders, value: response } = await postJsonToApi({
  url: this.buildRequestUrl(true),
  headers: await this.getHeaders({ betas, headers: options.headers }),
@@ -2438,6 +2461,7 @@ var AnthropicMessagesLanguageModel = class {
  let cacheCreationInputTokens = null;
  let stopSequence = null;
  let container = null;
+ let isJsonResponseFromTool = false;
  let blockType = void 0;
  const generateId2 = this.generateId;
  return {
@@ -2466,6 +2490,9 @@ var AnthropicMessagesLanguageModel = class {
  blockType = contentBlockType;
  switch (contentBlockType) {
  case "text": {
+ if (usesJsonResponseTool) {
+ return;
+ }
  contentBlocks[value.index] = { type: "text" };
  controller.enqueue({
  type: "text-start",
@@ -2495,20 +2522,28 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "tool_use": {
- contentBlocks[value.index] = usesJsonResponseTool ? { type: "text" } : {
- type: "tool-call",
- toolCallId: part.id,
- toolName: part.name,
- input: "",
- firstDelta: true
- };
- controller.enqueue(
- usesJsonResponseTool ? { type: "text-start", id: String(value.index) } : {
+ const isJsonResponseTool = usesJsonResponseTool && part.name === "json";
+ if (isJsonResponseTool) {
+ isJsonResponseFromTool = true;
+ contentBlocks[value.index] = { type: "text" };
+ controller.enqueue({
+ type: "text-start",
+ id: String(value.index)
+ });
+ } else {
+ contentBlocks[value.index] = {
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: part.name,
+ input: "",
+ firstDelta: true
+ };
+ controller.enqueue({
  type: "tool-input-start",
  id: part.id,
  toolName: part.name
- }
- );
+ });
+ }
  return;
  }
  case "server_tool_use": {
@@ -2724,7 +2759,8 @@ var AnthropicMessagesLanguageModel = class {
  break;
  }
  case "tool-call":
- if (!usesJsonResponseTool) {
+ const isJsonResponseTool = usesJsonResponseTool && contentBlock.toolName === "json";
+ if (!isJsonResponseTool) {
  controller.enqueue({
  type: "tool-input-end",
  id: contentBlock.toolCallId
@@ -2788,7 +2824,7 @@ var AnthropicMessagesLanguageModel = class {
  if (delta.length === 0) {
  return;
  }
- if (usesJsonResponseTool) {
+ if (isJsonResponseFromTool) {
  if ((contentBlock == null ? void 0 : contentBlock.type) !== "text") {
  return;
  }
@@ -2853,7 +2889,7 @@ var AnthropicMessagesLanguageModel = class {
  usage.totalTokens = ((_f = usage.inputTokens) != null ? _f : 0) + ((_g = value.usage.output_tokens) != null ? _g : 0);
  finishReason = mapAnthropicStopReason({
  finishReason: value.delta.stop_reason,
- isJsonResponseFromTool: usesJsonResponseTool
+ isJsonResponseFromTool
  });
  stopSequence = (_h = value.delta.stop_sequence) != null ? _h : null;
  container = value.delta.container != null ? {
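
Taken together with the removal of the "JSON response format does not support tools" warning, the getArgs changes mean user-supplied tools are no longer dropped when a JSON response format is requested: they are sent alongside the internal json response tool, and the tool choice is relaxed from forcing that single tool to "required". A paraphrased, self-contained sketch of that selection logic follows; the function and type names are hypothetical and only mirror what the hunks above show.

// Sketch (paraphrase of the diff, not the actual source).
type ToolChoice =
  | { type: 'auto' | 'required' | 'none' }
  | { type: 'tool'; toolName: string };

function selectTools<T extends { name: string }>(
  tools: T[] | undefined,
  toolChoice: ToolChoice | undefined,
  jsonResponseTool: T | undefined,
) {
  if (jsonResponseTool == null) {
    return { tools, toolChoice };
  }
  return {
    // previously: [jsonResponseTool]; user tools were discarded with a warning
    tools: [...(tools ?? []), jsonResponseTool],
    // previously: { type: 'tool', toolName: jsonResponseTool.name }
    toolChoice: { type: 'required' } as const,
  };
}

Downstream, doGenerate and doStream now tell the two kinds of tool calls apart by name: tool_use parts named "json" are converted to text content (and set isJsonResponseFromTool for the finish-reason mapping), while all other tool calls pass through as regular tool-call parts.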