ai 5.0.0-canary.13 → 5.0.0-canary.15

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -8,7 +8,11 @@ var __export = (target, all) => {
 import { createIdGenerator as createIdGenerator5, generateId as generateId2 } from "@ai-sdk/provider-utils";
 
 // core/util/index.ts
-import { generateId } from "@ai-sdk/provider-utils";
+import {
+  generateId,
+  jsonSchema,
+  asSchema
+} from "@ai-sdk/provider-utils";
 
 // core/util/process-chat-response.ts
 import { generateId as generateIdFunction } from "@ai-sdk/provider-utils";
@@ -823,7 +827,6 @@ async function processChatResponse({
       execUpdate();
     },
     onReasoningPart(value) {
-      var _a18;
       if (currentReasoningPart == null) {
         currentReasoningPart = {
           type: "reasoning",
@@ -835,7 +838,6 @@ async function processChatResponse({
        currentReasoningPart.reasoning += value.text;
        currentReasoningPart.providerMetadata = value.providerMetadata;
      }
-     message.reasoning = ((_a18 = message.reasoning) != null ? _a18 : "") + value.text;
      execUpdate();
    },
    onReasoningPartFinish(value) {
@@ -1258,12 +1260,6 @@ function getMessageParts(message) {
       type: "tool-invocation",
       toolInvocation
     })) : [],
-    ...message.reasoning ? [
-      {
-        type: "reasoning",
-        reasoning: message.reasoning
-      }
-    ] : [],
     ...message.content ? [{ type: "text", text: message.content }] : []
   ];
 }
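
The removal above, together with the processChatResponse and appendResponseMessages hunks, drops the deprecated message.reasoning string: reasoning text now lives only in "reasoning" entries of message.parts. A minimal migration sketch in TypeScript, using an abbreviated part type that stands in for the SDK's full UIMessage shape:

  // Abbreviated part shape for illustration; the real SDK type has more variants.
  type ReasoningPart = { type: "reasoning"; reasoning: string };
  type MessagePart = ReasoningPart | { type: "text"; text: string };

  // Recovers the concatenated reasoning text that message.reasoning used to hold.
  function reasoningText(parts: MessagePart[]): string {
    return parts
      .filter((part): part is ReasoningPart => part.type === "reasoning")
      .map((part) => part.reasoning)
      .join("");
  }
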
@@ -1343,51 +1339,29 @@ async function prepareAttachmentsForRequest(attachmentsFromOptions) {
   throw new Error("Invalid attachments type");
 }
 
-// core/util/schema.ts
-import { validatorSymbol } from "@ai-sdk/provider-utils";
-
-// core/util/zod-schema.ts
-import zodToJsonSchema from "zod-to-json-schema";
-function zodSchema(zodSchema2, options) {
+// core/util/update-tool-call-result.ts
+function updateToolCallResult({
+  messages,
+  toolCallId,
+  toolResult: result
+}) {
   var _a17;
-  const useReferences = (_a17 = options == null ? void 0 : options.useReferences) != null ? _a17 : false;
-  return jsonSchema(
-    zodToJsonSchema(zodSchema2, {
-      $refStrategy: useReferences ? "root" : "none",
-      target: "jsonSchema7"
-      // note: openai mode breaks various gemini conversions
-    }),
-    {
-      validate: (value) => {
-        const result = zodSchema2.safeParse(value);
-        return result.success ? { success: true, value: result.data } : { success: false, error: result.error };
-      }
-    }
+  const lastMessage = messages[messages.length - 1];
+  const invocationPart = lastMessage.parts.find(
+    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
   );
-}
-
-// core/util/schema.ts
-var schemaSymbol = Symbol.for("vercel.ai.schema");
-function jsonSchema(jsonSchema2, {
-  validate
-} = {}) {
-  return {
-    [schemaSymbol]: true,
-    _type: void 0,
-    // should never be used directly
-    [validatorSymbol]: true,
-    jsonSchema: jsonSchema2,
-    validate
+  if (invocationPart == null) {
+    return;
+  }
+  const toolResult = {
+    ...invocationPart.toolInvocation,
+    state: "result",
+    result
   };
-}
-function isSchema(value) {
-  return typeof value === "object" && value !== null && schemaSymbol in value && value[schemaSymbol] === true && "jsonSchema" in value && "validate" in value;
-}
-function asSchema(schema) {
-  return schema == null ? jsonSchema({
-    properties: {},
-    additionalProperties: false
-  }) : isSchema(schema) ? schema : zodSchema(schema);
+  invocationPart.toolInvocation = toolResult;
+  lastMessage.toolInvocations = (_a17 = lastMessage.toolInvocations) == null ? void 0 : _a17.map(
+    (toolInvocation) => toolInvocation.toolCallId === toolCallId ? toolResult : toolInvocation
+  );
 }
 
 // core/util/should-resubmit-messages.ts
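
This hunk deletes the local core/util/schema.ts and core/util/zod-schema.ts implementations (zodSchema, jsonSchema, isSchema, asSchema) in favor of the jsonSchema and asSchema now imported from @ai-sdk/provider-utils in the first hunk; updateToolCallResult moves up to take their place. A usage sketch, assuming the provider-utils helpers keep the call shape of the removed local code:

  import { jsonSchema, asSchema } from "@ai-sdk/provider-utils";
  import { z } from "zod";

  // Hand-written JSON schema with an optional validator, as before.
  const citySchema = jsonSchema<{ city: string }>(
    { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
    {
      validate: (value) =>
        typeof value === "object" && value !== null && "city" in value
          ? { success: true, value: value as { city: string } }
          : { success: false, error: new Error("expected { city }") },
    },
  );

  // asSchema still normalizes zod schemas and JSON schemas to one Schema type.
  const normalized = asSchema(z.object({ city: z.string() }));
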
@@ -1419,31 +1393,6 @@ function isAssistantMessageWithCompletedToolCalls(message) {
   return lastStepToolInvocations.length > 0 && lastStepToolInvocations.every((part) => "result" in part.toolInvocation);
 }
 
-// core/util/update-tool-call-result.ts
-function updateToolCallResult({
-  messages,
-  toolCallId,
-  toolResult: result
-}) {
-  var _a17;
-  const lastMessage = messages[messages.length - 1];
-  const invocationPart = lastMessage.parts.find(
-    (part) => part.type === "tool-invocation" && part.toolInvocation.toolCallId === toolCallId
-  );
-  if (invocationPart == null) {
-    return;
-  }
-  const toolResult = {
-    ...invocationPart.toolInvocation,
-    state: "result",
-    result
-  };
-  invocationPart.toolInvocation = toolResult;
-  lastMessage.toolInvocations = (_a17 = lastMessage.toolInvocations) == null ? void 0 : _a17.map(
-    (toolInvocation) => toolInvocation.toolCallId === toolCallId ? toolResult : toolInvocation
-  );
-}
-
 // core/data-stream/create-data-stream.ts
 function createDataStream({
   execute,
@@ -2103,8 +2052,8 @@ async function embedMany({
     }),
     tracer,
     fn: async (span) => {
-      const maxEmbeddingsPerCall = model.maxEmbeddingsPerCall;
-      if (maxEmbeddingsPerCall == null) {
+      const maxEmbeddingsPerCall = await model.maxEmbeddingsPerCall;
+      if (maxEmbeddingsPerCall == null || maxEmbeddingsPerCall === Infinity) {
        const { embeddings: embeddings2, usage, response } = await retry(() => {
          return recordSpan({
            name: "ai.embedMany.doEmbed",
@@ -3602,7 +3551,6 @@ async function standardizePrompt({
     });
   }
   return {
-    type: "prompt",
     system: prompt.system,
     messages: [
       {
@@ -3641,7 +3589,6 @@ async function standardizePrompt({
     });
   }
   return {
-    type: "messages",
     messages,
     system: prompt.system
   };
@@ -4042,29 +3989,31 @@ function validateObjectGenerationInput({
 
 // core/generate-object/generate-object.ts
 var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
-async function generateObject({
-  model,
-  enum: enumValues,
-  // rename bc enum is reserved by typescript
-  schema: inputSchema,
-  schemaName,
-  schemaDescription,
-  output = "object",
-  system,
-  prompt,
-  messages,
-  maxRetries: maxRetriesArg,
-  abortSignal,
-  headers,
-  experimental_repairText: repairText,
-  experimental_telemetry: telemetry,
-  providerOptions,
-  _internal: {
-    generateId: generateId3 = originalGenerateId,
-    currentDate = () => /* @__PURE__ */ new Date()
-  } = {},
-  ...settings
-}) {
+async function generateObject(options) {
+  const {
+    model,
+    output = "object",
+    system,
+    prompt,
+    messages,
+    maxRetries: maxRetriesArg,
+    abortSignal,
+    headers,
+    experimental_repairText: repairText,
+    experimental_telemetry: telemetry,
+    providerOptions,
+    _internal: {
+      generateId: generateId3 = originalGenerateId,
+      currentDate = () => /* @__PURE__ */ new Date()
+    } = {},
+    ...settings
+  } = options;
+  const enumValues = "enum" in options ? options.enum : void 0;
+  const {
+    schema: inputSchema,
+    schemaDescription,
+    schemaName
+  } = "schema" in options ? options : {};
   validateObjectGenerationInput({
     output,
     schema: inputSchema,
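
generateObject now takes a single options object and narrows enum and schema with `in` checks instead of destructuring them in the parameter list (where enum would need renaming, since it is a reserved word). Call sites are unchanged; a usage sketch with a placeholder model instance:

  import { generateObject } from "ai";
  import { z } from "zod";

  // Schema-based output: "schema" in options is true, enumValues stays undefined.
  const { object: recipe } = await generateObject({
    model, // placeholder: any language model instance
    schema: z.object({ name: z.string(), steps: z.array(z.string()) }),
    prompt: "Generate a lasagna recipe.",
  });

  // Enum-based output: "enum" in options is true, the schema branch yields undefined.
  const { object: genre } = await generateObject({
    model,
    output: "enum",
    enum: ["action", "comedy", "drama"],
    prompt: "Classify: a detective hunts a killer across Berlin.",
  });
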
@@ -4122,7 +4071,7 @@ async function generateObject({
   });
   const promptMessages = await convertToLanguageModelPrompt({
     prompt: standardizedPrompt,
-    supportedUrls: await model.getSupportedUrls()
+    supportedUrls: await model.supportedUrls
   });
   const generateResult = await retry(
     () => recordSpan({
@@ -4135,9 +4084,6 @@
         telemetry
       }),
       ...baseTelemetryAttributes,
-      "ai.prompt.format": {
-        input: () => standardizedPrompt.type
-      },
       "ai.prompt.messages": {
         input: () => JSON.stringify(promptMessages)
       },
@@ -4163,7 +4109,6 @@
         description: schemaDescription
       },
       ...prepareCallSettings(settings),
-      inputFormat: standardizedPrompt.type,
       prompt: promptMessages,
      providerOptions,
      abortSignal,
@@ -4459,29 +4404,32 @@ function now() {
 
 // core/generate-object/stream-object.ts
 var originalGenerateId2 = createIdGenerator2({ prefix: "aiobj", size: 24 });
-function streamObject({
-  model,
-  schema: inputSchema,
-  schemaName,
-  schemaDescription,
-  output = "object",
-  system,
-  prompt,
-  messages,
-  maxRetries,
-  abortSignal,
-  headers,
-  experimental_telemetry: telemetry,
-  providerOptions,
-  onError,
-  onFinish,
-  _internal: {
-    generateId: generateId3 = originalGenerateId2,
-    currentDate = () => /* @__PURE__ */ new Date(),
-    now: now2 = now
-  } = {},
-  ...settings
-}) {
+function streamObject(options) {
+  const {
+    model,
+    output = "object",
+    system,
+    prompt,
+    messages,
+    maxRetries,
+    abortSignal,
+    headers,
+    experimental_telemetry: telemetry,
+    providerOptions,
+    onError,
+    onFinish,
+    _internal: {
+      generateId: generateId3 = originalGenerateId2,
+      currentDate = () => /* @__PURE__ */ new Date(),
+      now: now2 = now
+    } = {},
+    ...settings
+  } = options;
+  const {
+    schema: inputSchema,
+    schemaDescription,
+    schemaName
+  } = "schema" in options ? options : {};
   validateObjectGenerationInput({
     output,
     schema: inputSchema,
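
streamObject gets the same options-object treatment, with the schema fields narrowed via an `in` check (there is no enum branch here; enum output remains non-streaming only). A short usage sketch mirroring the one above:

  import { streamObject } from "ai";
  import { z } from "zod";

  const result = streamObject({
    model, // placeholder, as above
    schema: z.object({ steps: z.array(z.string()) }),
    prompt: "List the steps for brewing tea.",
  });

  // Partial objects arrive as the stream fills in the schema.
  for await (const partialObject of result.partialObjectStream) {
    console.log(partialObject);
  }
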
@@ -4594,10 +4542,9 @@ var DefaultStreamObjectResult = class {
         description: schemaDescription
       },
       ...prepareCallSettings(settings),
-      inputFormat: standardizedPrompt.type,
       prompt: await convertToLanguageModelPrompt({
         prompt: standardizedPrompt,
-        supportedUrls: await model.getSupportedUrls()
+        supportedUrls: await model.supportedUrls
       }),
      providerOptions,
      abortSignal,
@@ -4632,9 +4579,6 @@ var DefaultStreamObjectResult = class {
             telemetry
           }),
           ...baseTelemetryAttributes,
-          "ai.prompt.format": {
-            input: () => callOptions.inputFormat
-          },
           "ai.prompt.messages": {
             input: () => JSON.stringify(callOptions.prompt)
           },
@@ -5310,6 +5254,7 @@ async function generateText({
   experimental_telemetry: telemetry,
   providerOptions,
   experimental_activeTools: activeTools,
+  experimental_prepareStep: prepareStep,
   experimental_repairToolCall: repairToolCall,
   _internal: {
     generateId: generateId3 = originalGenerateId3,
@@ -5348,6 +5293,9 @@ async function generateText({
       telemetry
     }),
     ...baseTelemetryAttributes,
+    // model:
+    "ai.model.provider": model.provider,
+    "ai.model.id": model.modelId,
     // specific settings that only make sense on the outer level:
     "ai.prompt": {
       input: () => JSON.stringify({ system, prompt, messages })
@@ -5357,10 +5305,8 @@ async function generateText({
     }),
     tracer,
     fn: async (span) => {
-      var _a17, _b, _c;
-      const toolsAndToolChoice = {
-        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
-      };
+      var _a17, _b, _c, _d, _e, _f;
+      const callSettings2 = prepareCallSettings(settings);
       let currentModelResponse;
       let currentToolCalls = [];
      let currentToolResults = [];
@@ -5377,18 +5323,28 @@ async function generateText({
      };
      let stepType = "initial";
      do {
-        const promptFormat = stepCount === 0 ? initialPrompt.type : "messages";
        const stepInputMessages = [
          ...initialPrompt.messages,
          ...responseMessages
        ];
+        const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+          model,
+          steps,
+          maxSteps,
+          stepNumber: stepCount
+        }));
        const promptMessages = await convertToLanguageModelPrompt({
          prompt: {
-            type: promptFormat,
            system: initialPrompt.system,
            messages: stepInputMessages
          },
-          supportedUrls: await model.getSupportedUrls()
+          supportedUrls: await model.supportedUrls
+        });
+        const stepModel = (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _a17 : model;
+        const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+          tools,
+          toolChoice: (_b = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _b : toolChoice,
+          activeTools: (_c = prepareStepResult == null ? void 0 : prepareStepResult.experimental_activeTools) != null ? _c : activeTools
        });
        currentModelResponse = await retry(
          () => {
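
This hunk wires in the new experimental_prepareStep callback: it runs before each step, and its result can override the model, toolChoice, or active tools for that step only (undefined falls back to the call-level defaults, as the stepModel and stepToolChoice fallbacks show). A hedged sketch; the tool names are placeholders:

  import { generateText } from "ai";

  const result = await generateText({
    model, // default model for every step
    tools: { search, calculator }, // placeholder tool definitions
    maxSteps: 5,
    experimental_prepareStep: async ({ model, steps, stepNumber, maxSteps }) => {
      // Force the search tool on the first step, then keep the defaults.
      if (stepNumber === 0) {
        return { toolChoice: { type: "tool" as const, toolName: "search" as const } };
      }
      return undefined;
    },
    prompt: "Research the topic and summarize it.",
  });
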
@@ -5403,23 +5359,23 @@ async function generateText({
              telemetry
            }),
            ...baseTelemetryAttributes,
-            "ai.prompt.format": { input: () => promptFormat },
+            // model:
+            "ai.model.provider": stepModel.provider,
+            "ai.model.id": stepModel.modelId,
+            // prompt:
            "ai.prompt.messages": {
              input: () => JSON.stringify(promptMessages)
            },
            "ai.prompt.tools": {
              // convert the language model level tools:
-              input: () => {
-                var _a19;
-                return (_a19 = toolsAndToolChoice.tools) == null ? void 0 : _a19.map((tool2) => JSON.stringify(tool2));
-              }
+              input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
            },
            "ai.prompt.toolChoice": {
-              input: () => toolsAndToolChoice.toolChoice != null ? JSON.stringify(toolsAndToolChoice.toolChoice) : void 0
+              input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
            },
            // standardized gen-ai llm span attributes:
-            "gen_ai.system": model.provider,
-            "gen_ai.request.model": model.modelId,
+            "gen_ai.system": stepModel.provider,
+            "gen_ai.request.model": stepModel.modelId,
            "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
            "gen_ai.request.max_tokens": settings.maxOutputTokens,
            "gen_ai.request.presence_penalty": settings.presencePenalty,
@@ -5431,11 +5387,11 @@ async function generateText({
          }),
          tracer,
          fn: async (span2) => {
-            var _a19, _b2, _c2, _d, _e, _f, _g, _h;
-            const result = await model.doGenerate({
-              ...callSettings,
-              ...toolsAndToolChoice,
-              inputFormat: promptFormat,
+            var _a19, _b2, _c2, _d2, _e2, _f2, _g, _h;
+            const result = await stepModel.doGenerate({
+              ...callSettings2,
+              tools: stepTools,
+              toolChoice: stepToolChoice,
              responseFormat: output == null ? void 0 : output.responseFormat,
              prompt: promptMessages,
              providerOptions,
@@ -5444,8 +5400,8 @@ async function generateText({
            });
            const responseData = {
              id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
-              timestamp: (_d = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d : currentDate(),
-              modelId: (_f = (_e = result.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+              timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+              modelId: (_f2 = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f2 : stepModel.modelId,
              headers: (_g = result.response) == null ? void 0 : _g.headers,
              body: (_h = result.response) == null ? void 0 : _h.body
            };
@@ -5521,7 +5477,7 @@ async function generateText({
          nextStepType = "tool-result";
        }
      }
-      const originalText = (_a17 = extractContentText(currentModelResponse.content)) != null ? _a17 : "";
+      const originalText = (_d = extractContentText(currentModelResponse.content)) != null ? _d : "";
      const stepTextLeadingWhitespaceTrimmed = stepType === "continue" && // only for continue steps
      text2.trimEnd() !== text2 ? originalText.trimStart() : originalText;
      const stepText = nextStepType === "continue" ? removeTextAfterLastWhitespace(stepTextLeadingWhitespaceTrimmed) : stepTextLeadingWhitespaceTrimmed;
@@ -5574,7 +5530,7 @@ async function generateText({
        finishReason: currentModelResponse.finishReason,
        usage: currentUsage,
        warnings: currentModelResponse.warnings,
-        request: (_b = currentModelResponse.request) != null ? _b : {},
+        request: (_e = currentModelResponse.request) != null ? _e : {},
        response: {
          ...currentModelResponse.response,
          // deep clone msgs to avoid mutating past messages in multi-step:
@@ -5627,7 +5583,7 @@ async function generateText({
      finishReason: currentModelResponse.finishReason,
      usage,
      warnings: currentModelResponse.warnings,
-      request: (_c = currentModelResponse.request) != null ? _c : {},
+      request: (_f = currentModelResponse.request) != null ? _f : {},
      response: {
        ...currentModelResponse.response,
        messages: responseMessages
@@ -6709,18 +6665,16 @@ var DefaultStreamTextResult = class {
        prompt: { system, prompt, messages },
        tools
      });
-      const promptFormat = responseMessages.length === 0 ? initialPrompt.type : "messages";
      const stepInputMessages = [
        ...initialPrompt.messages,
        ...responseMessages
      ];
      const promptMessages = await convertToLanguageModelPrompt({
        prompt: {
-          type: promptFormat,
          system: initialPrompt.system,
          messages: stepInputMessages
        },
-        supportedUrls: await model.getSupportedUrls()
+        supportedUrls: await model.supportedUrls
      });
      const toolsAndToolChoice = {
        ...prepareToolsAndToolChoice({ tools, toolChoice, activeTools })
@@ -6740,9 +6694,6 @@ var DefaultStreamTextResult = class {
            telemetry
          }),
          ...baseTelemetryAttributes,
-          "ai.prompt.format": {
-            input: () => promptFormat
-          },
          "ai.prompt.messages": {
            input: () => JSON.stringify(promptMessages)
          },
@@ -6780,7 +6731,6 @@ var DefaultStreamTextResult = class {
          result: await model.doStream({
            ...callSettings,
            ...toolsAndToolChoice,
-            inputFormat: promptFormat,
            responseFormat: output == null ? void 0 : output.responseFormat,
            prompt: promptMessages,
            providerOptions,
@@ -7873,8 +7823,8 @@ var doWrap = ({
  provider: providerId != null ? providerId : model.provider,
  modelId: modelId != null ? modelId : model.modelId,
  // TODO middleware should be able to modify the supported urls
-  async getSupportedUrls() {
-    return model.getSupportedUrls();
+  get supportedUrls() {
+    return model.supportedUrls;
  },
  async doGenerate(params) {
    const transformedParams = await doTransform({ params, type: "generate" });
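
This is the interface change behind the repeated `await model.supportedUrls` edits above: supported URLs are now a property (possibly a getter, possibly promise-valued) rather than a getSupportedUrls() method, and the wrapped model forwards it with a getter. A sketch of a conforming model stub under that assumption:

  // Minimal stub of the parts of the model interface this diff touches;
  // the ids and the URL map are placeholders.
  const modelStub = {
    provider: "example-provider",
    modelId: "example-model",
    // Getter instead of the removed getSupportedUrls() method. Callers
    // `await model.supportedUrls`, so a plain record or a Promise both work.
    get supportedUrls(): Record<string, RegExp[]> {
      return { "image/*": [/^https:\/\//] };
    },
  };
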
@@ -7991,7 +7941,6 @@ function appendResponseMessages({
      );
      (_b = lastMessage.parts) != null ? _b : lastMessage.parts = [];
      lastMessage.content = textContent;
-      lastMessage.reasoning = reasoningTextContent;
      lastMessage.parts.push(...parts);
      lastMessage.toolInvocations = [
        ...(_c = lastMessage.toolInvocations) != null ? _c : [],
@@ -8010,7 +7959,6 @@ function appendResponseMessages({
      createdAt: currentDate(),
      // generate a createdAt date for the message, will be overridden by the client
      content: textContent,
-      reasoning: reasoningTextContent,
      toolInvocations: getToolInvocations2(0),
      parts: [
        ...parts,
@@ -8962,7 +8910,6 @@ export {
  streamText,
  tool,
  updateToolCallResult,
-  wrapLanguageModel,
-  zodSchema
+  wrapLanguageModel
 };
//# sourceMappingURL=index.mjs.map