ai 3.1.19 → 3.1.21

This diff shows the changes between publicly released versions of the package as they appear in the supported public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -212,7 +212,8 @@ function convertToLanguageModelPrompt(prompt) {
   if (prompt.system != null) {
     languageModelMessages.push({ role: "system", content: prompt.system });
   }
-  switch (prompt.type) {
+  const promptType = prompt.type;
+  switch (promptType) {
     case "prompt": {
       languageModelMessages.push({
         role: "user",
@@ -222,72 +223,75 @@ function convertToLanguageModelPrompt(prompt) {
     }
     case "messages": {
       languageModelMessages.push(
-        ...prompt.messages.map((message) => {
-          switch (message.role) {
-            case "system": {
-              return { role: "system", content: message.content };
-            }
-            case "user": {
-              if (typeof message.content === "string") {
-                return {
-                  role: "user",
-                  content: [{ type: "text", text: message.content }]
-                };
+        ...prompt.messages.map(convertToLanguageModelMessage)
+      );
+      break;
+    }
+    default: {
+      const _exhaustiveCheck = promptType;
+      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
+    }
+  }
+  return languageModelMessages;
+}
+function convertToLanguageModelMessage(message) {
+  switch (message.role) {
+    case "system": {
+      return { role: "system", content: message.content };
+    }
+    case "user": {
+      if (typeof message.content === "string") {
+        return {
+          role: "user",
+          content: [{ type: "text", text: message.content }]
+        };
+      }
+      return {
+        role: "user",
+        content: message.content.map(
+          (part) => {
+            var _a;
+            switch (part.type) {
+              case "text": {
+                return part;
               }
-              return {
-                role: "user",
-                content: message.content.map(
-                  (part) => {
-                    var _a;
-                    switch (part.type) {
-                      case "text": {
-                        return part;
-                      }
-                      case "image": {
-                        if (part.image instanceof URL) {
-                          return {
-                            type: "image",
-                            image: part.image,
-                            mimeType: part.mimeType
-                          };
-                        }
-                        const imageUint8 = convertDataContentToUint8Array(
-                          part.image
-                        );
-                        return {
-                          type: "image",
-                          image: imageUint8,
-                          mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
-                        };
-                      }
-                    }
-                  }
-                )
-              };
-            }
-            case "assistant": {
-              if (typeof message.content === "string") {
+              case "image": {
+                if (part.image instanceof URL) {
+                  return {
+                    type: "image",
+                    image: part.image,
+                    mimeType: part.mimeType
+                  };
+                }
+                const imageUint8 = convertDataContentToUint8Array(part.image);
                 return {
-                  role: "assistant",
-                  content: [{ type: "text", text: message.content }]
+                  type: "image",
+                  image: imageUint8,
+                  mimeType: (_a = part.mimeType) != null ? _a : detectImageMimeType(imageUint8)
                 };
               }
-              return { role: "assistant", content: message.content };
-            }
-            case "tool": {
-              return message;
             }
           }
-        })
-      );
-      break;
+        )
+      };
+    }
+    case "assistant": {
+      if (typeof message.content === "string") {
+        return {
+          role: "assistant",
+          content: [{ type: "text", text: message.content }]
+        };
+      }
+      return { role: "assistant", content: message.content };
+    }
+    case "tool": {
+      return message;
     }
     default: {
-      const _exhaustiveCheck = prompt;
-      throw new Error(`Unsupported prompt type: ${_exhaustiveCheck}`);
+      const _exhaustiveCheck = message;
+      throw new Error(`Unsupported message role: ${_exhaustiveCheck}`);
     }
   }
-  return languageModelMessages;
 }
 
 // core/prompt/get-validated-prompt.ts
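
Note: this hunk hoists the per-message switch out of convertToLanguageModelPrompt into a new top-level convertToLanguageModelMessage helper (reused by the generateText roundtrip loop further down), and both switches gain throwing default branches that guard against unhandled prompt types and message roles. A minimal sketch of what the helper returns for a string-content user message, per the case "user" branch above (illustrative input, not from the package):

  const message = { role: "user", content: "Describe this image." };
  convertToLanguageModelMessage(message);
  // => { role: "user", content: [{ type: "text", text: "Describe this image." }] }
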
@@ -1245,40 +1249,66 @@ async function generateText({
   messages,
   maxRetries,
   abortSignal,
+  maxAutomaticRoundtrips = 0,
   ...settings
 }) {
-  var _a, _b;
+  var _a, _b, _c;
   const retry = retryWithExponentialBackoff({ maxRetries });
   const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const modelResponse = await retry(() => {
-    return model.doGenerate({
-      mode: {
-        type: "regular",
-        ...prepareToolsAndToolChoice({ tools, toolChoice })
-      },
-      ...prepareCallSettings(settings),
-      inputFormat: validatedPrompt.type,
-      prompt: convertToLanguageModelPrompt(validatedPrompt),
-      abortSignal
+  const mode = {
+    type: "regular",
+    ...prepareToolsAndToolChoice({ tools, toolChoice })
+  };
+  const callSettings = prepareCallSettings(settings);
+  const promptMessages = convertToLanguageModelPrompt(validatedPrompt);
+  let currentModelResponse;
+  let currentToolCalls = [];
+  let currentToolResults = [];
+  let roundtrips = 0;
+  const responseMessages = [];
+  do {
+    currentModelResponse = await retry(() => {
+      return model.doGenerate({
+        mode,
+        ...callSettings,
+        // once we have a roundtrip, we need to switch to messages format:
+        inputFormat: roundtrips === 0 ? validatedPrompt.type : "messages",
+        prompt: promptMessages,
+        abortSignal
+      });
     });
-  });
-  const toolCalls = [];
-  for (const modelToolCall of (_a = modelResponse.toolCalls) != null ? _a : []) {
-    toolCalls.push(parseToolCall({ toolCall: modelToolCall, tools }));
-  }
-  const toolResults = tools == null ? [] : await executeTools({ toolCalls, tools });
+    currentToolCalls = ((_a = currentModelResponse.toolCalls) != null ? _a : []).map(
+      (modelToolCall) => parseToolCall({ toolCall: modelToolCall, tools })
+    );
+    currentToolResults = tools == null ? [] : await executeTools({ toolCalls: currentToolCalls, tools });
+    const newResponseMessages = toResponseMessages({
+      text: (_b = currentModelResponse.text) != null ? _b : "",
+      toolCalls: currentToolCalls,
+      toolResults: currentToolResults
+    });
+    responseMessages.push(...newResponseMessages);
+    promptMessages.push(
+      ...newResponseMessages.map(convertToLanguageModelMessage)
+    );
+  } while (
+    // there are tool calls:
+    currentToolCalls.length > 0 && // all current tool calls have results:
+    currentToolResults.length === currentToolCalls.length && // the number of roundtrips is less than the maximum:
+    roundtrips++ < maxAutomaticRoundtrips
+  );
   return new GenerateTextResult({
     // Always return a string so that the caller doesn't have to check for undefined.
     // If they need to check if the model did not return any text,
     // they can check the length of the string:
-    text: (_b = modelResponse.text) != null ? _b : "",
-    toolCalls,
-    toolResults,
-    finishReason: modelResponse.finishReason,
-    usage: calculateTokenUsage(modelResponse.usage),
-    warnings: modelResponse.warnings,
-    rawResponse: modelResponse.rawResponse,
-    logprobs: modelResponse.logprobs
+    text: (_c = currentModelResponse.text) != null ? _c : "",
+    toolCalls: currentToolCalls,
+    toolResults: currentToolResults,
+    finishReason: currentModelResponse.finishReason,
+    usage: calculateTokenUsage(currentModelResponse.usage),
+    warnings: currentModelResponse.warnings,
+    rawResponse: currentModelResponse.rawResponse,
+    logprobs: currentModelResponse.logprobs,
+    responseMessages
   });
 }
 async function executeTools({
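
Note: generateText now accepts a maxAutomaticRoundtrips option (default 0, which preserves the previous single-call behavior). When the model answers with tool calls and every call produced a result, the do/while loop appends the new assistant/tool messages to promptMessages and invokes doGenerate again, up to the configured number of roundtrips. A hedged usage sketch (model, the weather tool, and its values are placeholders; it assumes the tool() helper and zod-based parameters of the 3.1 API):

  import { generateText, tool } from "ai";
  import { z } from "zod";

  const result = await generateText({
    model, // any provider LanguageModel, supplied by the caller
    maxAutomaticRoundtrips: 2, // allow up to 2 automatic tool-result roundtrips
    tools: {
      weather: tool({
        description: "Get the weather for a city",
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => ({ city, temperatureF: 72 })
      })
    },
    prompt: "What is the weather in Berlin?"
  });
  // result.responseMessages collects the assistant and tool messages
  // produced across all roundtrips (see GenerateTextResult below).
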
@@ -1314,7 +1344,7 @@ var GenerateTextResult = class {
     this.warnings = options.warnings;
     this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
-    this.responseMessages = toResponseMessages(options);
+    this.responseMessages = options.responseMessages;
   }
 };
 function toResponseMessages({
@@ -1342,6 +1372,16 @@ function toResponseMessages({
 }
 var experimental_generateText = generateText;
 
+// core/util/prepare-response-headers.ts
+function prepareResponseHeaders(init, { contentType }) {
+  var _a;
+  const headers = new Headers((_a = init == null ? void 0 : init.headers) != null ? _a : {});
+  if (!headers.has("Content-Type")) {
+    headers.set("Content-Type", contentType);
+  }
+  return headers;
+}
+
 // core/generate-text/run-tools-transformation.ts
 import { NoSuchToolError as NoSuchToolError2 } from "@ai-sdk/provider";
 
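
Note: the new prepareResponseHeaders helper replaces the object-spread header merging in the two streaming responses below. Because it goes through the Headers class, the Content-Type lookup is case-insensitive, so a caller-supplied content-type header is respected instead of producing a duplicate key. An illustrative check (not from the package):

  const h = prepareResponseHeaders(
    { headers: { "content-type": "application/json" } },
    { contentType: "text/plain; charset=utf-8" }
  );
  h.get("Content-Type"); // => "application/json" (the caller's header wins)
  prepareResponseHeaders(void 0, { contentType: "text/plain; charset=utf-8" })
    .get("Content-Type"); // => "text/plain; charset=utf-8" (the default applies)
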
@@ -1814,10 +1854,9 @@ var StreamTextResult = class {
     var _a;
     return new Response(this.textStream.pipeThrough(new TextEncoderStream()), {
       status: (_a = init == null ? void 0 : init.status) != null ? _a : 200,
-      headers: {
-        "Content-Type": "text/plain; charset=utf-8",
-        ...init == null ? void 0 : init.headers
-      }
+      headers: prepareResponseHeaders(init, {
+        contentType: "text/plain; charset=utf-8"
+      })
     });
   }
 };
@@ -3447,10 +3486,9 @@ var StreamingTextResponse = class extends Response {
     super(processedStream, {
       ...init,
       status: 200,
-      headers: {
-        "Content-Type": "text/plain; charset=utf-8",
-        ...init == null ? void 0 : init.headers
-      }
+      headers: prepareResponseHeaders(init, {
+        contentType: "text/plain; charset=utf-8"
+      })
     });
   }
 };
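
Note: both StreamTextResult.toTextStreamResponse (above) and the StreamingTextResponse constructor now delegate header merging to prepareResponseHeaders, so custom ResponseInit headers behave identically in both places. A hedged usage sketch; getAssistantStream is a hypothetical helper standing in for any ReadableStream source:

  // In a route handler; the text/plain default is applied only when
  // the caller has not set a Content-Type of their own:
  export async function POST(req) {
    const stream = await getAssistantStream(req); // hypothetical helper
    return new StreamingTextResponse(stream, {
      headers: { "X-Custom-Header": "1" }
    });
  }
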