ai 5.0.0-alpha.7 → 5.0.0-alpha.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1395,14 +1395,14 @@ var ChatStore = class {
  return this.chats.size;
  }
  getStatus(id) {
- return this.getChatState(id).status;
+ return this.getChat(id).status;
  }
  setStatus({
  id,
  status,
  error
  }) {
- const state = this.getChatState(id);
+ const state = this.getChat(id);
  if (state.status === status)
  return;
  state.setStatus(status);
@@ -1410,13 +1410,13 @@ var ChatStore = class {
  this.emit({ type: "chat-status-changed", chatId: id, error });
  }
  getError(id) {
- return this.getChatState(id).error;
+ return this.getChat(id).error;
  }
  getMessages(id) {
- return this.getChatState(id).messages;
+ return this.getChat(id).messages;
  }
  getLastMessage(id) {
- const chat = this.getChatState(id);
+ const chat = this.getChat(id);
  return chat.messages[chat.messages.length - 1];
  }
  subscribe(subscriber) {
@@ -1427,11 +1427,11 @@ var ChatStore = class {
  id,
  messages
  }) {
- this.getChatState(id).setMessages(messages);
+ this.getChat(id).setMessages(messages);
  this.emit({ type: "chat-messages-changed", chatId: id });
  }
  removeAssistantResponse(id) {
- const chat = this.getChatState(id);
+ const chat = this.getChat(id);
  const lastMessage = chat.messages[chat.messages.length - 1];
  if (lastMessage == null) {
  throw new Error("Cannot remove assistant response from empty chat");
@@ -1452,8 +1452,8 @@ var ChatStore = class {
  onFinish
  }) {
  var _a17;
- const state = this.getChatState(chatId);
- state.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
+ const chat = this.getChat(chatId);
+ chat.pushMessage({ ...message, id: (_a17 = message.id) != null ? _a17 : this.generateId() });
  this.emit({
  type: "chat-messages-changed",
  chatId
@@ -1476,7 +1476,7 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const chat = this.getChatState(chatId);
+ const chat = this.getChat(chatId);
  if (chat.messages[chat.messages.length - 1].role === "assistant") {
  chat.popMessage();
  this.emit({
@@ -1520,7 +1520,7 @@ var ChatStore = class {
  toolCallId,
  result
  }) {
- const chat = this.getChatState(chatId);
+ const chat = this.getChat(chatId);
  chat.jobExecutor.run(async () => {
  updateToolCallResult({
  messages: chat.messages,
@@ -1545,7 +1545,7 @@ var ChatStore = class {
  }
  async stopStream({ chatId }) {
  var _a17;
- const chat = this.getChatState(chatId);
+ const chat = this.getChat(chatId);
  if (chat.status !== "streaming" && chat.status !== "submitted")
  return;
  if ((_a17 = chat.activeResponse) == null ? void 0 : _a17.abortController) {
@@ -1558,7 +1558,7 @@ var ChatStore = class {
  subscriber.onChatChanged(event);
  }
  }
- getChatState(id) {
+ getChat(id) {
  if (!this.hasChat(id)) {
  this.addChat(id, []);
  }
@@ -1573,7 +1573,7 @@ var ChatStore = class {
  onToolCall,
  onFinish
  }) {
- const chat = this.getChatState(chatId);
+ const chat = this.getChat(chatId);
  this.setStatus({ id: chatId, status: "submitted", error: void 0 });
  const messageCount = chat.messages.length;
  const lastMessage = chat.messages[chat.messages.length - 1];
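Note: the ChatStore hunks above are a mechanical rename of the internal accessor `getChatState` to `getChat`, applied at every call site; behavior is unchanged. A minimal sketch of the lazily-creating accessor pattern (the Map-backed state shape here is assumed for illustration, not taken from this diff):

```js
// Sketch: a lazily-creating chat accessor, as in the renamed getChat(id).
class MiniChatStore {
  constructor() {
    this.chats = new Map();
  }
  hasChat(id) {
    return this.chats.has(id);
  }
  addChat(id, messages) {
    // Assumed state shape for illustration only.
    this.chats.set(id, { messages, status: "ready", error: undefined });
  }
  // Formerly getChatState(id): creates an empty chat on first access.
  getChat(id) {
    if (!this.hasChat(id)) {
      this.addChat(id, []);
    }
    return this.chats.get(id);
  }
}
```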
@@ -3742,6 +3742,19 @@ function prepareCallSettings({
  };
  }

+ // core/prompt/resolve-language-model.ts
+ import { gateway } from "@ai-sdk/gateway";
+ var GLOBAL_DEFAULT_PROVIDER = Symbol(
+ "vercel.ai.global.defaultProvider"
+ );
+ function resolveLanguageModel(model) {
+ if (typeof model !== "string") {
+ return model;
+ }
+ const globalProvider = globalThis[GLOBAL_DEFAULT_PROVIDER];
+ return (globalProvider != null ? globalProvider : gateway).languageModel(model);
+ }
+
  // core/prompt/standardize-prompt.ts
  import { InvalidPromptError as InvalidPromptError2 } from "@ai-sdk/provider";
  import { safeValidateTypes } from "@ai-sdk/provider-utils";
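Note: `resolveLanguageModel` now lets string model ids resolve through a provider registered on `globalThis` under the well-known `GLOBAL_DEFAULT_PROVIDER` symbol, falling back to the Vercel AI Gateway. A hedged sketch of registering one (the `@ai-sdk/openai` import and model id are illustrative, not part of this diff):

```js
import { openai } from "@ai-sdk/openai"; // illustrative provider package
import { GLOBAL_DEFAULT_PROVIDER } from "ai"; // newly exported, see the export hunk below

// After this, a bare string model id resolves through the registered
// provider's languageModel(id) instead of the AI Gateway default.
globalThis[GLOBAL_DEFAULT_PROVIDER] = openai;
```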
@@ -3933,6 +3946,23 @@ async function standardizePrompt(prompt) {
  };
  }

+ // core/prompt/wrap-gateway-error.ts
+ import {
+ GatewayAuthenticationError,
+ GatewayModelNotFoundError
+ } from "@ai-sdk/gateway";
+ import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
+ function wrapGatewayError(error) {
+ if (GatewayAuthenticationError.isInstance(error) || GatewayModelNotFoundError.isInstance(error)) {
+ return new AISDKError18({
+ name: "GatewayError",
+ message: "Vercel AI Gateway access failed. If you want to use AI SDK providers directly, use the providers, e.g. @ai-sdk/openai, or register a different global default provider.",
+ cause: error
+ });
+ }
+ return error;
+ }
+
  // core/telemetry/stringify-for-telemetry.ts
  function stringifyForTelemetry(prompt) {
  return JSON.stringify(
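Note: gateway authentication and model-not-found failures are now rethrown as an `AISDKError` named "GatewayError", with the original error attached as `cause`. A hedged sketch of handling it (the gateway-style model id is illustrative):

```js
import { generateText } from "ai";

try {
  const { text } = await generateText({
    model: "openai/gpt-4o", // string id resolved via gateway or global provider
    prompt: "Hello!",
  });
  console.log(text);
} catch (error) {
  if (error != null && error.name === "GatewayError") {
    // The wrapped GatewayAuthenticationError / GatewayModelNotFoundError:
    console.error("Gateway access failed:", error.cause);
  } else {
    throw error;
  }
}
```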
@@ -4349,12 +4379,6 @@ function validateObjectGenerationInput({
  }
  }

- // core/prompt/resolve-language-model.ts
- import { gateway } from "@ai-sdk/gateway";
- function resolveLanguageModel(model) {
- return typeof model === "string" ? gateway.languageModel(model) : model;
- }
-
  // core/generate-object/generate-object.ts
  var originalGenerateId = createIdGenerator({ prefix: "aiobj", size: 24 });
  async function generateObject(options) {
@@ -4404,208 +4428,212 @@ async function generateObject(options) {
  settings: { ...callSettings, maxRetries }
  });
  const tracer = getTracer(telemetry);
- return recordSpan({
- name: "ai.generateObject",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateObject",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- },
- "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
- "ai.schema.name": schemaName,
- "ai.schema.description": schemaDescription,
- "ai.settings.output": outputStrategy.type
- }
- }),
- tracer,
- fn: async (span) => {
- var _a17;
- let result;
- let finishReason;
- let usage;
- let warnings;
- let response;
- let request;
- let resultProviderMetadata;
- const standardizedPrompt = await standardizePrompt({
- system,
- prompt,
- messages
- });
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: standardizedPrompt,
- supportedUrls: await model.supportedUrls
- });
- const generateResult = await retry(
- () => recordSpan({
- name: "ai.generateObject.doGenerate",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateObject.doGenerate",
- telemetry
- }),
- ...baseTelemetryAttributes,
- "ai.prompt.messages": {
- input: () => stringifyForTelemetry(promptMessages)
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": model.provider,
- "gen_ai.request.model": model.modelId,
- "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
- "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
- "gen_ai.request.presence_penalty": callSettings.presencePenalty,
- "gen_ai.request.temperature": callSettings.temperature,
- "gen_ai.request.top_k": callSettings.topK,
- "gen_ai.request.top_p": callSettings.topP
- }
+ try {
+ return await recordSpan({
+ name: "ai.generateObject",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateObject",
+ telemetry
  }),
- tracer,
- fn: async (span2) => {
- var _a18, _b, _c, _d, _e, _f, _g, _h;
- const result2 = await model.doGenerate({
- responseFormat: {
- type: "json",
- schema: outputStrategy.jsonSchema,
- name: schemaName,
- description: schemaDescription
- },
- ...prepareCallSettings(settings),
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- });
- const responseData = {
- id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
- timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
- modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
- headers: (_g = result2.response) == null ? void 0 : _g.headers,
- body: (_h = result2.response) == null ? void 0 : _h.body
- };
- const text2 = extractContentText(result2.content);
- if (text2 === void 0) {
- throw new NoObjectGeneratedError({
- message: "No object generated: the model did not return a response.",
- response: responseData,
- usage: result2.usage,
- finishReason: result2.finishReason
+ ...baseTelemetryAttributes,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
+ },
+ "ai.schema": outputStrategy.jsonSchema != null ? { input: () => JSON.stringify(outputStrategy.jsonSchema) } : void 0,
+ "ai.schema.name": schemaName,
+ "ai.schema.description": schemaDescription,
+ "ai.settings.output": outputStrategy.type
+ }
+ }),
+ tracer,
+ fn: async (span) => {
+ var _a17;
+ let result;
+ let finishReason;
+ let usage;
+ let warnings;
+ let response;
+ let request;
+ let resultProviderMetadata;
+ const standardizedPrompt = await standardizePrompt({
+ system,
+ prompt,
+ messages
+ });
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: standardizedPrompt,
+ supportedUrls: await model.supportedUrls
+ });
+ const generateResult = await retry(
+ () => recordSpan({
+ name: "ai.generateObject.doGenerate",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateObject.doGenerate",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ "ai.prompt.messages": {
+ input: () => stringifyForTelemetry(promptMessages)
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": model.provider,
+ "gen_ai.request.model": model.modelId,
+ "gen_ai.request.frequency_penalty": callSettings.frequencyPenalty,
+ "gen_ai.request.max_tokens": callSettings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": callSettings.presencePenalty,
+ "gen_ai.request.temperature": callSettings.temperature,
+ "gen_ai.request.top_k": callSettings.topK,
+ "gen_ai.request.top_p": callSettings.topP
+ }
+ }),
+ tracer,
+ fn: async (span2) => {
+ var _a18, _b, _c, _d, _e, _f, _g, _h;
+ const result2 = await model.doGenerate({
+ responseFormat: {
+ type: "json",
+ schema: outputStrategy.jsonSchema,
+ name: schemaName,
+ description: schemaDescription
+ },
+ ...prepareCallSettings(settings),
+ prompt: promptMessages,
+ providerOptions,
+ abortSignal,
+ headers
  });
+ const responseData = {
+ id: (_b = (_a18 = result2.response) == null ? void 0 : _a18.id) != null ? _b : generateId3(),
+ timestamp: (_d = (_c = result2.response) == null ? void 0 : _c.timestamp) != null ? _d : currentDate(),
+ modelId: (_f = (_e = result2.response) == null ? void 0 : _e.modelId) != null ? _f : model.modelId,
+ headers: (_g = result2.response) == null ? void 0 : _g.headers,
+ body: (_h = result2.response) == null ? void 0 : _h.body
+ };
+ const text2 = extractContentText(result2.content);
+ if (text2 === void 0) {
+ throw new NoObjectGeneratedError({
+ message: "No object generated: the model did not return a response.",
+ response: responseData,
+ usage: result2.usage,
+ finishReason: result2.finishReason
+ });
+ }
+ span2.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": result2.finishReason,
+ "ai.response.object": { output: () => text2 },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result2.usage.inputTokens,
+ "ai.usage.completionTokens": result2.usage.outputTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result2.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
+ "gen_ai.usage.input_tokens": result2.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result2.usage.outputTokens
+ }
+ })
+ );
+ return { ...result2, objectText: text2, responseData };
  }
- span2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": result2.finishReason,
- "ai.response.object": { output: () => text2 },
- "ai.response.id": responseData.id,
- "ai.response.model": responseData.modelId,
- "ai.response.timestamp": responseData.timestamp.toISOString(),
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result2.usage.inputTokens,
- "ai.usage.completionTokens": result2.usage.outputTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result2.finishReason],
- "gen_ai.response.id": responseData.id,
- "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result2.usage.inputTokens,
- "gen_ai.usage.output_tokens": result2.usage.outputTokens
- }
- })
- );
- return { ...result2, objectText: text2, responseData };
+ })
+ );
+ result = generateResult.objectText;
+ finishReason = generateResult.finishReason;
+ usage = generateResult.usage;
+ warnings = generateResult.warnings;
+ resultProviderMetadata = generateResult.providerMetadata;
+ request = (_a17 = generateResult.request) != null ? _a17 : {};
+ response = generateResult.responseData;
+ async function processResult(result2) {
+ const parseResult = await safeParseJSON2({ text: result2 });
+ if (!parseResult.success) {
+ throw new NoObjectGeneratedError({
+ message: "No object generated: could not parse the response.",
+ cause: parseResult.error,
+ text: result2,
+ response,
+ usage,
+ finishReason
+ });
  }
- })
- );
- result = generateResult.objectText;
- finishReason = generateResult.finishReason;
- usage = generateResult.usage;
- warnings = generateResult.warnings;
- resultProviderMetadata = generateResult.providerMetadata;
- request = (_a17 = generateResult.request) != null ? _a17 : {};
- response = generateResult.responseData;
- async function processResult(result2) {
- const parseResult = await safeParseJSON2({ text: result2 });
- if (!parseResult.success) {
- throw new NoObjectGeneratedError({
- message: "No object generated: could not parse the response.",
- cause: parseResult.error,
- text: result2,
- response,
- usage,
- finishReason
- });
- }
- const validationResult = await outputStrategy.validateFinalResult(
- parseResult.value,
- {
- text: result2,
- response,
- usage
+ const validationResult = await outputStrategy.validateFinalResult(
+ parseResult.value,
+ {
+ text: result2,
+ response,
+ usage
+ }
+ );
+ if (!validationResult.success) {
+ throw new NoObjectGeneratedError({
+ message: "No object generated: response did not match schema.",
+ cause: validationResult.error,
+ text: result2,
+ response,
+ usage,
+ finishReason
+ });
  }
- );
- if (!validationResult.success) {
- throw new NoObjectGeneratedError({
- message: "No object generated: response did not match schema.",
- cause: validationResult.error,
- text: result2,
- response,
- usage,
- finishReason
- });
+ return validationResult.value;
  }
- return validationResult.value;
- }
- let object2;
- try {
- object2 = await processResult(result);
- } catch (error) {
- if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
- const repairedText = await repairText({
- text: result,
- error: error.cause
- });
- if (repairedText === null) {
+ let object2;
+ try {
+ object2 = await processResult(result);
+ } catch (error) {
+ if (repairText != null && NoObjectGeneratedError.isInstance(error) && (JSONParseError2.isInstance(error.cause) || TypeValidationError3.isInstance(error.cause))) {
+ const repairedText = await repairText({
+ text: result,
+ error: error.cause
+ });
+ if (repairedText === null) {
+ throw error;
+ }
+ object2 = await processResult(repairedText);
+ } else {
  throw error;
  }
- object2 = await processResult(repairedText);
- } else {
- throw error;
  }
+ span.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": finishReason,
+ "ai.response.object": {
+ output: () => JSON.stringify(object2)
+ },
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": usage.inputTokens,
+ "ai.usage.completionTokens": usage.outputTokens
+ }
+ })
+ );
+ return new DefaultGenerateObjectResult({
+ object: object2,
+ finishReason,
+ usage,
+ warnings,
+ request,
+ response,
+ providerMetadata: resultProviderMetadata
+ });
  }
- span.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": finishReason,
- "ai.response.object": {
- output: () => JSON.stringify(object2)
- },
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": usage.inputTokens,
- "ai.usage.completionTokens": usage.outputTokens
- }
- })
- );
- return new DefaultGenerateObjectResult({
- object: object2,
- finishReason,
- usage,
- warnings,
- request,
- response,
- providerMetadata: resultProviderMetadata
- });
- }
- });
+ });
+ } catch (error) {
+ throw wrapGatewayError(error);
+ }
  }
  var DefaultGenerateObjectResult = class {
  constructor(options) {
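Note: most of the large hunk above is re-indentation: the whole body of `generateObject` is now wrapped in a try/catch that rethrows through `wrapGatewayError`. Basic usage is unchanged; a hedged sketch (the zod import and model id are illustrative, not part of this diff):

```js
import { generateObject } from "ai";
import { z } from "zod"; // illustrative schema library

// Gateway failures inside this call now surface as "GatewayError";
// parse/validation failures still throw NoObjectGeneratedError.
const { object } = await generateObject({
  model: "openai/gpt-4o",
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: "Name a capital city and its country.",
});
console.log(object.city, object.country);
```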
@@ -4629,7 +4657,9 @@ var DefaultGenerateObjectResult = class {
  };

  // core/generate-object/stream-object.ts
- import { createIdGenerator as createIdGenerator2 } from "@ai-sdk/provider-utils";
+ import {
+ createIdGenerator as createIdGenerator2
+ } from "@ai-sdk/provider-utils";

  // src/util/create-resolvable-promise.ts
  function createResolvablePromise() {
@@ -4786,7 +4816,9 @@ function streamObject(options) {
  headers,
  experimental_telemetry: telemetry,
  providerOptions,
- onError,
+ onError = ({ error }) => {
+ console.error(error);
+ },
  onFinish,
  _internal: {
  generateId: generateId3 = originalGenerateId2,
@@ -4879,7 +4911,7 @@ var DefaultStreamObjectResult = class {
  transform(chunk, controller) {
  controller.enqueue(chunk);
  if (chunk.type === "error") {
- onError == null ? void 0 : onError({ error: chunk.error });
+ onError({ error: wrapGatewayError(chunk.error) });
  }
  }
  });
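Note: `streamObject` errors are no longer silently dropped: `onError` now defaults to `console.error`, and error chunks pass through `wrapGatewayError` before reaching the handler. A hedged usage sketch (zod and the model id are illustrative):

```js
import { streamObject } from "ai";
import { z } from "zod"; // illustrative schema library

const { partialObjectStream } = streamObject({
  model: "openai/gpt-4o",
  schema: z.object({ story: z.string() }),
  prompt: "Tell a one-sentence story.",
  // Override the new console.error default; gateway failures arrive
  // here already wrapped as "GatewayError".
  onError({ error }) {
    console.error("stream-object failed:", error);
  },
});

for await (const partial of partialObjectStream) {
  console.log(partial);
}
```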
@@ -5279,8 +5311,8 @@ var DefaultStreamObjectResult = class {
  };

  // src/error/no-speech-generated-error.ts
- import { AISDKError as AISDKError18 } from "@ai-sdk/provider";
- var NoSpeechGeneratedError = class extends AISDKError18 {
+ import { AISDKError as AISDKError19 } from "@ai-sdk/provider";
+ var NoSpeechGeneratedError = class extends AISDKError19 {
  constructor(options) {
  super({
  name: "AI_NoSpeechGeneratedError",
@@ -5720,239 +5752,243 @@ async function generateText({
  messages
  });
  const tracer = getTracer(telemetry);
- return recordSpan({
- name: "ai.generateText",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateText",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // model:
- "ai.model.provider": model.provider,
- "ai.model.id": model.modelId,
- // specific settings that only make sense on the outer level:
- "ai.prompt": {
- input: () => JSON.stringify({ system, prompt, messages })
- }
- }
- }),
- tracer,
- fn: async (span) => {
- var _a17, _b, _c, _d, _e;
- const callSettings2 = prepareCallSettings(settings);
- let currentModelResponse;
- let currentToolCalls = [];
- let currentToolResults = [];
- const responseMessages = [];
- const steps = [];
- do {
- const stepInputMessages = [
- ...initialPrompt.messages,
- ...responseMessages
- ];
- const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
- model,
- steps,
- stepNumber: steps.length
- }));
- const promptMessages = await convertToLanguageModelPrompt({
- prompt: {
- system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
- messages: stepInputMessages
- },
- supportedUrls: await model.supportedUrls
- });
- const stepModel = resolveLanguageModel(
- (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
- );
- const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
- tools,
- toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
- activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
- });
- currentModelResponse = await retry(
- () => {
- var _a18;
- return recordSpan({
- name: "ai.generateText.doGenerate",
- attributes: selectTelemetryAttributes({
- telemetry,
- attributes: {
- ...assembleOperationName({
- operationId: "ai.generateText.doGenerate",
- telemetry
- }),
- ...baseTelemetryAttributes,
- // model:
- "ai.model.provider": stepModel.provider,
- "ai.model.id": stepModel.modelId,
- // prompt:
- "ai.prompt.messages": {
- input: () => stringifyForTelemetry(promptMessages)
- },
- "ai.prompt.tools": {
- // convert the language model level tools:
- input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
- },
- "ai.prompt.toolChoice": {
- input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
- },
- // standardized gen-ai llm span attributes:
- "gen_ai.system": stepModel.provider,
- "gen_ai.request.model": stepModel.modelId,
- "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
- "gen_ai.request.max_tokens": settings.maxOutputTokens,
- "gen_ai.request.presence_penalty": settings.presencePenalty,
- "gen_ai.request.stop_sequences": settings.stopSequences,
- "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
- "gen_ai.request.top_k": settings.topK,
- "gen_ai.request.top_p": settings.topP
- }
- }),
- tracer,
- fn: async (span2) => {
- var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
- const result = await stepModel.doGenerate({
- ...callSettings2,
- tools: stepTools,
- toolChoice: stepToolChoice,
- responseFormat: output == null ? void 0 : output.responseFormat,
- prompt: promptMessages,
- providerOptions,
- abortSignal,
- headers
- });
- const responseData = {
- id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
- timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
- modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
- headers: (_g = result.response) == null ? void 0 : _g.headers,
- body: (_h = result.response) == null ? void 0 : _h.body
- };
- span2.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": result.finishReason,
- "ai.response.text": {
- output: () => extractContentText(result.content)
- },
- "ai.response.toolCalls": {
- output: () => {
- const toolCalls = asToolCalls(result.content);
- return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
- }
- },
- "ai.response.id": responseData.id,
- "ai.response.model": responseData.modelId,
- "ai.response.timestamp": responseData.timestamp.toISOString(),
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": result.usage.inputTokens,
- "ai.usage.completionTokens": result.usage.outputTokens,
- // standardized gen-ai llm span attributes:
- "gen_ai.response.finish_reasons": [result.finishReason],
- "gen_ai.response.id": responseData.id,
- "gen_ai.response.model": responseData.modelId,
- "gen_ai.usage.input_tokens": result.usage.inputTokens,
- "gen_ai.usage.output_tokens": result.usage.outputTokens
- }
- })
- );
- return { ...result, response: responseData };
- }
- });
+ try {
+ return await recordSpan({
+ name: "ai.generateText",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateText",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": model.provider,
+ "ai.model.id": model.modelId,
+ // specific settings that only make sense on the outer level:
+ "ai.prompt": {
+ input: () => JSON.stringify({ system, prompt, messages })
  }
- );
- currentToolCalls = await Promise.all(
- currentModelResponse.content.filter(
- (part) => part.type === "tool-call"
- ).map(
- (toolCall) => parseToolCall({
- toolCall,
- tools,
- repairToolCall,
- system,
+ }
+ }),
+ tracer,
+ fn: async (span) => {
+ var _a17, _b, _c, _d, _e;
+ const callSettings2 = prepareCallSettings(settings);
+ let currentModelResponse;
+ let currentToolCalls = [];
+ let currentToolResults = [];
+ const responseMessages = [];
+ const steps = [];
+ do {
+ const stepInputMessages = [
+ ...initialPrompt.messages,
+ ...responseMessages
+ ];
+ const prepareStepResult = await (prepareStep == null ? void 0 : prepareStep({
+ model,
+ steps,
+ stepNumber: steps.length
+ }));
+ const promptMessages = await convertToLanguageModelPrompt({
+ prompt: {
+ system: (_a17 = prepareStepResult == null ? void 0 : prepareStepResult.system) != null ? _a17 : initialPrompt.system,
  messages: stepInputMessages
+ },
+ supportedUrls: await model.supportedUrls
+ });
+ const stepModel = resolveLanguageModel(
+ (_b = prepareStepResult == null ? void 0 : prepareStepResult.model) != null ? _b : model
+ );
+ const { toolChoice: stepToolChoice, tools: stepTools } = prepareToolsAndToolChoice({
+ tools,
+ toolChoice: (_c = prepareStepResult == null ? void 0 : prepareStepResult.toolChoice) != null ? _c : toolChoice,
+ activeTools: (_d = prepareStepResult == null ? void 0 : prepareStepResult.activeTools) != null ? _d : activeTools
+ });
+ currentModelResponse = await retry(
+ () => {
+ var _a18;
+ return recordSpan({
+ name: "ai.generateText.doGenerate",
+ attributes: selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ ...assembleOperationName({
+ operationId: "ai.generateText.doGenerate",
+ telemetry
+ }),
+ ...baseTelemetryAttributes,
+ // model:
+ "ai.model.provider": stepModel.provider,
+ "ai.model.id": stepModel.modelId,
+ // prompt:
+ "ai.prompt.messages": {
+ input: () => stringifyForTelemetry(promptMessages)
+ },
+ "ai.prompt.tools": {
+ // convert the language model level tools:
+ input: () => stepTools == null ? void 0 : stepTools.map((tool2) => JSON.stringify(tool2))
+ },
+ "ai.prompt.toolChoice": {
+ input: () => stepToolChoice != null ? JSON.stringify(stepToolChoice) : void 0
+ },
+ // standardized gen-ai llm span attributes:
+ "gen_ai.system": stepModel.provider,
+ "gen_ai.request.model": stepModel.modelId,
+ "gen_ai.request.frequency_penalty": settings.frequencyPenalty,
+ "gen_ai.request.max_tokens": settings.maxOutputTokens,
+ "gen_ai.request.presence_penalty": settings.presencePenalty,
+ "gen_ai.request.stop_sequences": settings.stopSequences,
+ "gen_ai.request.temperature": (_a18 = settings.temperature) != null ? _a18 : void 0,
+ "gen_ai.request.top_k": settings.topK,
+ "gen_ai.request.top_p": settings.topP
+ }
+ }),
+ tracer,
+ fn: async (span2) => {
+ var _a19, _b2, _c2, _d2, _e2, _f, _g, _h;
+ const result = await stepModel.doGenerate({
+ ...callSettings2,
+ tools: stepTools,
+ toolChoice: stepToolChoice,
+ responseFormat: output == null ? void 0 : output.responseFormat,
+ prompt: promptMessages,
+ providerOptions,
+ abortSignal,
+ headers
+ });
+ const responseData = {
+ id: (_b2 = (_a19 = result.response) == null ? void 0 : _a19.id) != null ? _b2 : generateId3(),
+ timestamp: (_d2 = (_c2 = result.response) == null ? void 0 : _c2.timestamp) != null ? _d2 : currentDate(),
+ modelId: (_f = (_e2 = result.response) == null ? void 0 : _e2.modelId) != null ? _f : stepModel.modelId,
+ headers: (_g = result.response) == null ? void 0 : _g.headers,
+ body: (_h = result.response) == null ? void 0 : _h.body
+ };
+ span2.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": result.finishReason,
+ "ai.response.text": {
+ output: () => extractContentText(result.content)
+ },
+ "ai.response.toolCalls": {
+ output: () => {
+ const toolCalls = asToolCalls(result.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
+ },
+ "ai.response.id": responseData.id,
+ "ai.response.model": responseData.modelId,
+ "ai.response.timestamp": responseData.timestamp.toISOString(),
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": result.usage.inputTokens,
+ "ai.usage.completionTokens": result.usage.outputTokens,
+ // standardized gen-ai llm span attributes:
+ "gen_ai.response.finish_reasons": [result.finishReason],
+ "gen_ai.response.id": responseData.id,
+ "gen_ai.response.model": responseData.modelId,
+ "gen_ai.usage.input_tokens": result.usage.inputTokens,
+ "gen_ai.usage.output_tokens": result.usage.outputTokens
+ }
+ })
+ );
+ return { ...result, response: responseData };
+ }
+ });
+ }
+ );
+ currentToolCalls = await Promise.all(
+ currentModelResponse.content.filter(
+ (part) => part.type === "tool-call"
+ ).map(
+ (toolCall) => parseToolCall({
+ toolCall,
+ tools,
+ repairToolCall,
+ system,
+ messages: stepInputMessages
+ })
+ )
+ );
+ currentToolResults = tools == null ? [] : await executeTools({
+ toolCalls: currentToolCalls,
+ tools,
+ tracer,
+ telemetry,
+ messages: stepInputMessages,
+ abortSignal
+ });
+ const stepContent = asContent({
+ content: currentModelResponse.content,
+ toolCalls: currentToolCalls,
+ toolResults: currentToolResults
+ });
+ responseMessages.push(
+ ...toResponseMessages({
+ content: stepContent,
+ tools: tools != null ? tools : {}
  })
- )
- );
- currentToolResults = tools == null ? [] : await executeTools({
- toolCalls: currentToolCalls,
- tools,
- tracer,
- telemetry,
- messages: stepInputMessages,
- abortSignal
- });
- const stepContent = asContent({
- content: currentModelResponse.content,
- toolCalls: currentToolCalls,
- toolResults: currentToolResults
- });
- responseMessages.push(
- ...toResponseMessages({
+ );
+ const currentStepResult = new DefaultStepResult({
  content: stepContent,
- tools: tools != null ? tools : {}
+ finishReason: currentModelResponse.finishReason,
+ usage: currentModelResponse.usage,
+ warnings: currentModelResponse.warnings,
+ providerMetadata: currentModelResponse.providerMetadata,
+ request: (_e = currentModelResponse.request) != null ? _e : {},
+ response: {
+ ...currentModelResponse.response,
+ // deep clone msgs to avoid mutating past messages in multi-step:
+ messages: structuredClone(responseMessages)
+ }
+ });
+ steps.push(currentStepResult);
+ await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
+ } while (
+ // there are tool calls:
+ currentToolCalls.length > 0 && // all current tool calls have results:
+ currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
+ !await isStopConditionMet({ stopConditions, steps })
+ );
+ span.setAttributes(
+ selectTelemetryAttributes({
+ telemetry,
+ attributes: {
+ "ai.response.finishReason": currentModelResponse.finishReason,
+ "ai.response.text": {
+ output: () => extractContentText(currentModelResponse.content)
+ },
+ "ai.response.toolCalls": {
+ output: () => {
+ const toolCalls = asToolCalls(currentModelResponse.content);
+ return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
+ }
+ },
+ // TODO rename telemetry attributes to inputTokens and outputTokens
+ "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
+ "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
+ }
  })
  );
- const currentStepResult = new DefaultStepResult({
- content: stepContent,
- finishReason: currentModelResponse.finishReason,
- usage: currentModelResponse.usage,
- warnings: currentModelResponse.warnings,
- providerMetadata: currentModelResponse.providerMetadata,
- request: (_e = currentModelResponse.request) != null ? _e : {},
- response: {
- ...currentModelResponse.response,
- // deep clone msgs to avoid mutating past messages in multi-step:
- messages: structuredClone(responseMessages)
- }
+ const lastStep = steps[steps.length - 1];
+ return new DefaultGenerateTextResult({
+ steps,
+ resolvedOutput: await (output == null ? void 0 : output.parseOutput(
+ { text: lastStep.text },
+ {
+ response: lastStep.response,
+ usage: lastStep.usage,
+ finishReason: lastStep.finishReason
+ }
+ ))
  });
- steps.push(currentStepResult);
- await (onStepFinish == null ? void 0 : onStepFinish(currentStepResult));
- } while (
- // there are tool calls:
- currentToolCalls.length > 0 && // all current tool calls have results:
- currentToolResults.length === currentToolCalls.length && // continue until a stop condition is met:
- !await isStopConditionMet({ stopConditions, steps })
- );
- span.setAttributes(
- selectTelemetryAttributes({
- telemetry,
- attributes: {
- "ai.response.finishReason": currentModelResponse.finishReason,
- "ai.response.text": {
- output: () => extractContentText(currentModelResponse.content)
- },
- "ai.response.toolCalls": {
- output: () => {
- const toolCalls = asToolCalls(currentModelResponse.content);
- return toolCalls == null ? void 0 : JSON.stringify(toolCalls);
- }
- },
- // TODO rename telemetry attributes to inputTokens and outputTokens
- "ai.usage.promptTokens": currentModelResponse.usage.inputTokens,
- "ai.usage.completionTokens": currentModelResponse.usage.outputTokens
- }
- })
- );
- const lastStep = steps[steps.length - 1];
- return new DefaultGenerateTextResult({
- steps,
- resolvedOutput: await (output == null ? void 0 : output.parseOutput(
- { text: lastStep.text },
- {
- response: lastStep.response,
- usage: lastStep.usage,
- finishReason: lastStep.finishReason
- }
- ))
- });
- }
- });
+ }
+ });
+ } catch (error) {
+ throw wrapGatewayError(error);
+ }
  }
  async function executeTools({
  toolCalls,
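Note: as in `generateObject`, the `generateText` hunk above is dominated by re-indentation from the new try/catch around `recordSpan`. The multi-step loop itself is unchanged: it repeats while every tool call produced a result and no stop condition is met, reporting each completed step through `onStepFinish`. A hedged sketch of observing the steps (the model id is illustrative):

```js
import { generateText } from "ai";

const { text, steps } = await generateText({
  model: "openai/gpt-4o",
  prompt: "Say hello.",
  // Called once per completed step with the DefaultStepResult seen above.
  onStepFinish(step) {
    console.log(step.finishReason, step.usage);
  },
});
console.log(`"${text}" finished in ${steps.length} step(s)`);
```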
@@ -6489,7 +6525,9 @@ function streamText({
  experimental_repairToolCall: repairToolCall,
  experimental_transform: transform,
  onChunk,
- onError,
+ onError = ({ error }) => {
+ console.error(error);
+ },
  onFinish,
  onStepFinish,
  _internal: {
@@ -6628,7 +6666,7 @@ var DefaultStreamTextResult = class {
  await (onChunk == null ? void 0 : onChunk({ chunk: part }));
  }
  if (part.type === "error") {
- await (onError == null ? void 0 : onError({ error: part.error }));
+ await onError({ error: wrapGatewayError(part.error) });
  }
  if (part.type === "text") {
  const latestContent = recordedContent[recordedContent.length - 1];
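Note: `streamText` mirrors `streamObject`: `onError` now defaults to `console.error`, and error parts are wrapped by `wrapGatewayError` before the handler runs. A hedged usage sketch (the model id is illustrative):

```js
import { streamText } from "ai";

const result = streamText({
  model: "openai/gpt-4o",
  prompt: "Say hello.",
  onError({ error }) {
    // Without this handler, the new default logs via console.error.
    console.error("stream-text failed:", error);
  },
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
```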
@@ -7766,7 +7804,7 @@ function customProvider({
  var experimental_customProvider = customProvider;

  // core/registry/no-such-provider-error.ts
- import { AISDKError as AISDKError19, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
+ import { AISDKError as AISDKError20, NoSuchModelError as NoSuchModelError3 } from "@ai-sdk/provider";
  var name16 = "AI_NoSuchProviderError";
  var marker16 = `vercel.ai.error.${name16}`;
  var symbol16 = Symbol.for(marker16);
@@ -7785,7 +7823,7 @@ var NoSuchProviderError = class extends NoSuchModelError3 {
  this.availableProviders = availableProviders;
  }
  static isInstance(error) {
- return AISDKError19.hasMarker(error, marker16);
+ return AISDKError20.hasMarker(error, marker16);
  }
  };
  _a16 = symbol16;
@@ -8442,8 +8480,8 @@ var MCPClient = class {
  };

  // src/error/no-transcript-generated-error.ts
- import { AISDKError as AISDKError20 } from "@ai-sdk/provider";
- var NoTranscriptGeneratedError = class extends AISDKError20 {
+ import { AISDKError as AISDKError21 } from "@ai-sdk/provider";
+ var NoTranscriptGeneratedError = class extends AISDKError21 {
  constructor(options) {
  super({
  name: "AI_NoTranscriptGeneratedError",
@@ -8511,6 +8549,7 @@ export {
  DefaultChatTransport,
  DownloadError,
  EmptyResponseBodyError,
+ GLOBAL_DEFAULT_PROVIDER,
  InvalidArgumentError,
  InvalidDataContentError,
  InvalidMessageRoleError,