ai 4.0.0-canary.6 → 4.0.0-canary.8

This diff shows the published contents of two package versions as they appear in their respective public registries. It is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -46,33 +46,6 @@ type TelemetrySettings = {
  tracer?: Tracer;
  };
 
- /**
- Represents the number of tokens used in a prompt and completion.
- */
- type LanguageModelUsage$1 = {
- /**
- The number of tokens used in the prompt.
- */
- promptTokens: number;
- /**
- The number of tokens used in the completion.
- */
- completionTokens: number;
- /**
- The total number of tokens used (promptTokens + completionTokens).
- */
- totalTokens: number;
- };
- /**
- Represents the number of tokens used in an embedding.
- */
- type EmbeddingModelUsage$1 = {
- /**
- The number of tokens used in the embedding.
- */
- tokens: number;
- };
-
  /**
  Embedding model that is used by the AI SDK Core functions.
  */
@@ -147,10 +120,6 @@ type LanguageModelResponseMetadata = {
  */
  headers?: Record<string, string>;
  };
- /**
- @deprecated Use `LanguageModelResponseMetadata` instead.
- */
- type LanguageModelResponseMetadataWithHeaders = LanguageModelResponseMetadata;
 
  /**
  * Provider for language and text embedding models.
@@ -188,19 +157,31 @@ functionality that can be fully encapsulated in the provider.
  type ProviderMetadata = LanguageModelV1ProviderMetadata;
 
  /**
- * @deprecated Use LanguageModelUsage instead.
- */
- type TokenUsage = LanguageModelUsage$1;
- /**
- * @deprecated Use LanguageModelUsage instead.
+ Represents the number of tokens used in a prompt and completion.
  */
- type CompletionTokenUsage = LanguageModelUsage$1;
- type LanguageModelUsage = LanguageModelUsage$1;
+ type LanguageModelUsage = {
+ /**
+ The number of tokens used in the prompt.
+ */
+ promptTokens: number;
+ /**
+ The number of tokens used in the completion.
+ */
+ completionTokens: number;
+ /**
+ The total number of tokens used (promptTokens + completionTokens).
+ */
+ totalTokens: number;
+ };
  /**
- * @deprecated Use EmbeddingModelUsage instead.
+ Represents the number of tokens used in an embedding.
  */
- type EmbeddingTokenUsage = EmbeddingModelUsage$1;
- type EmbeddingModelUsage = EmbeddingModelUsage$1;
+ type EmbeddingModelUsage = {
+ /**
+ The number of tokens used in the embedding.
+ */
+ tokens: number;
+ };
 
  /**
  The result of an `embed` call.
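
Note: the deprecated usage aliases (TokenUsage, CompletionTokenUsage, EmbeddingTokenUsage) are removed, and LanguageModelUsage / EmbeddingModelUsage become standalone types. A minimal migration sketch, assuming consumer code that imported one of the old aliases (the logUsage helper is hypothetical):

    // Before (canary.6): import type { TokenUsage } from 'ai';
    // After (canary.8): only the new names remain exported.
    import type { LanguageModelUsage } from 'ai';

    const logUsage = (usage: LanguageModelUsage) =>
      console.log(usage.promptTokens, usage.completionTokens, usage.totalTokens);
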
@@ -218,7 +199,7 @@ interface EmbedResult<VALUE> {
  /**
  The embedding token usage.
  */
- readonly usage: EmbeddingModelUsage$1;
+ readonly usage: EmbeddingModelUsage;
  /**
  Optional raw response data.
  */
@@ -288,7 +269,7 @@ interface EmbedManyResult<VALUE> {
  /**
  The embedding token usage.
  */
- readonly usage: EmbeddingModelUsage$1;
+ readonly usage: EmbeddingModelUsage;
  }
 
  /**
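
Note: EmbedResult.usage and EmbedManyResult.usage are now typed with the public EmbeddingModelUsage shape ({ tokens: number }) instead of the private $1 alias. A sketch, where myEmbeddingModel stands in for any configured EmbeddingModel<string>:

    import { embed } from 'ai';

    const { embedding, usage } = await embed({
      model: myEmbeddingModel, // placeholder for a real embedding model
      value: 'sunny day at the beach',
    });
    console.log(usage.tokens); // embedding token count
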
@@ -659,7 +640,7 @@ interface GenerateObjectResult<T> {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings).
  */
@@ -916,7 +897,7 @@ interface StreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM> {
  /**
  The token usage of the generated response. Resolved when the response is finished.
  */
- readonly usage: Promise<LanguageModelUsage$1>;
+ readonly usage: Promise<LanguageModelUsage>;
  /**
  Additional provider-specific metadata. They are passed through
  from the provider to the AI SDK and enable provider-specific
@@ -988,7 +969,7 @@ type ObjectStreamPart<PARTIAL> = {
  type: 'finish';
  finishReason: FinishReason;
  logprobs?: LogProbs;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  response: LanguageModelResponseMetadata;
  providerMetadata?: ProviderMetadata;
  };
@@ -997,7 +978,7 @@ type OnFinishCallback<RESULT> = (event: {
  /**
  The token usage of the generated response.
  */
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  /**
  The generated object. Can be undefined if the final object does not match the schema.
  */
@@ -1266,10 +1247,6 @@ declare function tool<PARAMETERS extends Parameters, RESULT>(tool: CoreTool<PARA
  }): CoreTool<PARAMETERS, RESULT> & {
  execute: undefined;
  };
- /**
- * @deprecated Use `CoreTool` instead.
- */
- type ExperimentalTool = CoreTool;
 
  /**
  Converts an array of messages from useChat into an array of CoreMessages that can be used
@@ -1374,7 +1351,7 @@ type StepResult<TOOLS extends Record<string, CoreTool>> = {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings).
  */
@@ -1440,16 +1417,12 @@ interface GenerateTextResult<TOOLS extends Record<string, CoreTool>> {
  /**
  The token usage of the generated text.
  */
- readonly usage: LanguageModelUsage$1;
+ readonly usage: LanguageModelUsage;
  /**
  Warnings from the model provider (e.g. unsupported settings)
  */
  readonly warnings: CallWarning[] | undefined;
  /**
- @deprecated use `response.messages` instead.
- */
- readonly responseMessages: Array<CoreAssistantMessage | CoreToolMessage>;
- /**
  Details for all steps.
  You can use this to get information about intermediate steps,
  such as the tool calls or the response headers.
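
Note: GenerateTextResult no longer carries the deprecated responseMessages property; the same messages live on response.messages, as the removed @deprecated tag pointed out. A sketch (model is a placeholder for a configured LanguageModel):

    import { generateText } from 'ai';

    const result = await generateText({ model, prompt: 'Hello!' });
    // Before: result.responseMessages
    const messages = result.response.messages;
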
@@ -1534,7 +1507,7 @@ If set and supported by the model, calls will generate deterministic results.
  @returns
  A result object that contains the generated text, the results of the tool calls, and additional information.
  */
- declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continuationSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
+ declare function generateText<TOOLS extends Record<string, CoreTool>>({ model, tools, toolChoice, system, prompt, messages, maxRetries, abortSignal, headers, maxSteps, experimental_continueSteps: continueSteps, experimental_telemetry: telemetry, experimental_providerMetadata: providerMetadata, experimental_activeTools: activeTools, _internal: { generateId, currentDate, }, onStepFinish, ...settings }: CallSettings & Prompt & {
  /**
  The language model to use.
  */
@@ -1556,10 +1529,6 @@ By default, it's set to 1, which means that only a single LLM call is made.
  */
  maxSteps?: number;
  /**
- @deprecated Use `experimental_continueSteps` instead.
- */
- experimental_continuationSteps?: boolean;
- /**
  When enabled, the model will perform additional steps if the finish reason is "length" (experimental).
 
  By default, it's set to false.
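
Note: the deprecated experimental_continuationSteps option is dropped from generateText; only experimental_continueSteps remains. A sketch (model is a placeholder):

    // Before: generateText({ ..., experimental_continuationSteps: true })
    const result = await generateText({
      model,
      prompt: 'Write a long story.',
      maxSteps: 5,
      experimental_continueSteps: true, // continue when finishReason is 'length'
    });
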
@@ -1612,11 +1581,6 @@ declare class StreamData {
  * This assumes every chunk is a 'text' chunk.
  */
  declare function createStreamDataTransformer(): TransformStream<any, any>;
- /**
- @deprecated Use `StreamData` instead.
- */
- declare class experimental_StreamData extends StreamData {
- }
 
  /**
  A result object for accessing different stream types and additional information.
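
Note: the deprecated experimental_StreamData class is removed; instantiate StreamData directly. A sketch (the appended payload is hypothetical):

    import { StreamData } from 'ai';

    const data = new StreamData();          // before: new experimental_StreamData()
    data.append({ status: 'started' });
    data.close();
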
@@ -1625,14 +1589,14 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  /**
  Warnings from the model provider (e.g. unsupported settings) for the first step.
  */
- readonly warnings: CallWarning[] | undefined;
+ readonly warnings: Promise<CallWarning[] | undefined>;
  /**
  The total token usage of the generated response.
  When there are multiple steps, the usage is the sum of all step usages.
 
  Resolved when the response is finished.
  */
- readonly usage: Promise<LanguageModelUsage$1>;
+ readonly usage: Promise<LanguageModelUsage>;
  /**
  The reason why the generation finished. Taken from the last step.
 
@@ -1664,10 +1628,6 @@ interface StreamTextResult<TOOLS extends Record<string, CoreTool>> {
  */
  readonly toolResults: Promise<ToolResultUnion<TOOLS>[]>;
  /**
- @deprecated use `response.messages` instead.
- */
- readonly responseMessages: Promise<Array<CoreAssistantMessage | CoreToolMessage>>;
- /**
  Details for all steps.
  You can use this to get information about intermediate steps,
  such as the tool calls or the response headers.
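
Note: two breaking changes land on StreamTextResult here: warnings is now a Promise (resolved when the stream finishes), and the deprecated responseMessages promise is removed in favor of response.messages. A sketch, given a result returned by streamText:

    // const result = ... // StreamTextResult from streamText({ model, prompt }), model is a placeholder
    const warnings = await result.warnings;      // before: result.warnings (plain array)
    const usage = await result.usage;
    const { messages } = await result.response;  // replaces: await result.responseMessages
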
@@ -1790,7 +1750,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
  type: 'step-finish';
  finishReason: FinishReason;
  logprobs?: LogProbs;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  response: LanguageModelResponseMetadata;
  experimental_providerMetadata?: ProviderMetadata;
  isContinued: boolean;
@@ -1798,7 +1758,7 @@ type TextStreamPart<TOOLS extends Record<string, CoreTool>> = {
  type: 'finish';
  finishReason: FinishReason;
  logprobs?: LogProbs;
- usage: LanguageModelUsage$1;
+ usage: LanguageModelUsage;
  response: LanguageModelResponseMetadata;
  experimental_providerMetadata?: ProviderMetadata;
  } | {
@@ -2040,70 +2000,10 @@ declare class NoSuchProviderError extends NoSuchModelError {
  static isInstance(error: unknown): error is NoSuchProviderError;
  }
 
- /**
- * Provides for language and text embedding models.
- *
- * @deprecated Use `ProviderV1` instead.
- */
- interface experimental_Provider {
- /**
- Returns the language model with the given id in the format `providerId:modelId`.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @throws {NoSuchModelError} If no model with the given id exists.
- @throws {NoSuchProviderError} If no provider with the given id exists.
-
- @returns {LanguageModel} The language model associated with the id.
- */
- languageModel?: (modelId: string) => LanguageModel;
- /**
- Returns the text embedding model with the given id in the format `providerId:modelId`.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @throws {NoSuchModelError} If no model with the given id exists.
- @throws {NoSuchProviderError} If no provider with the given id exists.
-
- @returns {LanguageModel} The language model associated with the id.
- */
- textEmbeddingModel?: (modelId: string) => EmbeddingModel<string>;
- /**
- Returns the text embedding model with the given id in the format `providerId:modelId`.
- The model id is then passed to the provider function to get the model.
-
- @param {string} id - The id of the model to return.
-
- @throws {NoSuchModelError} If no model with the given id exists.
- @throws {NoSuchProviderError} If no provider with the given id exists.
-
- @returns {LanguageModel} The language model associated with the id.
-
- @deprecated use `textEmbeddingModel` instead.
- */
- textEmbedding?: (modelId: string) => EmbeddingModel<string>;
- }
-
- /**
- Registry for managing models. It enables getting a model with a string id.
-
- @deprecated Use `experimental_Provider` instead.
- */
- type experimental_ProviderRegistry = Provider;
- /**
- * @deprecated Use `experimental_ProviderRegistry` instead.
- */
- type experimental_ModelRegistry = experimental_ProviderRegistry;
  /**
  * Creates a registry for the given providers.
  */
- declare function experimental_createProviderRegistry(providers: Record<string, experimental_Provider | Provider>): Provider;
- /**
- * @deprecated Use `experimental_createProviderRegistry` instead.
- */
- declare const experimental_createModelRegistry: typeof experimental_createProviderRegistry;
+ declare function experimental_createProviderRegistry(providers: Record<string, Provider>): Provider;
 
  /**
  * Calculates the cosine similarity between two vectors. This is a useful metric for
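
Note: the experimental_Provider interface and the experimental_ProviderRegistry / experimental_ModelRegistry aliases are gone; experimental_createProviderRegistry now accepts only full Provider implementations. A sketch (openai and anthropic stand in for real provider instances, and the model id is illustrative):

    import { experimental_createProviderRegistry } from 'ai';

    const registry = experimental_createProviderRegistry({ openai, anthropic });
    const model = registry.languageModel('openai:gpt-4o'); // 'providerId:modelId'
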
@@ -2252,14 +2152,6 @@ The process parameter is a callback in which you can run the assistant on thread
  */
  type AssistantResponseCallback = (options: {
  /**
- @deprecated use variable from outer scope instead.
- */
- threadId: string;
- /**
- @deprecated use variable from outer scope instead.
- */
- messageId: string;
- /**
  Forwards an assistant message (non-streaming) to the client.
  */
  sendMessage: (message: AssistantMessage) => void;
@@ -2278,10 +2170,6 @@ It is designed to facilitate streaming assistant responses to the `useAssistant`
  It receives an assistant thread and a current message, and can send messages and data messages to the client.
  */
  declare function AssistantResponse({ threadId, messageId }: AssistantResponseSettings, process: AssistantResponseCallback): Response;
- /**
- @deprecated Use `AssistantResponse` instead.
- */
- declare const experimental_AssistantResponse: typeof AssistantResponse;
 
  /**
  * Configuration options and helper callback methods for stream lifecycle events.
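
Note: the AssistantResponse callback no longer receives threadId and messageId; close over the values you passed in the settings object instead, as the removed @deprecated tags advised. A sketch:

    return AssistantResponse(
      { threadId, messageId }, // from your own request handling
      async ({ sendMessage, sendDataMessage, forwardStream }) => {
        // before, threadId/messageId were also passed in here; now use the
        // outer-scope variables above
      },
    );
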
@@ -2327,16 +2215,6 @@ type LangChainStreamEvent = {
  /**
  Converts LangChain output streams to AIStream.
 
- The following streams are supported:
- - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
- - `string` streams (LangChain `StringOutputParser` output)
-
- @deprecated Use `toDataStream` instead.
- */
- declare function toAIStream(stream: ReadableStream<LangChainStreamEvent> | ReadableStream<LangChainAIMessageChunk> | ReadableStream<string>, callbacks?: StreamCallbacks): ReadableStream<any>;
- /**
- Converts LangChain output streams to AIStream.
-
  The following streams are supported:
  - `LangChainAIMessageChunk` streams (LangChain `model.stream` output)
  - `string` streams (LangChain `StringOutputParser` output)
@@ -2348,10 +2226,8 @@ declare function toDataStreamResponse$1(stream: ReadableStream<LangChainStreamEv
  callbacks?: StreamCallbacks;
  }): Response;
 
- declare const langchainAdapter_toAIStream: typeof toAIStream;
  declare namespace langchainAdapter {
  export {
- langchainAdapter_toAIStream as toAIStream,
  toDataStream$1 as toDataStream,
  toDataStreamResponse$1 as toDataStreamResponse,
  };
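
Note: the deprecated LangChainAdapter.toAIStream export is removed; toDataStream (which it delegated to) and toDataStreamResponse remain. A sketch (stream stands in for LangChain model.stream output):

    import { LangChainAdapter } from 'ai';

    // Before: LangChainAdapter.toAIStream(stream)
    const dataStream = LangChainAdapter.toDataStream(stream);
    // or respond directly from a route handler:
    return LangChainAdapter.toDataStreamResponse(stream);
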
@@ -2376,4 +2252,4 @@ declare namespace llamaindexAdapter {
  };
  }
 
- export { AssistantContent, AssistantResponse, CallWarning, CompletionTokenUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, EmbeddingTokenUsage, ExperimentalTool, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelResponseMetadataWithHeaders, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, TokenUsage, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_AssistantResponse, experimental_ModelRegistry, experimental_Provider, experimental_ProviderRegistry, experimental_StreamData, experimental_createModelRegistry, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
+ export { AssistantContent, AssistantResponse, CallWarning, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, ToolCallUnion as CoreToolCallUnion, CoreToolChoice, CoreToolMessage, ToolResultUnion as CoreToolResultUnion, CoreUserMessage, DataContent, DownloadError, EmbedManyResult, EmbedResult, Embedding, EmbeddingModel, EmbeddingModelUsage, Experimental_LanguageModelV1Middleware, FilePart, FinishReason, GenerateObjectResult, GenerateTextResult, ImagePart, InvalidArgumentError, InvalidDataContentError, InvalidMessageRoleError, InvalidToolArgumentsError, langchainAdapter as LangChainAdapter, LanguageModel, LanguageModelRequestMetadata, LanguageModelResponseMetadata, LanguageModelUsage, llamaindexAdapter as LlamaIndexAdapter, LogProbs, MessageConversionError, NoObjectGeneratedError, NoSuchProviderError, NoSuchToolError, ObjectStreamPart, Provider, ProviderMetadata, RetryError, StepResult, StreamData, StreamObjectResult, StreamTextResult, TextPart, TextStreamPart, ToolCallPart, ToolContent, ToolResultPart, UserContent, convertToCoreMessages, cosineSimilarity, createStreamDataTransformer, embed, embedMany, experimental_createProviderRegistry, experimental_customProvider, experimental_wrapLanguageModel, generateObject, generateText, streamObject, streamText, tool };
package/dist/index.js CHANGED
@@ -50,9 +50,6 @@ __export(streams_exports, {
  createStreamDataTransformer: () => createStreamDataTransformer,
  embed: () => embed,
  embedMany: () => embedMany,
- experimental_AssistantResponse: () => experimental_AssistantResponse,
- experimental_StreamData: () => experimental_StreamData,
- experimental_createModelRegistry: () => experimental_createModelRegistry,
  experimental_createProviderRegistry: () => experimental_createProviderRegistry,
  experimental_customProvider: () => experimental_customProvider,
  experimental_wrapLanguageModel: () => experimental_wrapLanguageModel,
@@ -1546,11 +1543,14 @@ function standardizePrompt({
  }
 
  // core/types/usage.ts
- function calculateLanguageModelUsage(usage) {
+ function calculateLanguageModelUsage({
+ promptTokens,
+ completionTokens
+ }) {
  return {
- promptTokens: usage.promptTokens,
- completionTokens: usage.completionTokens,
- totalTokens: usage.promptTokens + usage.completionTokens
+ promptTokens,
+ completionTokens,
+ totalTokens: promptTokens + completionTokens
  };
  }
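
Note: this refactor of the internal calculateLanguageModelUsage helper is behavior-preserving; it destructures the provider-reported counts and still derives the total. For illustration (hypothetical call):

    calculateLanguageModelUsage({ promptTokens: 10, completionTokens: 5 });
    // => { promptTokens: 10, completionTokens: 5, totalTokens: 15 }
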
@@ -3151,8 +3151,7 @@ async function generateText({
  abortSignal,
  headers,
  maxSteps = 1,
- experimental_continuationSteps,
- experimental_continueSteps: continueSteps = experimental_continuationSteps != null ? experimental_continuationSteps : false,
+ experimental_continueSteps: continueSteps = false,
  experimental_telemetry: telemetry,
  experimental_providerMetadata: providerMetadata,
  experimental_activeTools: activeTools,
@@ -3424,7 +3423,6 @@ async function generateText({
  messages: responseMessages
  },
  logprobs: currentModelResponse.logprobs,
- responseMessages,
  steps,
  providerMetadata: currentModelResponse.providerMetadata
  });
@@ -3501,7 +3499,6 @@ var DefaultGenerateTextResult = class {
  this.warnings = options.warnings;
  this.request = options.request;
  this.response = options.response;
- this.responseMessages = options.responseMessages;
  this.steps = options.steps;
  this.experimental_providerMetadata = options.providerMetadata;
  this.logprobs = options.logprobs;
@@ -4066,7 +4063,7 @@ var DefaultStreamTextResult = class {
  generateId: generateId3,
  tools
  }) {
- this.warnings = warnings;
+ this.rawWarnings = warnings;
  this.rawResponse = rawResponse;
  const { resolve: resolveUsage, promise: usagePromise } = createResolvablePromise();
  this.usage = usagePromise;
@@ -4089,11 +4086,8 @@ var DefaultStreamTextResult = class {
  this.request = requestPromise;
  const { resolve: resolveResponse, promise: responsePromise } = createResolvablePromise();
  this.response = responsePromise;
- const {
- resolve: resolveResponseMessages,
- promise: responseMessagesPromise
- } = createResolvablePromise();
- this.responseMessages = responseMessagesPromise;
+ const { resolve: resolveWarnings, promise: warningsPromise } = createResolvablePromise();
+ this.warnings = warningsPromise;
  const {
  stream: stitchableStream,
  addStream,
@@ -4247,7 +4241,7 @@ var DefaultStreamTextResult = class {
  },
  // invoke onFinish callback and resolve toolResults promise when the stream is about to close:
  async flush(controller) {
- var _a11;
+ var _a11, _b;
  const stepToolCallsJson = stepToolCalls.length > 0 ? JSON.stringify(stepToolCalls) : void 0;
  let nextStepType = "done";
  if (currentStep + 1 < maxSteps) {
@@ -4339,7 +4333,7 @@ var DefaultStreamTextResult = class {
  toolResults: stepToolResults,
  finishReason: stepFinishReason,
  usage: stepUsage,
- warnings: self.warnings,
+ warnings: self.rawWarnings,
  logprobs: stepLogProbs,
  request: stepRequest,
  response: {
@@ -4364,7 +4358,7 @@ var DefaultStreamTextResult = class {
  doStreamSpan: doStreamSpan3,
  startTimestampMs: startTimestamp2
  } = await startStep({ responseMessages });
- self.warnings = result.warnings;
+ self.rawWarnings = result.warnings;
  self.rawResponse = result.rawResponse;
  addStepStream({
  stream: result.stream,
@@ -4419,7 +4413,7 @@ var DefaultStreamTextResult = class {
  messages: responseMessages
  });
  resolveSteps(stepResults);
- resolveResponseMessages(responseMessages);
+ resolveWarnings((_b = self.rawWarnings) != null ? _b : []);
  await (onFinish == null ? void 0 : onFinish({
  finishReason: stepFinishReason,
  logprobs: stepLogProbs,
@@ -4759,15 +4753,11 @@ function experimental_createProviderRegistry(providers) {
  }
  return registry;
  }
- var experimental_createModelRegistry = experimental_createProviderRegistry;
  var DefaultProviderRegistry = class {
  constructor() {
  this.providers = {};
  }
- registerProvider({
- id,
- provider
- }) {
+ registerProvider({ id, provider }) {
  this.providers[id] = provider;
  }
  getProvider(id) {
@@ -4803,10 +4793,10 @@ var DefaultProviderRegistry = class {
  return model;
  }
  textEmbeddingModel(id) {
- var _a11, _b, _c;
+ var _a11;
  const [providerId, modelId] = this.splitId(id, "textEmbeddingModel");
  const provider = this.getProvider(providerId);
- const model = (_c = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId)) != null ? _c : "textEmbedding" in provider ? (_b = provider.textEmbedding) == null ? void 0 : _b.call(provider, modelId) : void 0;
+ const model = (_a11 = provider.textEmbeddingModel) == null ? void 0 : _a11.call(provider, modelId);
  if (model == null) {
  throw new import_provider16.NoSuchModelError({
  modelId: id,
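
Note: the registry's textEmbeddingModel lookup no longer falls back to a provider's legacy textEmbedding method, so registered providers must expose textEmbeddingModel. A sketch (the model id is illustrative):

    // Throws NoSuchModelError unless the provider implements textEmbeddingModel.
    const embeddingModel = registry.textEmbeddingModel('openai:text-embedding-3-small');
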
@@ -4916,8 +4906,6 @@ function AssistantResponse({ threadId, messageId }, process2) {
  );
  try {
  await process2({
- threadId,
- messageId,
  sendMessage,
  sendDataMessage,
  forwardStream
@@ -4940,12 +4928,10 @@ function AssistantResponse({ threadId, messageId }, process2) {
  }
  });
  }
- var experimental_AssistantResponse = AssistantResponse;
 
  // streams/langchain-adapter.ts
  var langchain_adapter_exports = {};
  __export(langchain_adapter_exports, {
- toAIStream: () => toAIStream,
  toDataStream: () => toDataStream,
  toDataStreamResponse: () => toDataStreamResponse
  });
@@ -5054,13 +5040,8 @@ function createStreamDataTransformer() {
  }
  });
  }
- var experimental_StreamData = class extends StreamData {
- };
 
  // streams/langchain-adapter.ts
- function toAIStream(stream, callbacks) {
- return toDataStream(stream, callbacks);
- }
  function toDataStream(stream, callbacks) {
  return stream.pipeThrough(
  new TransformStream({
@@ -5186,9 +5167,6 @@ function trimStartOfStream() {
  createStreamDataTransformer,
  embed,
  embedMany,
- experimental_AssistantResponse,
- experimental_StreamData,
- experimental_createModelRegistry,
  experimental_createProviderRegistry,
  experimental_customProvider,
  experimental_wrapLanguageModel,