ai 3.1.7 → 3.1.8

This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
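The headline change in 3.1.8 is a new `embed` function in AI SDK Core, together with the `EmbeddingModel`, `Embedding`, and `EmbedResult` types declared in the typings below. A minimal usage sketch (the `@ai-sdk/openai` provider and its `openai.embedding()` model factory are assumptions for illustration, not part of this diff):

```ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai'; // assumed provider package, not part of this diff

// Embed a single value; the value type is determined by the embedding model.
const { value, embedding, rawResponse } = await embed({
  model: openai.embedding('text-embedding-3-small'), // assumed model id
  value: 'sunny day at the beach',
  maxRetries: 2, // default; set to 0 to disable retries
});

console.log(embedding.length); // `embedding` is a plain number[] vector
```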
package/dist/index.d.mts CHANGED
@@ -1,10 +1,107 @@
- import { z } from 'zod';
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
+ import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';

+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV1Embedding;
+
+ /**
+ Language model that is used by the AI SDK Core functions.
+ */
+ type LanguageModel = LanguageModelV1;
+ /**
+ Reason why a language model finished generating a response.
+
+ Can be one of the following:
+ - `stop`: model generated stop sequence
+ - `length`: model generated maximum number of tokens
+ - `content-filter`: content filter violation stopped the model
+ - `tool-calls`: model triggered tool calls
+ - `error`: model stopped because of an error
+ - `other`: model stopped for other reasons
+ */
+ type FinishReason = LanguageModelV1FinishReason;
+ /**
+ Log probabilities for each token and its top log probabilities.
+ */
+ type LogProbs = LanguageModelV1LogProbs;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type CallWarning = LanguageModelV1CallWarning;
+
+ /**
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+ @param model - The embedding model to use.
+ @param value - The value that should be embedded.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns A result object that contains the embedding, the value, and additional information.
+ */
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
+ /**
+ The embedding model to use.
+ */
+ model: EmbeddingModel<VALUE>;
+ /**
+ The value that should be embedded.
+ */
+ value: VALUE;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ }): Promise<EmbedResult<VALUE>>;
+ /**
+ The result of a `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ declare class EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor(options: {
+ value: VALUE;
+ embedding: Embedding;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ });
+ }
+
  type TokenUsage = {
  promptTokens: number;
  completionTokens: number;
@@ -242,32 +339,6 @@ type Prompt = {
  messages?: Array<CoreMessage>;
  };

- /**
- Language model that is used by the AI SDK Core functions.
- */
- type LanguageModel = LanguageModelV1;
- /**
- Reason why a language model finished generating a response.
-
- Can be one of the following:
- - `stop`: model generated stop sequence
- - `length`: model generated maximum number of tokens
- - `content-filter`: content filter violation stopped the model
- - `tool-calls`: model triggered tool calls
- - `error`: model stopped because of an error
- - `other`: model stopped for other reasons
- */
- type FinishReason = LanguageModelV1FinishReason;
- /**
- Log probabilities for each token and its top log probabilities.
- */
- type LogProbs = LanguageModelV1LogProbs;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type CallWarning = LanguageModelV1CallWarning;
-
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.

@@ -2113,4 +2184,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
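The `index.d.ts` changes below carry the same declarations for the CommonJS build. Since `embed` accepts an `abortSignal` (and, per the runtime code further down, abort errors bypass the retry loop), a call can be cancelled with a timeout. A sketch, where `embedWithTimeout` is a hypothetical helper, not part of this diff:

```ts
import { embed, type EmbeddingModel } from 'ai';

// Hypothetical helper: cancel an embed call that takes longer than timeoutInMs.
async function embedWithTimeout<VALUE>(
  model: EmbeddingModel<VALUE>,
  value: VALUE,
  timeoutInMs: number,
) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutInMs);
  try {
    // An abort rejects immediately; isAbortError short-circuits the retries.
    return await embed({ model, value, abortSignal: controller.signal });
  } finally {
    clearTimeout(timer);
  }
}
```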
package/dist/index.d.ts CHANGED
@@ -1,10 +1,107 @@
- import { z } from 'zod';
- import { LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
+ import { EmbeddingModelV1, EmbeddingModelV1Embedding, LanguageModelV1, LanguageModelV1FinishReason, LanguageModelV1LogProbs, LanguageModelV1CallWarning } from '@ai-sdk/provider';
  export { APICallError, EmptyResponseBodyError, InvalidArgumentError, InvalidDataContentError, InvalidPromptError, InvalidResponseDataError, InvalidToolArgumentsError, JSONParseError, LoadAPIKeyError, NoObjectGeneratedError, NoSuchToolError, RetryError, ToolCallParseError, TypeValidationError, UnsupportedFunctionalityError, UnsupportedJSONSchemaError } from '@ai-sdk/provider';
+ import { z } from 'zod';
  import { ServerResponse } from 'node:http';
  import { AssistantStream } from 'openai/lib/AssistantStream';
  import { Run } from 'openai/resources/beta/threads/runs/runs';

+ /**
+ Embedding model that is used by the AI SDK Core functions.
+ */
+ type EmbeddingModel<VALUE> = EmbeddingModelV1<VALUE>;
+ /**
+ Embedding.
+ */
+ type Embedding = EmbeddingModelV1Embedding;
+
+ /**
+ Language model that is used by the AI SDK Core functions.
+ */
+ type LanguageModel = LanguageModelV1;
+ /**
+ Reason why a language model finished generating a response.
+
+ Can be one of the following:
+ - `stop`: model generated stop sequence
+ - `length`: model generated maximum number of tokens
+ - `content-filter`: content filter violation stopped the model
+ - `tool-calls`: model triggered tool calls
+ - `error`: model stopped because of an error
+ - `other`: model stopped for other reasons
+ */
+ type FinishReason = LanguageModelV1FinishReason;
+ /**
+ Log probabilities for each token and its top log probabilities.
+ */
+ type LogProbs = LanguageModelV1LogProbs;
+ /**
+ Warning from the model provider for this call. The call will proceed, but e.g.
+ some settings might not be supported, which can lead to suboptimal results.
+ */
+ type CallWarning = LanguageModelV1CallWarning;
+
+ /**
+ Embed a value using an embedding model. The type of the value is defined by the embedding model.
+
+ @param model - The embedding model to use.
+ @param value - The value that should be embedded.
+
+ @param maxRetries - Maximum number of retries. Set to 0 to disable retries. Default: 2.
+ @param abortSignal - An optional abort signal that can be used to cancel the call.
+
+ @returns A result object that contains the embedding, the value, and additional information.
+ */
+ declare function embed<VALUE>({ model, value, maxRetries, abortSignal, }: {
+ /**
+ The embedding model to use.
+ */
+ model: EmbeddingModel<VALUE>;
+ /**
+ The value that should be embedded.
+ */
+ value: VALUE;
+ /**
+ Maximum number of retries per embedding model call. Set to 0 to disable retries.
+
+ @default 2
+ */
+ maxRetries?: number;
+ /**
+ Abort signal.
+ */
+ abortSignal?: AbortSignal;
+ }): Promise<EmbedResult<VALUE>>;
+ /**
+ The result of a `embed` call.
+ It contains the embedding, the value, and additional information.
+ */
+ declare class EmbedResult<VALUE> {
+ /**
+ The value that was embedded.
+ */
+ readonly value: VALUE;
+ /**
+ The embedding of the value.
+ */
+ readonly embedding: Embedding;
+ /**
+ Optional raw response data.
+ */
+ readonly rawResponse?: {
+ /**
+ Response headers.
+ */
+ headers?: Record<string, string>;
+ };
+ constructor(options: {
+ value: VALUE;
+ embedding: Embedding;
+ rawResponse?: {
+ headers?: Record<string, string>;
+ };
+ });
+ }
+
  type TokenUsage = {
  promptTokens: number;
  completionTokens: number;
@@ -242,32 +339,6 @@ type Prompt = {
  messages?: Array<CoreMessage>;
  };

- /**
- Language model that is used by the AI SDK Core functions.
- */
- type LanguageModel = LanguageModelV1;
- /**
- Reason why a language model finished generating a response.
-
- Can be one of the following:
- - `stop`: model generated stop sequence
- - `length`: model generated maximum number of tokens
- - `content-filter`: content filter violation stopped the model
- - `tool-calls`: model triggered tool calls
- - `error`: model stopped because of an error
- - `other`: model stopped for other reasons
- */
- type FinishReason = LanguageModelV1FinishReason;
- /**
- Log probabilities for each token and its top log probabilities.
- */
- type LogProbs = LanguageModelV1LogProbs;
- /**
- Warning from the model provider for this call. The call will proceed, but e.g.
- some settings might not be supported, which can lead to suboptimal results.
- */
- type CallWarning = LanguageModelV1CallWarning;
-
  /**
  Generate a structured, typed object for a given prompt and schema using a language model.

@@ -2113,4 +2184,4 @@ declare function streamToResponse(res: ReadableStream, response: ServerResponse,
  status?: number;
  }): void;

- export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
+ export { AIStream, AIStreamCallbacksAndOptions, AIStreamParser, AIStreamParserOptions, AWSBedrockAnthropicMessagesStream, AWSBedrockAnthropicStream, AWSBedrockCohereStream, AWSBedrockLlama2Stream, AWSBedrockStream, AnthropicStream, AssistantContent, AssistantMessage, AssistantResponse, CallWarning, ChatRequest, ChatRequestOptions, CohereStream, CompletionUsage, CoreAssistantMessage, CoreMessage, CoreSystemMessage, CoreTool, CoreToolMessage, CoreUserMessage, CreateMessage, DataContent, DataMessage, DeepPartial, EmbedResult, Embedding, EmbeddingModel, ExperimentalAssistantMessage, ExperimentalMessage, ExperimentalTool, ExperimentalToolMessage, ExperimentalUserMessage, FinishReason, Function, FunctionCall$1 as FunctionCall, FunctionCallHandler, FunctionCallPayload, GenerateObjectResult, GenerateTextResult, GoogleGenerativeAIStream, HuggingFaceStream, IdGenerator, ImagePart, InkeepAIStreamCallbacksAndOptions, InkeepChatResultCallbacks, InkeepOnFinalMetadata, InkeepStream, JSONValue, langchainAdapter as LangChainAdapter, LangChainStream, LanguageModel, LogProbs, Message$1 as Message, MistralStream, ObjectStreamPart, ObjectStreamPartInput, OpenAIStream, OpenAIStreamCallbacks, ReactResponseRow, ReplicateStream, RequestOptions, StreamData, StreamObjectResult, StreamPart, StreamString, StreamTextResult, StreamingTextResponse, TextPart$1 as TextPart, TextStreamPart, Tool, ToolCall, ToolCallHandler, ToolCallPart, ToolCallPayload, ToolChoice, ToolContent, ToolInvocation, ToolResultPart, UseChatOptions, UseCompletionOptions, UserContent, convertDataContentToBase64String, convertDataContentToUint8Array, convertToCoreMessages, createCallbacksTransformer, createChunkDecoder, createEventStreamTransformer, createStreamDataTransformer, embed, experimental_AssistantResponse, experimental_StreamData, experimental_StreamingReactResponse, experimental_generateObject, experimental_generateText, experimental_streamObject, experimental_streamText, formatStreamPart, generateId, generateObject, generateText, isStreamStringEqualToType, generateId as nanoid, parseStreamPart, readDataStream, readableFromAsyncIterable, streamObject, streamText, streamToResponse, tool, trimStartOfStreamHelper };
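In `index.js` below, the bundler hoists `retryWithExponentialBackoff` (and its `delay` helper) ahead of the new `embed` implementation and renumbers the `import_provider*` bindings throughout the file; the retry logic itself is unchanged. With the bundled defaults (`maxRetries = 2`, `initialDelayInMs = 2e3`, `backoffFactor = 2`), a retryable `APICallError` is retried after 2 s and again after 4 s before a `RetryError` with reason `maxRetriesExceeded` is thrown. A sketch of the delay schedule implied by that code:

```ts
// Delay before retry attempt n (1-based), per the defaults in the bundled helper below.
const retryDelayInMs = (n: number, initialDelayInMs = 2000, backoffFactor = 2) =>
  initialDelayInMs * backoffFactor ** (n - 1);

console.log([1, 2, 3].map((n) => retryDelayInMs(n))); // [2000, 4000, 8000]
```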
package/dist/index.js CHANGED
@@ -40,6 +40,7 @@ __export(streams_exports, {
  AnthropicStream: () => AnthropicStream,
  AssistantResponse: () => AssistantResponse,
  CohereStream: () => CohereStream,
+ EmbedResult: () => EmbedResult,
  EmptyResponseBodyError: () => import_provider8.EmptyResponseBodyError,
  GenerateObjectResult: () => GenerateObjectResult,
  GenerateTextResult: () => GenerateTextResult,
@@ -76,6 +77,7 @@ __export(streams_exports, {
  createChunkDecoder: () => createChunkDecoder,
  createEventStreamTransformer: () => createEventStreamTransformer,
  createStreamDataTransformer: () => createStreamDataTransformer,
+ embed: () => embed,
  experimental_AssistantResponse: () => experimental_AssistantResponse,
  experimental_StreamData: () => experimental_StreamData,
  experimental_StreamingReactResponse: () => experimental_StreamingReactResponse,
@@ -100,6 +102,96 @@ __export(streams_exports, {
  });
  module.exports = __toCommonJS(streams_exports);

+ // core/util/retry-with-exponential-backoff.ts
+ var import_provider = require("@ai-sdk/provider");
+ var import_provider_utils = require("@ai-sdk/provider-utils");
+
+ // core/util/delay.ts
+ async function delay(delayInMs) {
+ return new Promise((resolve) => setTimeout(resolve, delayInMs));
+ }
+
+ // core/util/retry-with-exponential-backoff.ts
+ var retryWithExponentialBackoff = ({
+ maxRetries = 2,
+ initialDelayInMs = 2e3,
+ backoffFactor = 2
+ } = {}) => async (f) => _retryWithExponentialBackoff(f, {
+ maxRetries,
+ delayInMs: initialDelayInMs,
+ backoffFactor
+ });
+ async function _retryWithExponentialBackoff(f, {
+ maxRetries,
+ delayInMs,
+ backoffFactor
+ }, errors = []) {
+ try {
+ return await f();
+ } catch (error) {
+ if ((0, import_provider_utils.isAbortError)(error)) {
+ throw error;
+ }
+ if (maxRetries === 0) {
+ throw error;
+ }
+ const errorMessage = (0, import_provider_utils.getErrorMessage)(error);
+ const newErrors = [...errors, error];
+ const tryNumber = newErrors.length;
+ if (tryNumber > maxRetries) {
+ throw new import_provider.RetryError({
+ message: `Failed after ${tryNumber} attempts. Last error: ${errorMessage}`,
+ reason: "maxRetriesExceeded",
+ errors: newErrors
+ });
+ }
+ if (error instanceof Error && import_provider.APICallError.isAPICallError(error) && error.isRetryable === true && tryNumber <= maxRetries) {
+ await delay(delayInMs);
+ return _retryWithExponentialBackoff(
+ f,
+ { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
+ newErrors
+ );
+ }
+ if (tryNumber === 1) {
+ throw error;
+ }
+ throw new import_provider.RetryError({
+ message: `Failed after ${tryNumber} attempts with non-retryable error: '${errorMessage}'`,
+ reason: "errorNotRetryable",
+ errors: newErrors
+ });
+ }
+ }
+
+ // core/embed/embed.ts
+ async function embed({
+ model,
+ value,
+ maxRetries,
+ abortSignal
+ }) {
+ const retry = retryWithExponentialBackoff({ maxRetries });
+ const modelResponse = await retry(
+ () => model.doEmbed({
+ values: [value],
+ abortSignal
+ })
+ );
+ return new EmbedResult({
+ value,
+ embedding: modelResponse.embeddings[0],
+ rawResponse: modelResponse.rawResponse
+ });
+ }
+ var EmbedResult = class {
+ constructor(options) {
+ this.value = options.value;
+ this.embedding = options.embedding;
+ this.rawResponse = options.rawResponse;
+ }
+ };
+
  // core/generate-object/generate-object.ts
  var import_provider5 = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
@@ -130,16 +222,16 @@ function detectImageMimeType(image) {
  }

  // core/prompt/data-content.ts
- var import_provider = require("@ai-sdk/provider");
- var import_provider_utils = require("@ai-sdk/provider-utils");
+ var import_provider2 = require("@ai-sdk/provider");
+ var import_provider_utils2 = require("@ai-sdk/provider-utils");
  function convertDataContentToBase64String(content) {
  if (typeof content === "string") {
  return content;
  }
  if (content instanceof ArrayBuffer) {
- return (0, import_provider_utils.convertUint8ArrayToBase64)(new Uint8Array(content));
+ return (0, import_provider_utils2.convertUint8ArrayToBase64)(new Uint8Array(content));
  }
- return (0, import_provider_utils.convertUint8ArrayToBase64)(content);
+ return (0, import_provider_utils2.convertUint8ArrayToBase64)(content);
  }
  function convertDataContentToUint8Array(content) {
  if (content instanceof Uint8Array) {
@@ -147,9 +239,9 @@ function convertDataContentToUint8Array(content) {
  }
  if (typeof content === "string") {
  try {
- return (0, import_provider_utils.convertBase64ToUint8Array)(content);
+ return (0, import_provider_utils2.convertBase64ToUint8Array)(content);
  } catch (error) {
- throw new import_provider.InvalidDataContentError({
+ throw new import_provider2.InvalidDataContentError({
  message: "Invalid data content. Content string is not a base64-encoded image.",
  content,
  cause: error
@@ -159,7 +251,7 @@ function convertDataContentToUint8Array(content) {
  if (content instanceof ArrayBuffer) {
  return new Uint8Array(content);
  }
- throw new import_provider.InvalidDataContentError({ content });
+ throw new import_provider2.InvalidDataContentError({ content });
  }

  // core/prompt/convert-to-language-model-prompt.ts
@@ -247,16 +339,16 @@ function convertToLanguageModelPrompt(prompt) {
  }

  // core/prompt/get-validated-prompt.ts
- var import_provider2 = require("@ai-sdk/provider");
+ var import_provider3 = require("@ai-sdk/provider");
  function getValidatedPrompt(prompt) {
  if (prompt.prompt == null && prompt.messages == null) {
- throw new import_provider2.InvalidPromptError({
+ throw new import_provider3.InvalidPromptError({
  prompt,
  message: "prompt or messages must be defined"
  });
  }
  if (prompt.prompt != null && prompt.messages != null) {
- throw new import_provider2.InvalidPromptError({
+ throw new import_provider3.InvalidPromptError({
  prompt,
  message: "prompt and messages cannot be defined at the same time"
  });
@@ -276,7 +368,7 @@ function getValidatedPrompt(prompt) {
  }

  // core/prompt/prepare-call-settings.ts
- var import_provider3 = require("@ai-sdk/provider");
+ var import_provider4 = require("@ai-sdk/provider");
  function prepareCallSettings({
  maxTokens,
  temperature,
@@ -288,14 +380,14 @@
  }) {
  if (maxTokens != null) {
  if (!Number.isInteger(maxTokens)) {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be an integer"
  });
  }
  if (maxTokens < 1) {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "maxTokens",
  value: maxTokens,
  message: "maxTokens must be >= 1"
@@ -304,7 +396,7 @@
  }
  }
  if (temperature != null) {
  if (typeof temperature !== "number") {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "temperature",
  value: temperature,
  message: "temperature must be a number"
@@ -313,7 +405,7 @@
  }
  if (topP != null) {
  if (typeof topP !== "number") {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "topP",
  value: topP,
  message: "topP must be a number"
@@ -322,7 +414,7 @@
  }
  if (presencePenalty != null) {
  if (typeof presencePenalty !== "number") {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "presencePenalty",
  value: presencePenalty,
  message: "presencePenalty must be a number"
@@ -331,7 +423,7 @@
  }
  if (frequencyPenalty != null) {
  if (typeof frequencyPenalty !== "number") {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "frequencyPenalty",
  value: frequencyPenalty,
  message: "frequencyPenalty must be a number"
@@ -340,7 +432,7 @@
  }
  if (seed != null) {
  if (!Number.isInteger(seed)) {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "seed",
  value: seed,
  message: "seed must be an integer"
@@ -349,14 +441,14 @@
  }
  if (maxRetries != null) {
  if (!Number.isInteger(maxRetries)) {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "maxRetries",
  value: maxRetries,
  message: "maxRetries must be an integer"
  });
  }
  if (maxRetries < 0) {
- throw new import_provider3.InvalidArgumentError({
+ throw new import_provider4.InvalidArgumentError({
  parameter: "maxRetries",
  value: maxRetries,
  message: "maxRetries must be >= 0"
@@ -380,68 +472,6 @@ function convertZodToJSONSchema(zodSchema) {
  return (0, import_zod_to_json_schema.default)(zodSchema);
  }

- // core/util/retry-with-exponential-backoff.ts
- var import_provider4 = require("@ai-sdk/provider");
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
-
- // core/util/delay.ts
- async function delay(delayInMs) {
- return new Promise((resolve) => setTimeout(resolve, delayInMs));
- }
-
- // core/util/retry-with-exponential-backoff.ts
- var retryWithExponentialBackoff = ({
- maxRetries = 2,
- initialDelayInMs = 2e3,
- backoffFactor = 2
- } = {}) => async (f) => _retryWithExponentialBackoff(f, {
- maxRetries,
- delayInMs: initialDelayInMs,
- backoffFactor
- });
- async function _retryWithExponentialBackoff(f, {
- maxRetries,
- delayInMs,
- backoffFactor
- }, errors = []) {
- try {
- return await f();
- } catch (error) {
- if ((0, import_provider_utils2.isAbortError)(error)) {
- throw error;
- }
- if (maxRetries === 0) {
- throw error;
- }
- const errorMessage = (0, import_provider_utils2.getErrorMessage)(error);
- const newErrors = [...errors, error];
- const tryNumber = newErrors.length;
- if (tryNumber > maxRetries) {
- throw new import_provider4.RetryError({
- message: `Failed after ${tryNumber} attempts. Last error: ${errorMessage}`,
- reason: "maxRetriesExceeded",
- errors: newErrors
- });
- }
- if (error instanceof Error && import_provider4.APICallError.isAPICallError(error) && error.isRetryable === true && tryNumber <= maxRetries) {
- await delay(delayInMs);
- return _retryWithExponentialBackoff(
- f,
- { maxRetries, delayInMs: backoffFactor * delayInMs, backoffFactor },
- newErrors
- );
- }
- if (tryNumber === 1) {
- throw error;
- }
- throw new import_provider4.RetryError({
- message: `Failed after ${tryNumber} attempts with non-retryable error: '${errorMessage}'`,
- reason: "errorNotRetryable",
- errors: newErrors
- });
- }
- }
-
  // core/generate-object/inject-json-schema-into-system.ts
  var DEFAULT_SCHEMA_PREFIX = "JSON schema:";
  var DEFAULT_SCHEMA_SUFFIX = "You MUST answer with a JSON object that matches the JSON schema above.";
@@ -3292,6 +3322,7 @@ function streamToResponse(res, response, init) {
  AnthropicStream,
  AssistantResponse,
  CohereStream,
+ EmbedResult,
  EmptyResponseBodyError,
  GenerateObjectResult,
  GenerateTextResult,
@@ -3328,6 +3359,7 @@ function streamToResponse(res, response, init) {
  createChunkDecoder,
  createEventStreamTransformer,
  createStreamDataTransformer,
+ embed,
  experimental_AssistantResponse,
  experimental_StreamData,
  experimental_StreamingReactResponse,