ai 3.0.12 → 3.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/ai-model-specification/dist/index.d.mts +704 -0
- package/ai-model-specification/dist/index.d.ts +704 -0
- package/ai-model-specification/dist/index.js +806 -0
- package/ai-model-specification/dist/index.js.map +1 -0
- package/ai-model-specification/dist/index.mjs +742 -0
- package/ai-model-specification/dist/index.mjs.map +1 -0
- package/dist/index.d.mts +686 -4
- package/dist/index.d.ts +686 -4
- package/dist/index.js +1723 -15
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1700 -15
- package/dist/index.mjs.map +1 -1
- package/mistral/dist/index.d.mts +367 -0
- package/mistral/dist/index.d.ts +367 -0
- package/mistral/dist/index.js +936 -0
- package/mistral/dist/index.js.map +1 -0
- package/mistral/dist/index.mjs +900 -0
- package/mistral/dist/index.mjs.map +1 -0
- package/openai/dist/index.d.mts +430 -0
- package/openai/dist/index.d.ts +430 -0
- package/openai/dist/index.js +1355 -0
- package/openai/dist/index.js.map +1 -0
- package/openai/dist/index.mjs +1319 -0
- package/openai/dist/index.mjs.map +1 -0
- package/package.json +33 -7
- package/prompts/dist/index.d.mts +13 -1
- package/prompts/dist/index.d.ts +13 -1
- package/prompts/dist/index.js +13 -0
- package/prompts/dist/index.js.map +1 -1
- package/prompts/dist/index.mjs +12 -0
- package/prompts/dist/index.mjs.map +1 -1
- package/react/dist/index.d.mts +8 -4
- package/react/dist/index.d.ts +8 -4
- package/react/dist/index.js +36 -34
- package/react/dist/index.js.map +1 -1
- package/react/dist/index.mjs +36 -34
- package/react/dist/index.mjs.map +1 -1
- package/rsc/dist/index.d.ts +45 -8
- package/rsc/dist/rsc-server.d.mts +45 -8
- package/rsc/dist/rsc-server.mjs +67 -13
- package/rsc/dist/rsc-server.mjs.map +1 -1
- package/rsc/dist/rsc-shared.d.mts +5 -8
- package/rsc/dist/rsc-shared.mjs +23 -2
- package/rsc/dist/rsc-shared.mjs.map +1 -1
- package/solid/dist/index.js +29 -27
- package/solid/dist/index.js.map +1 -1
- package/solid/dist/index.mjs +29 -27
- package/solid/dist/index.mjs.map +1 -1
- package/svelte/dist/index.js +31 -29
- package/svelte/dist/index.js.map +1 -1
- package/svelte/dist/index.mjs +31 -29
- package/svelte/dist/index.mjs.map +1 -1
- package/vue/dist/index.js +29 -27
- package/vue/dist/index.js.map +1 -1
- package/vue/dist/index.mjs +29 -27
- package/vue/dist/index.mjs.map +1 -1
@@ -0,0 +1 @@
+{"version":3,"sources":["../../ai-model-specification/errors/api-call-error.ts","../../ai-model-specification/util/generate-id.ts","../../ai-model-specification/util/get-error-message.ts","../../ai-model-specification/errors/load-api-key-error.ts","../../ai-model-specification/util/load-api-key.ts","../../ai-model-specification/util/parse-json.ts","../../ai-model-specification/errors/json-parse-error.ts","../../ai-model-specification/errors/type-validation-error.ts","../../ai-model-specification/util/validate-types.ts","../../ai-model-specification/util/post-to-api.ts","../../ai-model-specification/util/response-handler.ts","../../ai-model-specification/errors/no-response-body-error.ts","../../ai-model-specification/errors/unsupported-functionality-error.ts","../mistral-chat-language-model.ts","../convert-to-mistral-chat-messages.ts","../map-mistral-finish-reason.ts","../mistral-error.ts","../mistral-facade.ts"],"sourcesContent":[…],"mappings":"…","names":["z","generateId","z"]}
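The sources embedded in this generated map are the new Mistral provider bundle (facade, chat language model, message conversion, error handling). A rough usage sketch, assuming the new ai/mistral subpath export and a placeholder model id:

import { Mistral } from 'ai/mistral'; // subpath export assumed from the new package/mistral/dist entry

// Falls back to the MISTRAL_API_KEY environment variable when apiKey is omitted.
const provider = new Mistral({ apiKey: process.env.MISTRAL_API_KEY });

// 'mistral-small-latest' is a placeholder id; safePrompt maps to Mistral's safe_prompt flag.
const model = provider.chat('mistral-small-latest', { safePrompt: true });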
@@ -0,0 +1,430 @@
type JsonSchema = Record<string, unknown>;

type LanguageModelV1CallSettings = {
    /**
     * Maximum number of tokens to generate.
     */
    maxTokens?: number;
    /**
     * Temperature setting. This is a number between 0 (almost no randomness) and
     * 1 (very random).
     *
     * Different LLM providers have different temperature
     * scales, so they'd need to map it (without mapping, the same temperature has
     * different effects on different models). The provider can also chose to map
     * this to topP, potentially even using a custom setting on their model.
     *
     * Note: This is an example of a setting that requires a clear specification of
     * the semantics.
     */
    temperature?: number;
    /**
     * Nucleus sampling. This is a number between 0 and 1.
     *
     * E.g. 0.1 would mean that only tokens with the top 10% probability mass
     * are considered.
     *
     * It is recommended to set either `temperature` or `topP`, but not both.
     */
    topP?: number;
    /**
     * Presence penalty setting. It affects the likelihood of the model to
     * repeat information that is already in the prompt.
     *
     * The presence penalty is a number between -1 (increase repetition)
     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    presencePenalty?: number;
    /**
     * Frequency penalty setting. It affects the likelihood of the model
     * to repeatedly use the same words or phrases.
     *
     * The frequency penalty is a number between -1 (increase repetition)
     * and 1 (maximum penalty, decrease repetition). 0 means no penalty.
     */
    frequencyPenalty?: number;
    /**
     * The seed (integer) to use for random sampling. If set and supported
     * by the model, calls will generate deterministic results.
     */
    seed?: number;
    /**
     * Abort signal for cancelling the operation.
     */
    abortSignal?: AbortSignal;
};

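For orientation, a minimal settings object exercising these fields could look like the sketch below; the values are placeholders, and only one of `temperature`/`topP` is set, as the comment above recommends.

const settings: LanguageModelV1CallSettings = {
    maxTokens: 512,
    temperature: 0.3, // 0..1 scale; providers map this onto their own ranges
    seed: 42, // deterministic sampling where the model supports it
    abortSignal: new AbortController().signal,
};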
/**
 * A tool has a name, a description, and a set of parameters.
 *
 * Note: this is **not** the user-facing tool definition. The AI SDK methods will
 * map the user-facing tool definitions to this format.
 */
type LanguageModelV1FunctionTool = {
    /**
     * The type of the tool. Only functions for now, but this gives us room to
     * add more specific tool types in the future and use a discriminated union.
     */
    type: 'function';
    /**
     * The name of the tool. Unique within this model call.
     */
    name: string;
    description?: string;
    parameters: JsonSchema;
};

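A sketch of such a tool definition; the tool name and schema are made up for illustration, and `parameters` is a JSON Schema object.

const weatherTool: LanguageModelV1FunctionTool = {
    type: 'function',
    name: 'getWeather',
    description: 'Look up the current weather for a city.',
    parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
    },
};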
/**
 * A prompt is a list of messages.
 *
 * Note: Not all models and prompt formats support multi-modal inputs and
 * tool calls. The validation happens at runtime.
 *
 * Note: This is not a user-facing prompt. The AI SDK methods will map the
 * user-facing prompt types such as chat or instruction prompts to this format.
 */
type LanguageModelV1Prompt = Array<LanguageModelV1Message>;
type LanguageModelV1Message = {
    role: 'system';
    content: string;
} | {
    role: 'user';
    content: Array<LanguageModelV1TextPart | LanguageModelV1ImagePart>;
} | {
    role: 'assistant';
    content: Array<LanguageModelV1TextPart | LanguageModelV1ToolCallPart>;
} | {
    role: 'tool';
    content: Array<LanguageModelV1ToolResultPart>;
};
interface LanguageModelV1TextPart {
    type: 'text';
    /**
     * The text content.
     */
    text: string;
}
interface LanguageModelV1ImagePart {
    type: 'image';
    /**
     * Image data as a Uint8Array (e.g. from a Blob or Buffer) or a URL.
     */
    image: Uint8Array | URL;
    /**
     * Optional mime type of the image.
     */
    mimeType?: string;
}
interface LanguageModelV1ToolCallPart {
    type: 'tool-call';
    toolCallId: string;
    toolName: string;
    args: unknown;
}
interface LanguageModelV1ToolResultPart {
    type: 'tool-result';
    toolCallId: string;
    toolName: string;
    result: unknown;
}

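As a rough illustration, a prompt in this format can carry a full tool round-trip; the message text, call ids, and result payload below are invented.

const prompt: LanguageModelV1Prompt = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: [{ type: 'text', text: 'What is the weather in Berlin?' }] },
    {
        role: 'assistant',
        content: [{ type: 'tool-call', toolCallId: 'call-1', toolName: 'getWeather', args: { city: 'Berlin' } }],
    },
    {
        role: 'tool',
        content: [{ type: 'tool-result', toolCallId: 'call-1', toolName: 'getWeather', result: { celsius: 18 } }],
    },
];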
type LanguageModelV1CallOptions = LanguageModelV1CallSettings & {
    /**
     * Whether the user provided the input as messages or as
     * a prompt. This can help guide non-chat models in the
     * expansion, bc different expansions can be needed for
     * chat/non-chat use cases.
     */
    inputFormat: 'messages' | 'prompt';
    /**
     * The mode affects the behavior of the language model. It is required to
     * support provider-independent streaming and generation of structured objects.
     * The model can take this information and e.g. configure json mode, the correct
     * low level grammar, etc. It can also be used to optimize the efficiency of the
     * streaming, e.g. tool-delta stream parts are only needed in the
     * object-tool mode.
     */
    mode: {
        type: 'regular';
        tools?: Array<LanguageModelV1FunctionTool>;
    } | {
        type: 'object-json';
    } | {
        type: 'object-grammar';
        schema: JsonSchema;
    } | {
        type: 'object-tool';
        tool: LanguageModelV1FunctionTool;
    };
    /**
     * A language mode prompt is a standardized prompt type.
     *
     * Note: This is **not** the user-facing prompt. The AI SDK methods will map the
     * user-facing prompt types such as chat or instruction prompts to this format.
     * That approach allows us to evolve the user facing prompts without breaking
     * the language model interface.
     */
    prompt: LanguageModelV1Prompt;
};

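Putting the pieces together, a regular-mode call with tools could be assembled as sketched below, reusing the hypothetical `weatherTool` and `prompt` from the earlier sketches.

const options: LanguageModelV1CallOptions = {
    inputFormat: 'messages',
    mode: { type: 'regular', tools: [weatherTool] },
    prompt,
    temperature: 0.3,
    maxTokens: 512,
};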
/**
 * Warning from the model provider for this call. The call will proceed, but e.g.
 * some settings might not be supported, which can lead to suboptimal results.
 */
type LanguageModelV1CallWarning = {
    type: 'unsupported-setting';
    setting: keyof LanguageModelV1CallSettings;
} | {
    type: 'other';
    message: string;
};

type LanguageModelV1FinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other';

type LanguageModelV1FunctionToolCall = {
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    /**
     * Stringified JSON object with the tool call arguments. Must match the
     * parameters schema of the tool.
     */
    args: string;
};

|
195
|
+
type LanguageModelV1 = {
    /**
     * The language model must specify which language model interface
     * version it implements. This will allow us to evolve the language
     * model interface and retain backwards compatibility. The different
     * implementation versions can be handled as a discriminated union
     * on our side.
     */
    readonly specificationVersion: 'v1';
    /**
     * Name of the provider for logging purposes.
     */
    readonly provider: string;
    /**
     * Provider-specific model ID for logging purposes.
     */
    readonly modelId: string;
    /**
     * Default object generation mode that should be used with this model when
     * no mode is specified. Should be the mode with the best results for this
     * model. `undefined` can be returned if object generation is not supported.
     *
     * This is needed to generate the best objects possible w/o requiring the
     * user to explicitly specify the object generation mode.
     */
    readonly defaultObjectGenerationMode: 'json' | 'tool' | 'grammar' | undefined;
    /**
     * Generates a language model output (non-streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     */
    doGenerate(options: LanguageModelV1CallOptions): PromiseLike<{
        /**
         * Text that the model has generated. Can be undefined if the model
         * has only generated tool calls.
         */
        text?: string;
        /**
         * Tool calls that the model has generated. Can be undefined if the
         * model has only generated text.
         */
        toolCalls?: Array<LanguageModelV1FunctionToolCall>;
        /**
         * Finish reason.
         */
        finishReason: LanguageModelV1FinishReason;
        /**
         * Usage information.
         */
        usage: {
            promptTokens: number;
            completionTokens: number;
        };
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        warnings?: LanguageModelV1CallWarning[];
    }>;
    /**
     * Generates a language model output (streaming).
     *
     * Naming: "do" prefix to prevent accidental direct usage of the method
     * by the user.
     *
     * @return A stream of higher-level language model output parts.
     */
    doStream(options: LanguageModelV1CallOptions): PromiseLike<{
        stream: ReadableStream<LanguageModelV1StreamPart>;
        /**
         * Raw prompt and setting information for observability provider integration.
         */
        rawCall: {
            /**
             * Raw prompt after expansion and conversion to the format that the
             * provider uses to send the information to their API.
             */
            rawPrompt: unknown;
            /**
             * Raw settings that are used for the API call. Includes provider-specific
             * settings.
             */
            rawSettings: Record<string, unknown>;
        };
        warnings?: LanguageModelV1CallWarning[];
    }>;
};
type LanguageModelV1StreamPart = {
    type: 'text-delta';
    textDelta: string;
} | ({
    type: 'tool-call';
} & LanguageModelV1FunctionToolCall) | {
    type: 'tool-call-delta';
    toolCallType: 'function';
    toolCallId: string;
    toolName: string;
    argsTextDelta: string;
} | {
    type: 'finish';
    finishReason: LanguageModelV1FinishReason;
    usage: {
        promptTokens: number;
        completionTokens: number;
    };
} | {
    type: 'error';
    error: unknown;
};
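A minimal sketch of how a caller could consume doStream for any LanguageModelV1 implementation, based only on the declarations above: read the web ReadableStream part by part, print text deltas, parse the stringified tool-call args, and report usage on finish. The model and options arguments are assumed to be supplied by the caller (for example the values sketched earlier).

// Hypothetical consumer of the streaming interface declared above.
async function logStream(model: LanguageModelV1, options: LanguageModelV1CallOptions) {
  const { stream, warnings } = await model.doStream(options);
  if (warnings?.length) console.warn('call warnings:', warnings);

  const reader = stream.getReader();
  while (true) {
    const { done, value: part } = await reader.read();
    if (done) break;

    switch (part.type) {
      case 'text-delta':
        process.stdout.write(part.textDelta); // incremental text
        break;
      case 'tool-call':
        // args is a stringified JSON object per LanguageModelV1FunctionToolCall.
        console.log('tool call:', part.toolName, JSON.parse(part.args));
        break;
      case 'finish':
        console.log('\nfinished:', part.finishReason, part.usage);
        break;
      case 'error':
        console.error('stream error:', part.error);
        break;
    }
  }
}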
type OpenAIChatModelId = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-0125-preview' | 'gpt-4-vision-preview' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | (string & {});
interface OpenAIChatSettings {
    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     *
     * Accepts a JSON object that maps tokens (specified by their token ID in
     * the GPT tokenizer) to an associated bias value from -100 to 100. You
     * can use this tokenizer tool to convert text to token IDs. Mathematically,
     * the bias is added to the logits generated by the model prior to sampling.
     * The exact effect will vary per model, but values between -1 and 1 should
     * decrease or increase likelihood of selection; values like -100 or 100
     * should result in a ban or exclusive selection of the relevant token.
     *
     * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
     * token from being generated.
     */
    logitBias?: Record<number, number>;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to
     * monitor and detect abuse. Learn more.
     */
    user?: string;
}
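The settings object below is a small illustration of the two chat settings declared above; it reuses the <|endoftext|> ban from the logitBias documentation, and the user identifier is a made-up placeholder.

// Illustrative OpenAIChatSettings value (placeholder user id).
const exampleChatSettings: OpenAIChatSettings = {
  logitBias: { 50256: -100 }, // prevent the <|endoftext|> token from being generated
  user: 'user-1234',
};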
type OpenAIChatConfig = {
    provider: string;
    baseUrl: string;
    headers: () => Record<string, string | undefined>;
};
declare class OpenAIChatLanguageModel implements LanguageModelV1 {
    readonly specificationVersion = "v1";
    readonly defaultObjectGenerationMode = "tool";
    readonly modelId: OpenAIChatModelId;
    readonly settings: OpenAIChatSettings;
    private readonly config;
    constructor(modelId: OpenAIChatModelId, settings: OpenAIChatSettings, config: OpenAIChatConfig);
    get provider(): string;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
}

type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
interface OpenAICompletionSettings {
    /**
     * Echo back the prompt in addition to the completion
     */
    echo?: boolean;
    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     *
     * Accepts a JSON object that maps tokens (specified by their token ID in
     * the GPT tokenizer) to an associated bias value from -100 to 100. You
     * can use this tokenizer tool to convert text to token IDs. Mathematically,
     * the bias is added to the logits generated by the model prior to sampling.
     * The exact effect will vary per model, but values between -1 and 1 should
     * decrease or increase likelihood of selection; values like -100 or 100
     * should result in a ban or exclusive selection of the relevant token.
     *
     * As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
     * token from being generated.
     */
    logitBias?: Record<number, number>;
    /**
     * The suffix that comes after a completion of inserted text.
     */
    suffix?: string;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to
     * monitor and detect abuse. Learn more.
     */
    user?: string;
}

type OpenAICompletionConfig = {
    provider: string;
    baseUrl: string;
    headers: () => Record<string, string | undefined>;
};
declare class OpenAICompletionLanguageModel implements LanguageModelV1 {
    readonly specificationVersion = "v1";
    readonly defaultObjectGenerationMode: undefined;
    readonly modelId: OpenAICompletionModelId;
    readonly settings: OpenAICompletionSettings;
    private readonly config;
    constructor(modelId: OpenAICompletionModelId, settings: OpenAICompletionSettings, config: OpenAICompletionConfig);
    get provider(): string;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
}
/**
 * OpenAI provider.
 */
declare class OpenAI {
    readonly baseUrl?: string;
    readonly apiKey?: string;
    readonly organization?: string;
    constructor(options?: {
        baseUrl?: string;
        apiKey?: string;
        organization?: string;
    });
    private get baseConfig();
    chat(modelId: OpenAIChatModelId, settings?: OpenAIChatSettings): OpenAIChatLanguageModel;
    completion(modelId: OpenAICompletionModelId, settings?: OpenAICompletionSettings): OpenAICompletionLanguageModel;
}
/**
 * Default OpenAI provider instance.
 */
declare const openai: OpenAI;

export { OpenAI, openai };
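Tying the facade together, here is a minimal usage sketch that relies only on the API surface declared in this file: construct an OpenAI provider (or use the default openai instance), obtain a chat model, and call its doGenerate method with standardized call options. The import path and API key are placeholders and may differ from the actual package exports.

// Hypothetical usage sketch; import path and key are placeholders.
import { OpenAI } from 'ai/openai';

async function main() {
  const provider = new OpenAI({ apiKey: 'sk-...' }); // or use the exported default `openai` instance
  const chatModel = provider.chat('gpt-3.5-turbo', { user: 'user-1234' });

  const { text, finishReason, usage } = await chatModel.doGenerate({
    inputFormat: 'messages',
    mode: { type: 'regular' },
    prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
  });

  console.log(text, finishReason, usage);
}

main().catch(console.error);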