@wix/auto_sdk_ai-gateway_prompts 1.0.30 → 1.0.32
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/build/cjs/index.d.ts +1 -1
- package/build/cjs/index.js +24 -0
- package/build/cjs/index.js.map +1 -1
- package/build/cjs/index.typings.d.ts +82 -5
- package/build/cjs/index.typings.js +24 -0
- package/build/cjs/index.typings.js.map +1 -1
- package/build/cjs/meta.d.ts +82 -5
- package/build/cjs/meta.js +24 -0
- package/build/cjs/meta.js.map +1 -1
- package/build/es/index.d.mts +1 -1
- package/build/es/index.mjs +23 -0
- package/build/es/index.mjs.map +1 -1
- package/build/es/index.typings.d.mts +82 -5
- package/build/es/index.typings.mjs +23 -0
- package/build/es/index.typings.mjs.map +1 -1
- package/build/es/meta.d.mts +82 -5
- package/build/es/meta.mjs +23 -0
- package/build/es/meta.mjs.map +1 -1
- package/build/internal/cjs/index.d.ts +3 -3
- package/build/internal/cjs/index.js +24 -0
- package/build/internal/cjs/index.js.map +1 -1
- package/build/internal/cjs/index.typings.d.ts +84 -7
- package/build/internal/cjs/index.typings.js +24 -0
- package/build/internal/cjs/index.typings.js.map +1 -1
- package/build/internal/cjs/meta.d.ts +82 -5
- package/build/internal/cjs/meta.js +24 -0
- package/build/internal/cjs/meta.js.map +1 -1
- package/build/internal/es/index.d.mts +3 -3
- package/build/internal/es/index.mjs +23 -0
- package/build/internal/es/index.mjs.map +1 -1
- package/build/internal/es/index.typings.d.mts +84 -7
- package/build/internal/es/index.typings.mjs +23 -0
- package/build/internal/es/index.typings.mjs.map +1 -1
- package/build/internal/es/meta.d.mts +82 -5
- package/build/internal/es/meta.mjs +23 -0
- package/build/internal/es/meta.mjs.map +1 -1
- package/package.json +3 -3
@@ -1 +1 @@
-
{"version":3,"sources":["../../src/api-infra-v1-prompt-proxy-prompts.universal.ts","../../src/api-infra-v1-prompt-proxy-prompts.http.ts"],"sourcesContent":["import { transformError as sdkTransformError } from '@wix/sdk-runtime/transform-error';\nimport {\n renameKeysFromSDKRequestToRESTRequest,\n renameKeysFromRESTResponseToSDKResponse,\n} from '@wix/sdk-runtime/rename-all-nested-keys';\nimport { HttpClient, NonNullablePaths } from '@wix/sdk-types';\nimport * as ambassadorWixApiInfraV1PromptProxy from './api-infra-v1-prompt-proxy-prompts.http.js';\n\n/**\n * A Prompt is a ...\n * You can ...\n * Read more about Prompts\n * in this [article](<LINK_TO_KB_ARTICLE>).\n */\nexport interface PromptProxy {\n /**\n * @format GUID\n * @readonly\n */\n _id?: string;\n}\n\nexport interface GenerationCompletedResultEvent {\n /**\n * @format GUID\n * @readonly\n */\n predictionId?: string;\n generationResult?: GenerateContentModelResponse;\n /** @maxLength 10000 */\n errorMessage?: string | null;\n}\n\nexport interface GenerateContentModelResponse\n extends GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. 
*/\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n /** Extracted generated content data from the model's response. */\n generatedContent?: GeneratedContent;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n /** Token usage information. */\n tokenUsage?: V1TokenUsage;\n /** Metadata about the response, such as finish reason. */\n responseMetadata?: ResponseMetadata;\n}\n\n/** @oneof */\nexport interface GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. 
*/\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n}\n\n/** Model generation result, at least one of the fields should be present */\nexport interface GeneratedContent {\n /**\n * Zero or more textual results. Only present when the model returned a text.\n * @maxSize 1000\n */\n texts?: TextContent[];\n /**\n * Zero or more images. Only present when the model returned an image.\n * @maxSize 1000\n */\n images?: MediaContent[];\n /**\n * Zero or more videos. Only present when the model returned a video.\n * @maxSize 1000\n */\n videos?: MediaContent[];\n /**\n * Zero or more thinking texts. Only present when the model returned a thought.\n * @maxSize 1000\n */\n thinkingTexts?: ThinkingTextContent[];\n /**\n * Zero or more tool call requests. Only present when the model requested to call a tool.\n * @maxSize 1000\n */\n tools?: ToolUseContent[];\n}\n\nexport interface TextContent {\n /**\n * Generated text\n * @maxLength 1000000\n */\n generatedText?: string | null;\n}\n\nexport interface MediaContent {\n /**\n * Mime type, e.g. \"image/jpeg\" or \"video/mp4\"\n * @maxLength 500\n */\n mimeType?: string | null;\n /**\n * Wix Media Platform (WixMP) url where the image or video is stored.\n * @maxLength 5000\n */\n url?: string;\n}\n\nexport interface ThinkingTextContent {\n /**\n * The thought text of the model thinking\n * @maxLength 1000000\n */\n thoughtText?: string | null;\n}\n\nexport interface ToolUseContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string;\n /** Tool use input */\n input?: Record<string, any> | null;\n}\n\nexport interface V1TokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n /** Total number of tokens used in the request. 
*/\n totalTokens?: number | null;\n /** cache creation token usage */\n cacheCreationTokens?: number | null;\n /** cache read token usage */\n cacheReadTokens?: number | null;\n /** thought tokens usage */\n thoughtsTokens?: number | null;\n /** tool use tokens usage */\n toolUseTokens?: number | null;\n}\n\nexport interface ResponseMetadata {\n /**\n * Finish reason of the model response.\n * @maxLength 1000\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: OpenaiproxyV1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessage {\n /** The role of the message author. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: ChatCompletionMessageFunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: OpenaiproxyV1ChatCompletionMessageContentPart[];\n}\n\nexport interface ChatCompletionMessageFunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. 
Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum OpenaiproxyV1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals =\n | OpenaiproxyV1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ChatCompletionMessageToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n _id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: ChatCompletionMessageFunctionWithArgs;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageContentPart\n extends OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface CreateChatCompletionResponsePromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseCompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. 
*/\n rejectedPredictionTokens?: number | null;\n}\n\nexport enum OpenaiproxyV1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_0301 = 'GPT_3_5_TURBO_0301',\n GPT_4 = 'GPT_4',\n GPT_4_0314 = 'GPT_4_0314',\n GPT_4_32K = 'GPT_4_32K',\n GPT_4_32K_0314 = 'GPT_4_32K_0314',\n GPT_3_5_TURBO_0613 = 'GPT_3_5_TURBO_0613',\n GPT_3_5_TURBO_16K = 'GPT_3_5_TURBO_16K',\n GPT_3_5_TURBO_16K_0613 = 'GPT_3_5_TURBO_16K_0613',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_4_32K_0613 = 'GPT_4_32K_0613',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_1106_PREVIEW = 'GPT_4_1106_PREVIEW',\n GPT_4_VISION_PREVIEW = 'GPT_4_VISION_PREVIEW',\n GPT_4_TURBO_PREVIEW = 'GPT_4_TURBO_PREVIEW',\n GPT_4_0125_PREVIEW = 'GPT_4_0125_PREVIEW',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4_TURBO_2024_04_09 = 'GPT_4_TURBO_2024_04_09',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4O_2024_08_06 = 'GPT_4O_2024_08_06',\n O1_PREVIEW = 'O1_PREVIEW',\n O1_PREVIEW_2024_09_12 = 'O1_PREVIEW_2024_09_12',\n O1_MINI = 'O1_MINI',\n O1_MINI_2024_09_12 = 'O1_MINI_2024_09_12',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O1_2024_12_17 = 'O1_2024_12_17',\n O3_MINI_2025_01_31 = 'O3_MINI_2025_01_31',\n GPT_4_OLD = 'GPT_4_OLD',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n O3_2025_04_16 = 'O3_2025_04_16',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_EXP = 'GPT_EXP',\n GPT_EXP_2 = 'GPT_EXP_2',\n GPT_5_2025_08_07 = 'GPT_5_2025_08_07',\n GPT_5_MINI_2025_08_07 = 'GPT_5_MINI_2025_08_07',\n GPT_5_NANO_2025_08_07 = 'GPT_5_NANO_2025_08_07',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ModelWithLiterals =\n | OpenaiproxyV1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_0301'\n | 'GPT_4'\n | 'GPT_4_0314'\n | 'GPT_4_32K'\n | 'GPT_4_32K_0314'\n | 'GPT_3_5_TURBO_0613'\n | 'GPT_3_5_TURBO_16K'\n | 'GPT_3_5_TURBO_16K_0613'\n | 'GPT_4_0613'\n | 'GPT_4_32K_0613'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_1106_PREVIEW'\n | 'GPT_4_VISION_PREVIEW'\n | 'GPT_4_TURBO_PREVIEW'\n | 'GPT_4_0125_PREVIEW'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4_TURBO_2024_04_09'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4O_2024_08_06'\n | 'O1_PREVIEW'\n | 'O1_PREVIEW_2024_09_12'\n | 'O1_MINI'\n | 'O1_MINI_2024_09_12'\n | 'GPT_4O_2024_11_20'\n | 'O1_2024_12_17'\n | 'O3_MINI_2025_01_31'\n | 'GPT_4_OLD'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'O3_2025_04_16'\n | 'O4_MINI_2025_04_16'\n | 'GPT_EXP'\n | 'GPT_EXP_2'\n | 'GPT_5_2025_08_07'\n | 'GPT_5_MINI_2025_08_07'\n | 'GPT_5_NANO_2025_08_07';\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: OpenaiproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. 
*/\n promptTokenDetails?: CreateChatCompletionResponsePromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CreateChatCompletionResponseCompletionTokenDetails;\n}\n\nexport interface TextBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: TextBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface TextBisonPrediction {\n /**\n * The result generated from input text.\n * @maxLength 100000\n */\n content?: string | null;\n /** Citation metadata */\n citationMetadata?: CitationMetadata;\n /** A collection of categories and their associated confidence scores. */\n safetyAttributes?: SafetyAttribute;\n}\n\nexport interface CitationMetadata {\n /**\n * Citations array\n * @maxSize 1000\n */\n citations?: V1Citation[];\n}\n\nexport interface V1Citation {\n /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */\n startIndex?: number | null;\n /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and < len(output). */\n endIndex?: number | null;\n /**\n * URL associated with this citation. If present, this URL links to the webpage of the source of this citation.\n * Possible URLs include news websites, GitHub repos, etc.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * Title associated with this citation. If present, it refers to the title of the source of this citation.\n * Possible titles include news titles, book titles, etc.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * License associated with this recitation. If present, it refers to the license of the source of this citation.\n * Possible licenses include code licenses, e.g., mit license.\n * @maxLength 100\n */\n license?: string | null;\n /**\n * Publication date associated with this citation. If present, it refers to the date at which the source of this citation was published.\n * Possible formats are YYYY, YYYY-MM, YYYY-MM-DD.\n * @maxLength 100\n */\n publicationDate?: string | null;\n}\n\nexport interface SafetyAttribute {\n /**\n * The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /** A flag indicating if the model's input or output was blocked. */\n blocked?: boolean | null;\n /**\n * The confidence scores of the each category, higher value means higher confidence.\n * @maxSize 100\n */\n scores?: number[] | null;\n /**\n * An error code that identifies why the input or output was blocked.\n * For a list of error codes, see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai#safety_filters_and_attributes.\n * @maxSize 100\n */\n errors?: string[] | null;\n}\n\nexport interface Metadata {\n /** TokenMetadata object */\n tokenMetadata?: TokenMetadata;\n}\n\nexport interface TokenMetadata {\n /** Number of input tokens. This is the total number of tokens across all messages, examples, and context. */\n inputTokenCount?: TokenCount;\n /** Number of output tokens. This is the total number of tokens in content across all candidates in the response. 
*/\n outputTokenCount?: TokenCount;\n}\n\nexport interface TokenCount {\n /** Number of tokens */\n totalTokens?: number | null;\n /** Number of billable characters */\n totalBillableCharacters?: number | null;\n}\n\nexport interface ChatBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: ChatBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface ChatBisonPrediction {\n /**\n * The chat result generated from given message.\n * @maxSize 100\n */\n candidates?: ChatMessage[];\n /**\n * Citation metadata\n * @maxSize 100\n */\n citationMetadata?: CitationMetadata[];\n /**\n * An array of collections of categories and their associated confidence scores. 1-1 mapping to candidates.\n * @maxSize 100\n */\n safetyAttributes?: SafetyAttribute[];\n}\n\nexport interface ChatMessage {\n /**\n * Author tag for the turn.\n * @maxLength 100000\n */\n author?: string | null;\n /**\n * Text content of the chat message.\n * @maxLength 100000\n */\n content?: string;\n}\n\nexport interface CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface V1ChatCompletionMessage {\n /** The role of the message author. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: FunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: ChatCompletionMessageContentPart[];\n}\n\nexport interface FunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ChatCompletionMessageMessageRoleWithLiterals =\n | ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n _id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: FunctionWithArgs;\n}\n\nexport interface ChatCompletionMessageContentPart\n extends ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface PromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport enum V1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n /** New models for Migration */\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n}\n\n/** @enumType */\nexport type V1ModelWithLiterals =\n | V1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_0613'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4O_2024_11_20'\n | 'O4_MINI_2025_04_16';\n\nexport interface CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: V1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: PromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CompletionTokenDetails;\n}\n\nexport interface GenerateContentResponse {\n /**\n * The generated response.\n * @maxSize 1000\n */\n candidates?: Candidate[];\n /** The usage metadata. */\n usageMetadata?: UsageMetadata;\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Candidate {\n /** The generated response content. */\n content?: CandidateContent;\n /** The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */\n finishReason?: FinishReasonWithLiterals;\n /**\n * The safety ratings of the response.\n * @maxSize 100\n */\n safetyRatings?: SafetyRating[];\n /** The citation metadata of the response. */\n citationMetadata?: CandidateCitationMetadata;\n /** Output only. Metadata specifies sources used to ground generated content. 
*/\n groundingMetadata?: GroundingMetadata;\n}\n\nexport interface CandidateContent {\n /**\n * The generated response content.\n * @maxSize 1000\n */\n parts?: CandidateContentPart[];\n}\n\nexport interface FunctionCall {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional. The function parameters and values in JSON object format. */\n args?: Record<string, any> | null;\n}\n\nexport interface ExecutableCode {\n /** Required. Programming language of the code. */\n language?: LanguageWithLiterals;\n /**\n * Required. The code to be executed.\n * @maxLength 100000\n */\n code?: string;\n}\n\nexport enum Language {\n /** Unspecified language. This value should not be used. */\n LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED',\n /** Python >= 3.10, with numpy and simpy available. */\n PYTHON = 'PYTHON',\n}\n\n/** @enumType */\nexport type LanguageWithLiterals = Language | 'LANGUAGE_UNSPECIFIED' | 'PYTHON';\n\nexport interface V1CodeExecutionResult {\n /** Required. Outcome of the code execution. */\n outcome?: OutcomeWithLiterals;\n /**\n * Optional. Contains stdout when code execution is successful, stderr or other description otherwise.\n * @maxLength 100000\n */\n output?: string | null;\n}\n\nexport enum Outcome {\n /** Unspecified status. This value should not be used. */\n OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED',\n /** Code execution completed successfully. */\n OUTCOME_OK = 'OUTCOME_OK',\n /** Code execution finished but with a failure. stderr should contain the reason. */\n OUTCOME_FAILED = 'OUTCOME_FAILED',\n /** Code execution ran for too long, and was cancelled. There may or may not be a partial output present. */\n OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED',\n}\n\n/** @enumType */\nexport type OutcomeWithLiterals =\n | Outcome\n | 'OUTCOME_UNSPECIFIED'\n | 'OUTCOME_OK'\n | 'OUTCOME_FAILED'\n | 'OUTCOME_DEADLINE_EXCEEDED';\n\n/**\n * Raw media bytes.\n * Text should not be sent as raw bytes, use the 'text' field.\n */\nexport interface Blob {\n /**\n * The IANA standard MIME type of the source data.\n * Examples: - image/png - image/jpeg\n * If an unsupported MIME type is provided, an error will be returned.\n * For a complete list of supported types, see https://ai.google.dev/gemini-api/docs/file-prompting-strategies#supported_file_formats.\n * @maxLength 100\n */\n mimeType?: string;\n /**\n * Represents raw bytes for media formats. Will be fetched from the passed URL in request, and uploaded to WixMP URL in response.\n * @format WEB_URL\n */\n data?: string;\n}\n\nexport interface CandidateContentPart {\n /**\n * The text generated by the model.\n * @maxLength 100000\n */\n text?: string | null;\n /** function call */\n functionCall?: FunctionCall;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. 
*/\n inlineData?: Blob;\n /**\n * Thought flag indicates that the content part is a thought.\n * @readonly\n */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 1000000\n */\n thoughtSignature?: string | null;\n}\n\nexport enum FinishReason {\n UNKNOWN_FINISH_REASON = 'UNKNOWN_FINISH_REASON',\n /** The finish reason is unspecified. */\n UNSPECIFIED = 'UNSPECIFIED',\n /** Natural stop point of the model or provided stop sequence. */\n STOP = 'STOP',\n /** The maximum number of tokens as specified in the request was reached. */\n MAX_TOKENS = 'MAX_TOKENS',\n /**\n * The token generation was stopped as the response was flagged for safety reasons.\n * Note that Candidate.content is empty if content filters block the output.\n */\n SAFETY = 'SAFETY',\n /** The token generation was stopped as the response was flagged for unauthorized citations. */\n RECITATION = 'RECITATION',\n /** All other reasons that stopped the token */\n OTHER = 'OTHER',\n /** The response candidate content was flagged for using an unsupported language. */\n LANGUAGE = 'LANGUAGE',\n /** Token generation stopped because the content contains forbidden terms. */\n BLOCKLIST = 'BLOCKLIST',\n /** Token generation stopped for potentially containing prohibited content. */\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n /** Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII). */\n SPII = 'SPII',\n /** The function call generated by the model is invalid. */\n MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',\n /** Token generation stopped because generated images contain safety violations. */\n IMAGE_SAFETY = 'IMAGE_SAFETY',\n /** Model generated a tool call but no tools were enabled in the request. */\n UNEXPECTED_TOOL_CALL = 'UNEXPECTED_TOOL_CALL',\n /** Model called too many tools consecutively, thus the system exited execution. */\n TOO_MANY_TOOL_CALLS = 'TOO_MANY_TOOL_CALLS',\n}\n\n/** @enumType */\nexport type FinishReasonWithLiterals =\n | FinishReason\n | 'UNKNOWN_FINISH_REASON'\n | 'UNSPECIFIED'\n | 'STOP'\n | 'MAX_TOKENS'\n | 'SAFETY'\n | 'RECITATION'\n | 'OTHER'\n | 'LANGUAGE'\n | 'BLOCKLIST'\n | 'PROHIBITED_CONTENT'\n | 'SPII'\n | 'MALFORMED_FUNCTION_CALL'\n | 'IMAGE_SAFETY'\n | 'UNEXPECTED_TOOL_CALL'\n | 'TOO_MANY_TOOL_CALLS';\n\nexport interface SafetyRating {\n /** The safety category that the response belongs to. */\n category?: HarmCategoryWithLiterals;\n /** The probability that the response belongs to the specified safety category. */\n probability?: HarmProbabilityWithLiterals;\n /** The probability score that the response belongs to the specified safety category. */\n probabilityScore?: number | null;\n /**\n * The severity of the response's safety rating.\n * @maxLength 100\n */\n severity?: string | null;\n /** the severity score of the response's safety rating. 
*/\n severityScore?: number | null;\n /**\n * A boolean flag associated with a safety attribute that indicates if the model's input or output was blocked.\n * If blocked is true, then the errors field in the response contains one or more error codes.\n * If blocked is false, then the response doesn't include the errors field.\n */\n blocked?: boolean | null;\n}\n\nexport enum HarmCategory {\n UNKNOWN_CATEGORY = 'UNKNOWN_CATEGORY',\n HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',\n}\n\n/** @enumType */\nexport type HarmCategoryWithLiterals =\n | HarmCategory\n | 'UNKNOWN_CATEGORY'\n | 'HARM_CATEGORY_SEXUALLY_EXPLICIT'\n | 'HARM_CATEGORY_HATE_SPEECH'\n | 'HARM_CATEGORY_HARASSMENT'\n | 'HARM_CATEGORY_DANGEROUS_CONTENT';\n\nexport enum HarmProbability {\n UNKNOWN_PROBABILITY = 'UNKNOWN_PROBABILITY',\n NEGLIGIBLE = 'NEGLIGIBLE',\n LOW = 'LOW',\n MEDIUM = 'MEDIUM',\n HIGH = 'HIGH',\n}\n\n/** @enumType */\nexport type HarmProbabilityWithLiterals =\n | HarmProbability\n | 'UNKNOWN_PROBABILITY'\n | 'NEGLIGIBLE'\n | 'LOW'\n | 'MEDIUM'\n | 'HIGH';\n\nexport interface CandidateCitationMetadata {\n /**\n * The citations of the response.\n * @maxSize 1000\n */\n citations?: CandidateCitationMetadataCitation[];\n}\n\nexport interface PublicationDate {\n /** The year of the publication date. */\n year?: number | null;\n /** The month of the publication date. */\n month?: number | null;\n /** The day of the publication date. */\n day?: number | null;\n}\n\nexport interface CandidateCitationMetadataCitation {\n /** An integer that specifies where a citation starts in the content. */\n startIndex?: number | null;\n /** An integer that specifies where a citation ends in the content. */\n endIndex?: number | null;\n /**\n * The URI of a citation source. Examples of a URI source might be a news website or a GitHub repository.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * The title of a citation source. Examples of source titles might be that of a news article or a book.\n * @maxLength 500\n */\n title?: string | null;\n /**\n * The license associated with a citation.\n * @maxLength 500\n */\n license?: string | null;\n /** The date a citation was published. Its valid formats are YYYY, YYYY-MM, and YYYY-MM-DD. */\n publicationDate?: PublicationDate;\n}\n\n/** Metadata returned to client when grounding is enabled. */\nexport interface GroundingMetadata {\n /**\n * Optional. Web search queries for the following-up web search.\n * @maxSize 1000\n * @maxLength 1000\n */\n webSearchQueries?: string[];\n /** Optional. Google search entry for the following-up web searches. */\n searchEntryPoint?: SearchEntryPoint;\n /**\n * List of supporting references retrieved from specified grounding source.\n * @maxSize 1000\n */\n groundingChunks?: GroundingChunk[];\n /**\n * Optional. List of grounding support.\n * @maxSize 1000\n */\n groundingSupports?: GroundingSupport[];\n /** Optional. Output only. Retrieval metadata. */\n retrievalMetadata?: RetrievalMetadata;\n}\n\n/** Google search entry point. */\nexport interface SearchEntryPoint {\n /**\n * Optional. Web content snippet that can be embedded in a web page or an app webview.\n * @maxLength 10000000\n */\n renderedContent?: string | null;\n /** Optional. Base64 encoded JSON representing array of <search term, search url> tuple. 
*/\n sdkBlob?: Uint8Array | null;\n}\n\n/** Grounding chunk. */\nexport interface GroundingChunk extends GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** @oneof */\nexport interface GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** Chunk from the web. */\nexport interface Web {\n /**\n * URI reference of the chunk.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the chunk.\n * @maxLength 1000\n */\n title?: string | null;\n}\n\n/** Chunk from context retrieved by the retrieval tools. */\nexport interface RetrievedContext {\n /**\n * URI reference of the attribution.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the attribution.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * Text of the attribution.\n * @maxLength 100000\n */\n text?: string | null;\n}\n\n/** Grounding support. */\nexport interface GroundingSupport {\n /** Segment of the content this support belongs to. */\n segment?: Segment;\n /**\n * A list of indices (into 'grounding_chunk') specifying the\n * citations associated with the claim. For instance [1,3,4] means\n * that grounding_chunk[1], grounding_chunk[3],\n * grounding_chunk[4] are the retrieved content attributed to the claim.\n * @maxSize 1000\n */\n groundingChunkIndices?: number[];\n /**\n * Confidence score of the support references. Ranges from 0 to 1. 1 is the\n * most confident. This list must have the same size as the\n * grounding_chunk_indices.\n * @maxSize 1000\n */\n confidenceScores?: number[];\n}\n\n/** Segment of the content. */\nexport interface Segment {\n /** Output only. The index of a Part object within its parent Content object. */\n partIndex?: number | null;\n /**\n * Output only. Start index in the given Part, measured in bytes. Offset from\n * the start of the Part, inclusive, starting at zero.\n */\n startIndex?: number;\n /**\n * Output only. End index in the given Part, measured in bytes. Offset from\n * the start of the Part, exclusive, starting at zero.\n */\n endIndex?: number;\n /**\n * Output only. The text corresponding to the segment from the response.\n * @maxLength 100000\n */\n text?: string;\n}\n\n/** Metadata related to retrieval in the grounding flow. */\nexport interface RetrievalMetadata {\n /**\n * Optional. Score indicating how likely information from Google Search could\n * help answer the prompt. The score is in the range `[0, 1]`, where 0 is the\n * least likely and 1 is the most likely. This score is only populated when\n * Google Search grounding and dynamic retrieval is enabled. It will be\n * compared to the threshold to determine whether to trigger Google Search.\n */\n googleSearchDynamicRetrievalScore?: number | null;\n}\n\nexport interface UsageMetadata {\n /** Number of tokens in the request. */\n promptTokenCount?: number | null;\n /** Number of tokens in the response. */\n candidatesTokenCount?: number | null;\n /** Number of tokens in the request and response(s). */\n totalTokenCount?: number | null;\n /** Optional. Number of tokens of thoughts for thinking models. */\n thoughtsTokenCount?: number | null;\n /**\n * Output only. 
List of modalities that were processed in the request input.\n * @maxSize 10\n */\n promptTokensDetails?: ModalityTokenCount[];\n /**\n * Output only. List of modalities that were returned in the response.\n * @maxSize 10\n */\n candidatesTokensDetails?: ModalityTokenCount[];\n}\n\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality?: ModalityWithLiterals;\n /** Number of tokens. */\n tokenCount?: string | null;\n}\n\nexport enum Modality {\n UNKNOWN_MODALITY = 'UNKNOWN_MODALITY',\n /** Indicates the model should return text. */\n TEXT = 'TEXT',\n /** Indicates the model should return images. */\n IMAGE = 'IMAGE',\n /** Indicates the model should return audio. */\n AUDIO = 'AUDIO',\n}\n\n/** @enumType */\nexport type ModalityWithLiterals =\n | Modality\n | 'UNKNOWN_MODALITY'\n | 'TEXT'\n | 'IMAGE'\n | 'AUDIO';\n\nexport interface InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: RoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number tokens of that the model generated in the response. */\n usage?: Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n */\n contentBlocks?: ContentBlock[];\n}\n\nexport enum ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type ResponseTypeTypeWithLiterals =\n | ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum Role {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type RoleWithLiterals = Role | 'UNKNOWN' | 'USER' | 'ASSISTANT';\n\nexport interface Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number tokens of that the model generated in the response. 
*/\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface ContentBlock extends ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\n/** @oneof */\nexport interface ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\nexport interface Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: TypeWithLiterals;\n}\n\nexport enum Type {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type TypeWithLiterals = Type | 'UNKNOWN' | 'EPHEMERAL';\n\nexport interface ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: MediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. 
*/\n cacheControl?: CacheControl;\n}\n\nexport enum MediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type MediaTypeWithLiterals =\n | MediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface SimpleContentBlock extends SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\n/** @oneof */\nexport interface SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\nexport interface Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface V1InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: GoogleproxyV1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: GoogleproxyV1Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 1000\n */\n contentBlocks?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum GoogleproxyV1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ResponseTypeTypeWithLiterals =\n | GoogleproxyV1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum V1MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type V1MessageRoleRoleWithLiterals =\n | V1MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface GoogleproxyV1Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number of tokens that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface GoogleproxyV1ContentBlock\n extends GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation.
*/\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: V1CacheControlTypeWithLiterals;\n}\n\nexport enum V1CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type V1CacheControlTypeWithLiterals =\n | V1CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\nexport interface GoogleproxyV1ImageUrl {\n /**\n * The URL must be a valid WixMp or Wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: V1ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport enum V1ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type V1ImageMediaTypeMediaTypeWithLiterals =\n | V1ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface GoogleproxyV1ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching.
*/\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: V1SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface V1SimpleContentBlock extends V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\n/** @oneof */\nexport interface V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\nexport interface GoogleproxyV1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface GoogleproxyV1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface InvokeAnthropicModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * \"end_turn\": the model reached a natural stopping point\n * \"max_tokens\": we exceeded the requested max_tokens or the model's maximum\n * \"stop_sequence\": one of your provided custom stop_sequences was generated\n * \"tool_use\": the model invoked one or more tools\n * \"pause_turn\": we paused a long-running turn. You may provide the response back as-is in a subsequent request to let the model continue.\n * \"refusal\": when streaming classifiers intervene to handle potential policy violations\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n type?: V1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: MessageRoleRoleWithLiterals;\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response.
*/\n usage?: V1Usage;\n /**\n * Information about the container used in this request.\n * This will be non-null if a container tool (e.g. code execution) was used.\n */\n container?: Container;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum V1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type V1ResponseTypeTypeWithLiterals =\n | V1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type MessageRoleRoleWithLiterals =\n | MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface V1Usage {\n /** Breakdown of cached tokens by TTL */\n cacheCreation?: UsageCacheCreation;\n /** The number of input tokens used to create the cache entry. */\n cacheCreationInputTokens?: number | null;\n /** The number of input tokens read from the cache. */\n cacheReadInputTokens?: number | null;\n /** The number of input tokens which were used. */\n inputTokens?: number;\n /** The number of output tokens which were used. */\n outputTokens?: number;\n /** The number of server tool requests. */\n serverToolUse?: UsageServerToolUse;\n /**\n * If the request used the priority, standard, or batch tier.\n * Available options: standard, priority, batch\n * @maxLength 500\n */\n serviceTier?: string | null;\n}\n\nexport interface UsageCacheCreation {\n /** The number of input tokens used to create the 1 hour cache entry. */\n ephemeral1hInputTokens?: number;\n /** The number of input tokens used to create the 5 minute cache entry. */\n ephemeral5mInputTokens?: number;\n}\n\nexport interface UsageServerToolUse {\n /** The number of web search tool requests. */\n webSearchRequests?: number;\n /** The number of web fetch tool requests. */\n webFetchRequests?: number;\n}\n\nexport interface Container {\n /**\n * The time at which the container will expire.\n * @maxLength 100\n */\n expiresAt?: string;\n /**\n * Identifier for the container used in this request\n * @maxLength 512\n */\n _id?: string;\n}\n\n/** Content object used in both request and response */\nexport interface V1ContentBlock extends V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\n/** @oneof */\nexport interface V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\nexport interface V1Text {\n /**\n * Text content.\n * @maxLength 1000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. 
*/\n cacheControl?: V1CacheControl;\n /**\n * Structured citations for this text block.\n * Populated by the model when citations are enabled.\n * @maxSize 256\n */\n citations?: Citation[];\n}\n\nexport interface V1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: CacheControlTypeWithLiterals;\n /**\n * The time-to-live for the cache control breakpoint. This may be one of the following values:\n * 5m: 5 minutes (default)\n * 1h: 1 hour\n * @maxLength 50\n */\n ttl?: string | null;\n}\n\nexport enum CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type CacheControlTypeWithLiterals =\n | CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\n/** Unified wrapper for all citation kinds (attach to Text.citations). */\nexport interface Citation extends CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\n/** @oneof */\nexport interface CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\nexport interface CharLocationCitation {\n /**\n * Should be \"char_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start character index (inclusive) within the document text. */\n startCharIndex?: number | null;\n /** 0-based end character index (exclusive) within the document text. */\n endCharIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface PageLocationCitation {\n /**\n * Should be \"page_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 1-based start page number (inclusive). */\n startPageNumber?: number | null;\n /** 1-based end page number (exclusive). */\n endPageNumber?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface ContentBlockLocationCitation {\n /**\n * Should be \"content_block_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start content-block index (inclusive) within the custom document.
*/\n startBlockIndex?: number | null;\n /** 0-based end content-block index (exclusive) within the custom document. */\n endBlockIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface WebSearchResultLocationCitation {\n /**\n * Should be \"web_search_result_location\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the cited source\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the cited source\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * A reference that must be passed back for multi-turn conversations.\n * @maxLength 1000000\n */\n encryptedIndex?: string | null;\n /**\n * Up to 150 characters of the cited content\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface SearchResultLocationCitation {\n /**\n * Should be \"search_result_location\".\n * @maxLength 500\n */\n type?: string;\n /** Index of the search_result within the current turn (0-based). */\n searchResultIndex?: number | null;\n /** 0-based start block index within that search_result's content. */\n startBlockIndex?: number | null;\n /** 0-based end block index within that search_result's content. */\n endBlockIndex?: number | null;\n /**\n * Source string\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Optional title (same as search_result.title).\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional quoted snippet\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface V1ImageUrl {\n /**\n * The URL must be a valid WixMp or Wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport enum ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type ImageMediaTypeMediaTypeWithLiterals =\n | ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface V1ToolUse {\n /**\n * Tool use id\n * @maxLength 512\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Optional: enable tool use caching */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1ToolResult {\n /**\n * Tool use id\n * @maxLength 512\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: ToolResultContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface ToolResultContentBlock\n extends ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations.
*/\n searchResult?: ToolResultSearchResult;\n}\n\n/** @oneof */\nexport interface ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\nexport interface DocumentContent {\n /**\n * Should be \"document\"\n * @maxLength 500\n */\n type?: string;\n /** Citable payload or reference. */\n source?: DocumentSource;\n /**\n * Optional: Document title\n * Can be passed to the model but not used towards cited content.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional: Any document metadata as text or stringified JSON.\n * Can be passed to the model but not used towards cited content.\n * @maxLength 1000000\n */\n context?: string | null;\n /** Enable citations for this doc */\n citations?: CitationsEnabled;\n /** Optional: Cache the document content */\n cacheControl?: V1CacheControl;\n}\n\nexport interface DocumentSource {\n /**\n * One of: \"text\" | \"base64\" | \"content\" | \"file\" | \"url\".\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Required for types \"text\"/\"base64\" (e.g., \"text/plain\", \"application/pdf\").\n * @maxLength 500\n */\n mediaType?: string | null;\n /**\n * For type \"text\": raw text. For \"base64\": bytes as base64.\n * @maxLength 10000000\n */\n data?: string | null;\n /**\n * For type \"file\": Files API id (e.g., \"file_01...\")\n * @maxLength 5000\n */\n fileId?: string | null;\n /**\n * For type \"url\": absolute URL to the document\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * For type \"content\": custom content block; only text blocks are citable\n * @maxSize 500\n */\n content?: V1ContentBlock[];\n}\n\nexport interface CitationsEnabled {\n /** Whether to enable citations */\n enabled?: boolean | null;\n}\n\nexport interface ToolResultSearchResult {\n /**\n * Should be \"search_result\".\n * @maxLength 500\n */\n type?: string;\n /**\n * Where this result came from (URL or source label).\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Human-readable title for the result.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Inline text snippets that summarize/support the result.\n * @maxSize 1000\n */\n content?: V1Text[];\n /**\n * Enable/disable citations for this result's content.\n * Matches Anthropic \"citations\" on search_result blocks.\n */\n citations?: CitationsEnabled;\n}\n\nexport interface V1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface V1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems.
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\n/**\n * Assistant requests a Model Context Protocol (MCP) tool call.\n * Pair with ToolResult using the same `id`.\n */\nexport interface McpToolUse {\n /**\n * Unique id for this tool call; must match McpToolResult.tool_use_id.\n * @maxLength 512\n */\n _id?: string | null;\n /**\n * Tool name as exposed by the MCP server.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Which MCP server to call (must match a server in the request).\n * @maxLength 1000\n */\n serverName?: string | null;\n /** JSON arguments for the tool (object per the tool's schema). */\n input?: Record<string, any> | null;\n}\n\n/**\n * Server-tool invocation announced by the ASSISTANT for Anthropic-run tools\n * (e.g., \"web_search\", \"code_execution\").\n */\nexport interface ServerToolUse {\n /**\n * Should be \"server_tool_use\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n _id?: string | null;\n /**\n * The tool name. Available options: \"web_search\" | \"web_fetch\" | \"code_execution\" | \"bash_code_execution\" | \"text_editor_code_execution\"\n * @maxLength 500\n */\n name?: string | null;\n /**\n * Tool-specific parameters object:\n * web_search → { \"query\": \"<string>\" }\n * web_fetch → { \"url\": \"<string>\" }\n * code_execution→ { \"code\": \"<python source>\" }\n */\n input?: Record<string, any> | null;\n}\n\n/** Server tool result (web search). Either results[] OR error. */\nexport interface WebSearchToolResult extends WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n /**\n * Should be \"web_search_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n}\n\n/** Success payload: the JSON `content` ARRAY of result items. 
*/\nexport interface WebSearchResultList {\n /**\n * Results items\n * @maxSize 1000\n */\n items?: WebSearchResult[];\n}\n\n/**\n * One search result item.\n * Docs (“Search results include”): url, title, page_age, encrypted_content.\n * Each item also has the literal type.\n */\nexport interface WebSearchResult {\n /**\n * Should be \"web_search_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the source page.\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the source page.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * When the site was last updated (e.g., \"April 30, 2025\").\n * @maxLength 100\n */\n pageAge?: string | null;\n /**\n * Encrypted content that must be passed back in multi-turn conversations for citations.\n * @maxLength 1000000\n */\n encryptedContent?: string | null;\n}\n\n/**\n * Error payload\n * Possible error codes: too_many_requests | invalid_input | max_uses_exceeded | query_too_long | unavailable\n */\nexport interface WebSearchToolResultError {\n /**\n * Should be \"web_search_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface CodeExecutionToolResult\n extends CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n /**\n * Should be \"code_execution_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n}\n\n/** Success payload for code execution. */\nexport interface CodeExecutionResult {\n /**\n * Should be \"code_execution_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Output from successful execution (print, etc.).\n * @maxLength 1000000\n */\n stdout?: string | null;\n /**\n * Error messages emitted by the program.\n * @maxLength 1000000\n */\n stderr?: string | null;\n /** 0 = success, non-zero = failure. */\n returnCode?: number | null;\n /**\n * Optional: Array of produced artifacts.\n * Example item (typical): { \"file_id\": \"file_abc123\", ... }\n * @maxSize 4096\n */\n content?: Record<string, any>[] | null;\n}\n\n/**\n * Error payload (HTTP 200; error lives in the result body).\n * Docs list: unavailable | code_execution_exceeded | container_expired\n */\nexport interface CodeExecutionToolResultError {\n /**\n * Should be \"code_execution_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value, e.g. 
\"unavailable\", \"code_execution_exceeded\", \"container_expired\".\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface ContainerUpload {\n /**\n * Should be \"container_upload\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * File identifier returned by the Files API (e.g., \"file_01abc...\").\n * @maxLength 5000\n */\n fileId?: string | null;\n}\n\n/** Web fetch tool result */\nexport interface WebFetchToolResult extends WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n /**\n * Should be \"web_fetch_tool_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n}\n\nexport interface WebFetchToolResultContentSuccess {\n /**\n * Should be \"web_fetch_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * The URL that was fetched\n * @maxLength 10000\n */\n url?: string | null;\n /** A document block containing the fetched content */\n content?: DocumentContent;\n /**\n * Timestamp when the content was retrieved\n * @maxLength 256\n */\n retrievedAt?: string | null;\n}\n\nexport interface WebFetchToolResultContentError {\n /**\n * Should be \"web_fetch_tool_result_error\"\n * @maxLength 500\n */\n type?: string;\n /**\n * These are the possible error codes:\n * - invalid_tool_input: Invalid URL format\n * - url_too_long: URL exceeds maximum length (250 characters)\n * - url_not_allowed: URL blocked by domain filtering rules and model restrictions\n * - url_not_accessible: Failed to fetch content (HTTP error)\n * - too_many_requests: Rate limit exceeded\n * - unsupported_content_type: Content type not supported (only text and PDF)\n * - max_uses_exceeded: Maximum web fetch tool uses exceeded\n * - unavailable: An internal error occurred\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface InvokeLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeMlPlatformLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. 
Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeChatCompletionResponse {\n /**\n * Unique response ID\n * @maxLength 100\n */\n _id?: string | null;\n /** The model used to generate the response */\n model?: PerplexityModelWithLiterals;\n /**\n * The object type, which always equals chat.completion\n * @maxLength 100\n */\n object?: string | null;\n /** The Unix timestamp (in seconds) of when the completion was created */\n created?: number | null;\n /**\n * Citations for the generated answer\n * @maxLength 10000\n * @maxSize 1000\n */\n citations?: string[];\n /** The list of completion choices the model generated for the input prompt */\n choices?: InvokeChatCompletionResponseChoice[];\n /** URLs and size metadata for returned images */\n images?: PerplexityImageDescriptor[];\n /**\n * Further questions related to the search\n * @maxLength 10000\n * @maxSize 1000\n */\n relatedQuestions?: string[];\n /** Usage statistics for the completion request. */\n usage?: InvokeChatCompletionResponseUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface PerplexityMessage {\n /**\n * The content of the message\n * @maxLength 200000\n */\n content?: string;\n /**\n * The role of the speaker in this turn of conversation. After the (optional) system message,\n * user and assistant roles should alternate with `user` then `assistant`, ending in `user`.\n */\n role?: PerplexityMessageMessageRoleWithLiterals;\n}\n\nexport enum PerplexityMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n SYSTEM = 'SYSTEM',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type PerplexityMessageMessageRoleWithLiterals =\n | PerplexityMessageMessageRole\n | 'UNKNOWN'\n | 'SYSTEM'\n | 'USER'\n | 'ASSISTANT';\n\nexport enum PerplexityModel {\n UNKNOWN_PERPLEXITY_MODEL = 'UNKNOWN_PERPLEXITY_MODEL',\n SONAR = 'SONAR',\n SONAR_PRO = 'SONAR_PRO',\n SONAR_REASONING = 'SONAR_REASONING',\n SONAR_REASONING_PRO = 'SONAR_REASONING_PRO',\n SONAR_DEEP_RESEARCH = 'SONAR_DEEP_RESEARCH',\n}\n\n/** @enumType */\nexport type PerplexityModelWithLiterals =\n | PerplexityModel\n | 'UNKNOWN_PERPLEXITY_MODEL'\n | 'SONAR'\n | 'SONAR_PRO'\n | 'SONAR_REASONING'\n | 'SONAR_REASONING_PRO'\n | 'SONAR_DEEP_RESEARCH';\n\n/** Structures the completion choice */\nexport interface InvokeChatCompletionResponseChoice {\n /** Choice index */\n index?: number | null;\n /**\n * Stop reason, can be `STOP` or `LENGTH`\n * @maxLength 10\n */\n finishReason?: string | null;\n /** Choice message, containing content and role */\n message?: PerplexityMessage;\n}\n\nexport interface PerplexityImageDescriptor {\n /**\n * Full image url\n * @maxLength 5000\n */\n imageUrl?: string | null;\n /**\n * Image origin website\n * @maxLength 5000\n */\n originUrl?: string | null;\n /** Height */\n height?: number | null;\n /** Width */\n width?: number | null;\n}\n\n/** Usage statistics for the completion request. 
*/\nexport interface InvokeChatCompletionResponseUsage {\n /** The number of tokens provided in the request prompt. */\n promptTokens?: number | null;\n /** The number of tokens generated in the response output. */\n completionTokens?: number | null;\n /** The total number of tokens used in the chat completion (prompt + completion). */\n totalTokens?: number | null;\n /** Tokens passed into the input from citations found during search. Priced like `prompt_tokens` */\n citationTokens?: number | null;\n /** Reasoning tokens are used to reason through the research material before generating the final output via the CoTs */\n reasoningTokens?: number | null;\n /** Number of search queries executed. */\n numSearchQueries?: number | null;\n}\n\nexport interface CreateImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: V1ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * The prompt that was used to generate the image, if there was any revision to the prompt.\n * @maxLength 100000\n */\n revisedPrompt?: string | null;\n}\n\nexport enum V1ImageModel {\n UNKNOWN_IMAGE_GENERATION_MODEL = 'UNKNOWN_IMAGE_GENERATION_MODEL',\n DALL_E_2 = 'DALL_E_2',\n DALL_E_3 = 'DALL_E_3',\n}\n\n/** @enumType */\nexport type V1ImageModelWithLiterals =\n | V1ImageModel\n | 'UNKNOWN_IMAGE_GENERATION_MODEL'\n | 'DALL_E_2'\n | 'DALL_E_3';\n\nexport interface V1TextToImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /** A specific value [0 .. 4294967294] used to guide the 'randomness' of the generation. */\n seed?: string | null;\n /**\n * Finish reason by the model provider.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport enum ImageModel {\n STABILITY_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_IMAGE_MODEL_UNSPECIFIED',\n /** stable-diffusion-xl-1024-v1-0 - Stable Diffusion XL v1.0 */\n SDXL_1_0 = 'SDXL_1_0',\n}\n\n/** @enumType */\nexport type ImageModelWithLiterals =\n | ImageModel\n | 'STABILITY_IMAGE_MODEL_UNSPECIFIED'\n | 'SDXL_1_0';\n\nexport interface GenerateCoreResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageCoreModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport enum ImageCoreModel {\n STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED',\n STABLE_IMAGE_CORE = 'STABLE_IMAGE_CORE',\n}\n\n/** @enumType */\nexport type ImageCoreModelWithLiterals =\n | ImageCoreModel\n | 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED'\n | 'STABLE_IMAGE_CORE';\n\nexport interface GenerateStableDiffusionResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. 
*/\n model?: ImageStableDiffusionModelWithLiterals;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport enum ImageStableDiffusionModel {\n STABLE_DIFFUSION_MODEL_UNSPECIFIED = 'STABLE_DIFFUSION_MODEL_UNSPECIFIED',\n /** sd3-large */\n SD3_LARGE = 'SD3_LARGE',\n /** sd3-large-turbo */\n SD3_LARGE_TURBO = 'SD3_LARGE_TURBO',\n /** sd3-medium */\n SD3_MEDIUM = 'SD3_MEDIUM',\n /** sd3.5-large */\n SD3_5_LARGE = 'SD3_5_LARGE',\n /** sd3.5-large-turbo */\n SD3_5_LARGE_TURBO = 'SD3_5_LARGE_TURBO',\n /** sd3.5-medium */\n SD3_5_MEDIUM = 'SD3_5_MEDIUM',\n}\n\n/** @enumType */\nexport type ImageStableDiffusionModelWithLiterals =\n | ImageStableDiffusionModel\n | 'STABLE_DIFFUSION_MODEL_UNSPECIFIED'\n | 'SD3_LARGE'\n | 'SD3_LARGE_TURBO'\n | 'SD3_MEDIUM'\n | 'SD3_5_LARGE'\n | 'SD3_5_LARGE_TURBO'\n | 'SD3_5_MEDIUM';\n\nexport interface GenerateAnImageResponse {\n /**\n * The id of the task.\n * @format GUID\n */\n _id?: string | null;\n /**\n * status of the image generation\n * one of Task not found, Pending, Request Moderated, Content Moderated, Ready, Error\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface ResultObject {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * prompt used for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n /** seed used for image generation */\n seed?: string | null;\n}\n\nexport interface CreatePredictionResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n _id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Token counts */\n tokenUsage?: CreatePredictionResponseTokenUsage;\n}\n\nexport interface CreatePredictionResponseTokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n}\n\nexport interface EditImageWithPromptResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /** Cost of the request in micro cents. 
*/\n microcentsSpent?: string | null;\n}\n\nexport enum EditImageWithPromptRequestModel {\n UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL',\n INPAINT = 'INPAINT',\n OUTPAINT = 'OUTPAINT',\n}\n\n/** @enumType */\nexport type EditImageWithPromptRequestModelWithLiterals =\n | EditImageWithPromptRequestModel\n | 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL'\n | 'INPAINT'\n | 'OUTPAINT';\n\nexport interface TextToImageResponse {\n /**\n * Generation TextToImageTaskResult\n * @maxSize 1000\n */\n data?: TextToImageTaskResult[];\n}\n\nexport interface TextToImageTaskResult {\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * The unique identifier of the image.\n * @format GUID\n */\n imageUuid?: string;\n /**\n * If outputType is set to URL, this parameter contains the URL of the image to be downloaded.\n * @maxLength 2048\n */\n imageUrl?: string | null;\n /** If the checkNSFW parameter is used, NSFWContent is included, indicating whether the image has been flagged as potentially sensitive content. */\n nsfwContent?: boolean;\n /** The cost of the generated image. */\n microcentsSpent?: string | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n}\n\nexport interface GenerateImageResponse {\n /**\n * Array of generated image results, one for each requested sampleCount\n * @maxSize 8\n */\n predictions?: Prediction[];\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface Prediction {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * Enhanced prompt used for generation (only returned for models that support prompt enhancement)\n * @maxLength 1000\n */\n prompt?: string | null;\n /**\n * The responsible AI filter reason\n * Only returned if includeRaiReason is enabled and this image was filtered out\n * @maxLength 1000\n */\n raiFilteredReason?: string | null;\n /** Safety attributes information */\n safetyAttributes?: SafetyAttributes;\n}\n\nexport interface SafetyAttributes {\n /**\n * The safety attribute categories\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /**\n * The safety attribute scores\n * @maxSize 100\n */\n scores?: number[] | null;\n}\n\nexport interface GenerateVideoResponse {\n /**\n * Generated videos\n * @maxSize 4\n */\n videos?: GeneratedVideo[];\n /** Cost of the request in micro-cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedVideo {\n /**\n * The URL of the generated video.\n * @format WEB_URL\n */\n videoUrl?: string | null;\n /**\n * The video MIME type (currently only \"video/mp4\")\n * @maxLength 50\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateImageMlPlatformResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n _id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /** Cost of the request in microcents.
*/\n microcentsSpent?: string | null;\n}\n\nexport interface CreateImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport enum OpenAiImageModel {\n UNKNOWN_IMAGE_CREATION_MODEL = 'UNKNOWN_IMAGE_CREATION_MODEL',\n GPT_4O_IMAGE = 'GPT_4O_IMAGE',\n GPT_IMAGE_1 = 'GPT_IMAGE_1',\n}\n\n/** @enumType */\nexport type OpenAiImageModelWithLiterals =\n | OpenAiImageModel\n | 'UNKNOWN_IMAGE_CREATION_MODEL'\n | 'GPT_4O_IMAGE'\n | 'GPT_IMAGE_1';\n\nexport interface ImageUsage {\n /** Number of tokens in the input */\n inputTokens?: number | null;\n /** Details about input tokens */\n inputTokensDetails?: OpenAiImageTokenDetails;\n /** Number of tokens in the output */\n outputTokens?: number | null;\n /** Total number of tokens used */\n totalTokens?: number | null;\n}\n\nexport interface OpenAiImageTokenDetails {\n /** Number of tokens used for image processing */\n imageTokens?: number | null;\n /** Number of tokens used for text processing */\n textTokens?: number | null;\n}\n\nexport interface EditImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface V1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: ChatCompletionModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: V1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface GoogleproxyV1ChatCompletionMessage {\n /** The role of the message author. 
*/\n role?: V1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: V1ChatCompletionMessageContentPart[];\n}\n\nexport interface V1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum V1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type V1ChatCompletionMessageMessageRoleWithLiterals =\n | V1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface V1ChatCompletionMessageContentPart\n extends V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport enum ChatCompletionModel {\n UNKNOWN_CHAT_COMPLETION_MODEL = 'UNKNOWN_CHAT_COMPLETION_MODEL',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-scout\n * llama-4-scout-17b-16e-instruct-maas\n */\n LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS = 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-maverick\n * llama-4-maverick-17b-128e-instruct-maas\n */\n LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS = 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS',\n}\n\n/** @enumType */\nexport type ChatCompletionModelWithLiterals =\n | ChatCompletionModel\n | 'UNKNOWN_CHAT_COMPLETION_MODEL'\n | 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS'\n | 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS';\n\nexport interface V1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: GoogleproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. 
*/\n created?: number | null;\n /**\n * Model that produced the completion.\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of chat completion choices. Can be more than one if n is greater than 1.\n * @maxSize 10000\n */\n choices?: Choice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: TokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface ChatCompletionMessage {\n /** The role of the message author. */\n role?: MessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: ContentPart[];\n}\n\nexport interface ImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum MessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type MessageRoleWithLiterals =\n | MessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface ContentPart extends ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface Choice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface TokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. 
*/\n totalTokens?: number | null;\n}\n\nexport interface VideoInferenceResponse {\n /**\n * Generation VideoInferenceTaskResult\n * @maxSize 1000\n */\n data?: VideoInferenceTaskResult[];\n}\n\nexport interface VideoInferenceTaskResult {\n /**\n * The API will return the taskType you sent in the request.\n * @maxLength 100\n */\n taskType?: string;\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * A unique identifier for the generated video.\n * @format GUID\n */\n videoUuid?: string | null;\n /**\n * If outputType is set to URL, this parameter contains the URL of the video to be downloaded.\n * @maxLength 10000\n */\n videoUrl?: string | null;\n /**\n * The seed value that was used to generate this video.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /** A cost of generated video. */\n microcentsSpent?: string | null;\n /**\n * The current processing status (for polling operations).\n * @maxLength 50\n */\n status?: string | null;\n}\n\nexport interface V1OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n _id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: OpenAiResponsesResponseIncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: V1ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: V1ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: V1ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface OpenAiResponsesResponseIncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport enum V1ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n O3_PRO_2025_06_10 = 'O3_PRO_2025_06_10',\n O3_DEEP_RESEARCH_2025_06_26 = 'O3_DEEP_RESEARCH_2025_06_26',\n GPT_5_CODEX = 'GPT_5_CODEX',\n GPT_5_1_2025_11_13 = 'GPT_5_1_2025_11_13',\n GPT_5_1_CODEX = 'GPT_5_1_CODEX',\n GPT_5_1_CODEX_MINI = 'GPT_5_1_CODEX_MINI',\n}\n\n/** @enumType */\nexport type V1ResponsesModelWithLiterals =\n | V1ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'O3_PRO_2025_06_10'\n | 'O3_DEEP_RESEARCH_2025_06_26'\n | 'GPT_5_CODEX'\n | 'GPT_5_1_2025_11_13'\n | 'GPT_5_1_CODEX'\n | 'GPT_5_1_CODEX_MINI';\n\nexport interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. 
*/\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: ResponsesOutputMessageOutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface V1OutputAnnotation\n extends V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\n/** @oneof */\nexport interface V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\nexport interface V1UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface ResponsesOutputMessageOutputContent {\n /**\n * The type of the output text output_text/refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: V1OutputAnnotation[];\n}\n\nexport interface V1ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: ResponsesWebSearchToolCallAction;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. 
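The `output` field docs above recommend not assuming the first output item is an assistant message. A sketch of an `output_text`-style convenience helper that walks the oneof instead, under the assumption that text parts use the `'output_text'` type string documented on `ResponsesOutputMessageOutputContent`:

```ts
import type { V1OpenAiResponsesResponse } from '@wix/auto_sdk_ai-gateway_prompts';

// Concatenates all visible text from a Responses API result.
function outputText(response: V1OpenAiResponsesResponse): string {
  return (response.output ?? [])
    // Output items are a oneof; only outputMessage entries carry user-visible text.
    .flatMap((item) => item.outputMessage?.content ?? [])
    .filter((part) => part.type === 'output_text')
    .map((part) => part.text ?? '')
    .join('');
}
```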
Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface ResponsesWebSearchToolCallAction {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\" - Performs a web search query.\n * Action type \"open_page\" - Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface V1ResponsesReasoningOutput {\n /** @maxLength 100 */\n _id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: V1ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: V1ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface V1ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface V1ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface V1ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: V1ResponsesCodeInterpreterOutput[];\n}\n\nexport interface V1ResponsesCodeInterpreterOutput\n extends V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface V1ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. 
Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface V1ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface V1ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface V1ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: V1ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: V1ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface V1ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface V1ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n _id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: IncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. 
*/\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface IncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport enum ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n}\n\n/** @enumType */\nexport type ResponsesModelWithLiterals =\n | ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES';\n\nexport interface ResponsesOutput extends ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. 
*/\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: OutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface OutputAnnotation extends OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\n/** @oneof */\nexport interface OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\nexport interface UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface OutputContent {\n /**\n * The type of the output text output_text/refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: OutputAnnotation[];\n}\n\nexport interface ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: Action;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. 
Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface Action {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\" - Performs a web search query.\n * Action type \"open_page\" - Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface ResponsesReasoningOutput {\n /** @maxLength 100 */\n _id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: ResponsesCodeInterpreterOutput[];\n}\n\nexport interface ResponsesCodeInterpreterOutput\n extends ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. 
Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface CreateVideoResponse {\n videoJob?: VideoJob;\n}\n\nexport interface VideoJob {\n /**\n * The unique identifier for the video generation job.\n * @maxLength 200\n */\n _id?: string | null;\n /**\n * The status of the response generation.\n * @maxLength 50\n */\n status?: string | null;\n /**\n * The generated video result url. Only present when status is \"completed\".\n * @maxLength 5000\n * @format WEB_URL\n */\n url?: string | null;\n /** Error payload that explains why generation failed, if applicable. */\n error?: ErrorInfo;\n /** The progress of the video generation as a percentage (0-100) */\n progress?: number | null;\n}\n\nexport interface ErrorInfo {\n /**\n * code\n * @maxLength 50\n */\n code?: string | null;\n /**\n * message\n * @maxLength 1000\n */\n message?: string | null;\n}\n\nexport interface GenerateContentByPromptObjectRequest {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface Prompt extends PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit response. 
*/\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n /**\n * Prompt id.\n * @format GUID\n */\n _id?: string | null;\n /**\n * Names of template parameters, that will be checked and substituted during GenerateText requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedParameterNames?: string[];\n /** FallbackPromptConfig object that describes optional second Prompt that can be invoked in case main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n /**\n * Names of dynamic properties, that will be checked and substituted during requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedDynamicPropertiesNames?: string[];\n}\n\n/** @oneof */\nexport interface PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. 
*/\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n}\n\nexport interface FallbackPromptConfig {\n /**\n * Id of the fallback Prompt. This Prompt will be used for text generation in case the invocation of original Prompt fails.\n * @format GUID\n */\n fallbackPromptId?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequest\n extends OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: OpenaiproxyV1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: CreateChatCompletionRequestFunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. 
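Tying the pieces together, a `GenerateContentByPromptObjectRequest` carries an inline `Prompt` (with exactly one model-specific request from the oneof) plus `params` for templated substitution. A minimal sketch; the `{{productName}}` placeholder syntax is illustrative, since the actual substitution token format is not shown in this file, and the GUID is a placeholder:

```ts
import type { GenerateContentByPromptObjectRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const generateRequest: GenerateContentByPromptObjectRequest = {
  prompt: {
    // Exactly one model-specific request from the oneof should be set.
    googleGeminiGenerateContentRequest: {
      model: 'GEMINI_2_5_FLASH',
      contents: [
        { role: 'USER', parts: [{ text: 'Write a tagline for {{productName}}.' }] },
      ],
    },
    templatedParameterNames: ['productName'],
    fallbackPromptConfig: {
      fallbackPromptId: '00000000-0000-0000-0000-000000000000', // placeholder GUID
    },
  },
  // Substituted into the templated parameters declared above.
  params: { productName: 'a ceramic mug' },
};
```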
Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: V1CreateChatCompletionRequestTool[];\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: V1FineTuningSpec;\n /**\n * An object specifying the format that the model must output. 
Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: OpenaiproxyV1CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * o1 models only\n * @maxLength 100\n */\n reasoningEffort?: string | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses.\n * Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface CreateChatCompletionRequestFunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. 
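The `responseFormat` docs above stress that JSON mode only works if the prompt itself also asks for JSON. A sketch of such a request; the message shape is an assumption (mirroring the role/contentParts pattern defined earlier in this file), since `OpenaiproxyV1ChatCompletionMessage` is declared elsewhere:

```ts
import type { OpenaiproxyV1CreateChatCompletionRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const request: OpenaiproxyV1CreateChatCompletionRequest = {
  messages: [
    {
      role: 'SYSTEM', // assumed to follow the same role enum pattern as above
      contentParts: [
        { type: 'text', text: 'Reply with a JSON object: {"summary": string}.' },
      ],
    },
    {
      role: 'USER',
      contentParts: [
        { type: 'text', text: 'Summarize: TypeScript adds static types to JavaScript.' },
      ],
    },
  ],
  temperature: 0.2,
  // JSON mode guarantees syntactically valid JSON, but only because the
  // system message above explicitly instructs the model to produce JSON.
  responseFormat: { type: 'json_object' },
};
```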
*/\n function?: CreateChatCompletionRequestFunctionSignature;\n}\n\nexport interface V1FineTuningSpec {\n /**\n * Organization field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:<my-org>:custom_suffix:id\n * @maxLength 100\n */\n org?: string | null;\n /**\n * Suffix field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:<custom_suffix>:id\n * @maxLength 100\n */\n suffix?: string | null;\n /**\n * Id field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:custom_suffix:<id>\n * @maxLength 100\n */\n _id?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface TextBisonPredictRequest {\n /**\n * TextInstance objects containing input prompts.\n * @maxSize 100\n */\n instances?: TextInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: TextBisonModelWithLiterals;\n}\n\nexport interface TextInstance {\n /**\n * Text input to generate model response. Prompts can include preamble, questions, suggestions, instructions, or examples.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface PredictParameters {\n /**\n * The temperature is used for sampling during response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that\n * require a less open-ended or creative response, while higher temperatures can lead to more diverse or creative results.\n * A temperature of 0 means that the highest probability tokens are always selected. In this case, responses for a\n * given prompt are mostly deterministic, but a small amount of variation is still possible.\n * For most use cases, try starting with a temperature of 0.2. If the model returns a response that's too generic,\n * too short, or the model gives a fallback response, try increasing the temperature.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for longer responses.\n * @min 1\n * @max 2048\n */\n maxOutputTokens?: number | null;\n /**\n * Top-K changes how the model selects tokens for output. A top-K of 1 means the next selected token is the most probable\n * among all tokens in the model's vocabulary (also called greedy decoding), while a top-K of 3 means that the next\n * token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled. Then tokens are further\n * filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-K is 40.\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output. 
Tokens are selected from the most (see top-K) to least\n * probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have a\n * probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next\n * token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-P is 0.95.\n * @max 1\n */\n topP?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in\n * the response. If a string appears multiple times in the response, then the response truncates where it's first\n * encountered. The strings are case-sensitive.\n * @maxSize 100\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * The number of response variations to return.\n * @min 1\n * @max 8\n */\n candidateCount?: number | null;\n}\n\nexport enum TextBisonModel {\n UNKNOWN_TEXT_BISON_MODEL = 'UNKNOWN_TEXT_BISON_MODEL',\n TEXT_BISON = 'TEXT_BISON',\n TEXT_BISON_001 = 'TEXT_BISON_001',\n TEXT_BISON_32K = 'TEXT_BISON_32K',\n TEXT_BISON_002 = 'TEXT_BISON_002',\n TEXT_BISON_32K_002 = 'TEXT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type TextBisonModelWithLiterals =\n | TextBisonModel\n | 'UNKNOWN_TEXT_BISON_MODEL'\n | 'TEXT_BISON'\n | 'TEXT_BISON_001'\n | 'TEXT_BISON_32K'\n | 'TEXT_BISON_002'\n | 'TEXT_BISON_32K_002';\n\nexport interface ChatBisonPredictRequest {\n /**\n * ChatInstance objects containing inputs.\n * @maxSize 100\n */\n instances?: ChatInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: ChatBisonModelWithLiterals;\n}\n\nexport interface ChatInstance {\n /**\n * Optional. Context shapes how the model responds throughout the conversation. For example, you can use context\n * to specify words the model can or cannot use, topics to focus on or avoid, or the response format or style.\n * @maxLength 100000\n */\n context?: string | null;\n /**\n * Optional. Examples for the model to learn how to respond to the conversation.\n * @maxSize 1000\n */\n examples?: Example[];\n /**\n * Required. Conversation history provided to the model in a structured alternate-author form. Messages appear in\n * chronological order: oldest first, newest last. When the history of messages causes the input to exceed the\n * maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.\n * @maxSize 1000\n */\n messages?: ChatMessage[];\n}\n\nexport interface Example {\n /** An example of an input Message from the user. */\n input?: ChatMessage;\n /** An example of what the model should output given the input. */\n output?: ChatMessage;\n}\n\nexport enum ChatBisonModel {\n UNKNOWN_CHAT_BISON_MODEL = 'UNKNOWN_CHAT_BISON_MODEL',\n CHAT_BISON = 'CHAT_BISON',\n CHAT_BISON_001 = 'CHAT_BISON_001',\n CHAT_BISON_32K = 'CHAT_BISON_32K',\n CHAT_BISON_002 = 'CHAT_BISON_002',\n CHAT_BISON_32K_002 = 'CHAT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type ChatBisonModelWithLiterals =\n | ChatBisonModel\n | 'UNKNOWN_CHAT_BISON_MODEL'\n | 'CHAT_BISON'\n | 'CHAT_BISON_001'\n | 'CHAT_BISON_32K'\n | 'CHAT_BISON_002'\n | 'CHAT_BISON_32K_002';\n\nexport interface CreateChatCompletionRequest\n extends CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. 
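The `PredictParameters` docs above suggest starting around temperature 0.2, with defaults of top-K 40 and top-P 0.95. A sketch of a complete `TextBisonPredictRequest` using those starting points (all types and model literals are defined in this file):

```ts
import type { TextBisonPredictRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const bisonRequest: TextBisonPredictRequest = {
  model: 'TEXT_BISON_002',
  instances: [
    { prompt: 'Write a one-sentence product description for a ceramic mug.' },
  ],
  parameters: {
    temperature: 0.2,     // mostly deterministic; raise it if responses feel too generic
    maxOutputTokens: 256, // ~60-80 words per 100 tokens, per the field docs
    topK: 40,             // documented default
    topP: 0.95,           // documented default
    stopSequences: ['\n\n'],
    candidateCount: 1,
  },
};
```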
*/\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: V1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: V1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: FunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. 
Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: CreateChatCompletionRequestTool[];\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface FunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. 
*/\n function?: FunctionSignature;\n}\n\nexport interface CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface GenerateContentRequest {\n /** ID of the model to use. */\n model?: GoogleproxyV1ModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n contents?: Content[];\n /** The system instruction to the model. */\n systemInstruction?: SystemInstruction;\n /**\n * A list of Tools the model may use to generate the next response.\n * @maxSize 1000\n */\n tools?: GoogleproxyV1Tool[];\n /**\n * Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.\n * @maxSize 100\n */\n safetySettings?: SafetySetting[];\n /** The generation configuration for the response. */\n generationConfig?: GenerationConfig;\n /** Tool configuration for any Tool specified in the request. */\n toolConfig?: ToolConfig;\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: FineTuningSpec;\n}\n\nexport enum GoogleproxyV1Model {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n GEMINI_1_0_PRO = 'GEMINI_1_0_PRO',\n GEMINI_1_0_PRO_VISION = 'GEMINI_1_0_PRO_VISION',\n GEMINI_1_5_PRO = 'GEMINI_1_5_PRO',\n GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH',\n GEMINI_2_0_FLASH = 'GEMINI_2_0_FLASH',\n GEMINI_2_0_FLASH_LITE = 'GEMINI_2_0_FLASH_LITE',\n GEMINI_2_5_PRO = 'GEMINI_2_5_PRO',\n GEMINI_2_5_FLASH = 'GEMINI_2_5_FLASH',\n GEMINI_2_5_FLASH_LITE = 'GEMINI_2_5_FLASH_LITE',\n GEMINI_2_5_FLASH_IMAGE = 'GEMINI_2_5_FLASH_IMAGE',\n GEMINI_2_5_COMPUTER_USE = 'GEMINI_2_5_COMPUTER_USE',\n GEMINI_3_0_PRO = 'GEMINI_3_0_PRO',\n GEMINI_3_0_PRO_IMAGE = 'GEMINI_3_0_PRO_IMAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ModelWithLiterals =\n | GoogleproxyV1Model\n | 'UNKNOWN_MODEL'\n | 'GEMINI_1_0_PRO'\n | 'GEMINI_1_0_PRO_VISION'\n | 'GEMINI_1_5_PRO'\n | 'GEMINI_1_5_FLASH'\n | 'GEMINI_2_0_FLASH'\n | 'GEMINI_2_0_FLASH_LITE'\n | 'GEMINI_2_5_PRO'\n | 'GEMINI_2_5_FLASH'\n | 'GEMINI_2_5_FLASH_LITE'\n | 'GEMINI_2_5_FLASH_IMAGE'\n | 'GEMINI_2_5_COMPUTER_USE'\n | 'GEMINI_3_0_PRO'\n | 'GEMINI_3_0_PRO_IMAGE';\n\nexport interface Content {\n /**\n * The role in a conversation associated with the content.\n * Specifying a role is required even in single turn use cases. Acceptable values include the following:\n * USER: Specifies content that's sent by you. MODEL: Specifies the model's response.\n */\n role?: ContentRoleWithLiterals;\n /**\n * Ordered parts that make up the input. Parts may have different MIME types.\n * For gemini-1.0-pro, only the text field is valid. The token limit is 32k.\n * For gemini-1.0-pro-vision, you may specify either text only, text and up to 16 images, or text and 1 video. 
The token limit is 16k.\n * @maxSize 1000\n */\n parts?: V1ContentPart[];\n}\n\nexport enum ContentRole {\n UNKNOWN_CONTENT_ROLE = 'UNKNOWN_CONTENT_ROLE',\n USER = 'USER',\n MODEL = 'MODEL',\n}\n\n/** @enumType */\nexport type ContentRoleWithLiterals =\n | ContentRole\n | 'UNKNOWN_CONTENT_ROLE'\n | 'USER'\n | 'MODEL';\n\nexport interface V1ContentPart {\n /**\n * Union field data can be only one of the following:\n * The text instructions or chat dialogue to include in the prompt.\n * @maxLength 1000000000\n */\n text?: string | null;\n /** data field not supported for gemini-1.0-pro */\n contentData?: ContentData;\n /** A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. */\n functionCall?: FunctionCall;\n /**\n * The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the\n * function is used as context to the model. This should contain the result of aFunctionCall made based on model prediction.\n */\n functionResponse?: FunctionResponse;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /** Optional. Media resolution level for the input media. */\n mediaResolution?: MediaResolution;\n /** Thought flag indicates that the content part is a thought. */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 1000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ContentData {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * The MIME type of the content data. supported types are image/jpeg, image/png.\n * @maxLength 100\n */\n mimeType?: string | null;\n}\n\nexport interface FunctionResponse {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string;\n /** Required. The function response in JSON object format. */\n response?: Record<string, any> | null;\n}\n\nexport interface MediaResolution {\n /** Media resolution level */\n level?: MediaResolutionLevelWithLiterals;\n}\n\nexport enum MediaResolutionLevel {\n /** Media resolution has not been set. */\n MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED',\n /** Media resolution set to low (64 tokens). */\n MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW',\n /** Media resolution set to medium (256 tokens). */\n MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM',\n /** Media resolution set to high (zoomed reframing with 256 tokens). 
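To make the Content and V1ContentPart shapes above concrete, here is a small sketch of a multi-turn Gemini conversation using these typings; the image URL is a hypothetical placeholder for a valid wix static URL.

import type { Content } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const contents: Content[] = [
  {
    role: 'USER',
    parts: [
      { text: 'What is in this image?' },
      { contentData: { url: 'https://static.wixstatic.com/media/example.jpg', mimeType: 'image/jpeg' } }, // placeholder URL
    ],
  },
  { role: 'MODEL', parts: [{ text: 'A red bicycle leaning against a brick wall.' }] },
  { role: 'USER', parts: [{ text: 'What color is the wall?' }] },
];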
*/\n MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH',\n}\n\n/** @enumType */\nexport type MediaResolutionLevelWithLiterals =\n | MediaResolutionLevel\n | 'MEDIA_RESOLUTION_UNSPECIFIED'\n | 'MEDIA_RESOLUTION_LOW'\n | 'MEDIA_RESOLUTION_MEDIUM'\n | 'MEDIA_RESOLUTION_HIGH';\n\nexport interface SystemInstruction {\n /**\n * The role field of systemInstruction is ignored and doesn't affect the performance of the model.\n * @maxLength 20\n */\n role?: string | null;\n /**\n * Instructions for the model to steer it toward better performance.\n * The text strings count toward the token limit.\n * @maxSize 10\n */\n parts?: V1ContentPart[];\n}\n\nexport interface GoogleproxyV1Tool {\n /**\n * One or more function declarations.\n * More information about the function declarations:\n * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling\n * @maxSize 1000\n */\n functionDeclarations?: FunctionDeclaration[];\n /** Optional. Retrieval tool that is powered by Google search. */\n googleSearchRetrieval?: GoogleSearchRetrieval;\n /** Optional. Enables the model to execute code as part of generation. */\n codeExecution?: CodeExecution;\n /** Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. */\n googleSearch?: GoogleSearch;\n}\n\nexport enum DynamicRetrievalConfigMode {\n /** Always trigger retrieval. */\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n /** Run retrieval only when system decides it is necessary. */\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\n/** @enumType */\nexport type DynamicRetrievalConfigModeWithLiterals =\n | DynamicRetrievalConfigMode\n | 'MODE_UNSPECIFIED'\n | 'MODE_DYNAMIC';\n\nexport interface DynamicRetrievalConfig {\n /** The mode of the predictor to be used in dynamic retrieval. */\n mode?: DynamicRetrievalConfigModeWithLiterals;\n /** The threshold to be used in dynamic retrieval. If not set, a system default value is used. */\n dynamicThreshold?: string | null;\n}\n\nexport interface FunctionDeclaration {\n /**\n * The name of the function to call. Must start with a letter or an underscore.\n * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description and purpose of the function. The model uses this to decide how and whether to call the function.\n * For the best results, we recommend that you include a description.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * The parameters of this function in a format that's compatible with the OpenAPI schema:\n * https://spec.openapis.org/oas/v3.0.3#schema\n */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GoogleSearchRetrieval {\n /** Specifies the dynamic retrieval configuration for the given source. */\n dynamicRetrievalConfig?: DynamicRetrievalConfig;\n}\n\nexport interface CodeExecution {}\n\nexport interface GoogleSearch {}\n\nexport interface SafetySetting {\n /** The safety category to configure a threshold for. */\n category?: HarmCategoryWithLiterals;\n /** The threshold for blocking responses that could belong to the specified safety category based on probability. 
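A function declaration attached to a GoogleproxyV1Tool might look like the sketch below; the tool name and parameter schema are hypothetical, and the parameters object follows the OpenAPI-compatible schema format referenced above.

import type { GoogleproxyV1Tool } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const weatherTool: GoogleproxyV1Tool = {
  functionDeclarations: [
    {
      name: 'get_weather', // hypothetical function
      description: 'Returns the current weather for a given city.',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
    },
  ],
};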
*/\n threshold?: ThresholdWithLiterals;\n}\n\nexport enum Threshold {\n UNKNOWN_THRESHOLD = 'UNKNOWN_THRESHOLD',\n BLOCK_NONE = 'BLOCK_NONE',\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MED_AND_ABOVE = 'BLOCK_MED_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n}\n\n/** @enumType */\nexport type ThresholdWithLiterals =\n | Threshold\n | 'UNKNOWN_THRESHOLD'\n | 'BLOCK_NONE'\n | 'BLOCK_LOW_AND_ABOVE'\n | 'BLOCK_MED_AND_ABOVE'\n | 'BLOCK_ONLY_HIGH';\n\nexport interface GenerationConfig {\n /**\n * The temperature is used for sampling during the response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection.\n * Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response,\n * while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic:\n * the highest probability response is always selected.\n * Range: 0.0 - 1.0, Default for gemini-1.0-pro: 0.9, Default for gemini-1.0-pro-vision: 0.4\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters.\n * 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for potentially longer responses.\n * Range for gemini-1.0-pro: 1-8192 (default: 8192),\n * Range for gemini-1.0-pro-vision: 1-2048 (default: 2048)\n * Range for gemini-2.5-pro: 1-65536\n * @min 1\n * @max 65536\n */\n maxOutputTokens?: string | null;\n /**\n * Top-K changes how the model selects tokens for output.\n * A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-K of 3 means that the next token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled.\n * Then tokens are further filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default for gemini-1.0-pro-vision: 32, Default for gemini-1.0-pro: none\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output.\n * Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value.\n * For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5,\n * then the model will select either A or B as the next token by using temperature and exclude C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default: 1.0\n * @max 1\n */\n topP?: number | null;\n /**\n * The number of response variations to return. This value must be 1.\n * @min 1\n * @max 1\n */\n candidateCount?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in the response.\n * If a string appears multiple times in the response, then the response truncates where it's first encountered. 
The strings are case-sensitive.\n * For example, if the following is the returned response when stopSequences isn't specified:\n * public static string reverse(string myString)\n * Then the returned response with stopSequences set to [\"Str\",\"reverse\"] is:\n * public static string\n * Maximum 5 items in the list.\n * @maxSize 5\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * Available for gemini-1.5-pro\n * The output format of the generated candidate text.\n * Supported MIME types: text/plain: (default) Text output. application/json: JSON response in the candidates.\n * text/x.enum: For classification tasks, output an enum value as defined in the response schema.\n * How to control the output format: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output\n * @maxLength 50\n */\n responseMimeType?: string | null;\n /**\n * Available for gemini-1.5-pro.\n * The schema that generated candidate text must follow. For more information, see Control generated output.\n * You must specify the responseType or responseMimeType field to use this parameter.\n * Link for examples : https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema\n */\n responseSchema?: Record<string, any> | null;\n /**\n * Optional. Output schema of the generated response. This is an alternative to responseSchema that accepts JSON Schema.\n * If set, responseSchema must be omitted, but responseMimeType is required.\n * While the full JSON Schema may be sent, not all features are supported.\n * more information about supported features and examples can be found here:\n * https://ai.google.dev/api/generate-content#FIELDS.response_json_schema\n */\n responseJsonSchema?: Record<string, any> | null;\n /** Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking */\n thinkingConfig?: GenerationThinkingConfig;\n /**\n * Optional. The requested modalities of the response.\n * Represents the set of modalities that the model can return, and should be expected in the response.\n * This is an exact match to the modalities of the response.\n * A model may have multiple combinations of supported modalities.\n * If the requested modalities do not match any of the supported combinations, an error will be returned.\n * An empty list is equivalent to requesting only TEXT.\n * Currently supported as experimental feature for gemini-2.0-flash only.\n * @maxSize 5\n */\n responseModalities?: ModalityWithLiterals[];\n /**\n * Optional. Configuration for image generation.\n * This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people.\n */\n imageConfig?: ImageConfig;\n /**\n * The media_resolution parameter controls how the Gemini API processes media inputs like images, videos,\n * and PDF documents by determining the maximum number of tokens allocated for media inputs,\n * allowing you to balance response quality against latency and cost.\n */\n mediaResolution?: MediaResolutionLevelWithLiterals;\n}\n\nexport interface GenerationThinkingConfig {\n /** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */\n includeThoughts?: boolean | null;\n /** The number of thoughts tokens that the model should generate. */\n thinkingBudget?: string | null;\n /**\n * Thinking level parameter offering 2 states:\n * Low: Minimizes latency and cost. 
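As a sketch of the generation parameters described above (note that maxOutputTokens is string-typed in these typings), a GenerationConfig could be assembled like this; the specific values are illustrative only.

import type { GenerationConfig } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const generationConfig: GenerationConfig = {
  temperature: 0.4,
  topK: 32,
  topP: 0.8,
  maxOutputTokens: '2048', // string-typed here
  candidateCount: 1, // must be 1
  stopSequences: ['Str', 'reverse'], // response truncates at the first case-sensitive match
  responseMimeType: 'application/json', // JSON response in the candidates
};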
Best for simple instruction following or chat.\n * High: Maximizes reasoning depth. Default. Dynamic thinking.\n * The model may take significantly longer to reach a first token,\n * but the output will be more thoroughly vetted.\n * Note: You cannot use both thinking_level and the legacy thinking_budget parameter in the same request. Doing so will return a 400 error\n * @maxLength 20\n */\n thinkingLevel?: string | null;\n}\n\nexport interface ImageConfig {\n /** Optional. The image output format for generated images. */\n imageOutputOptions?: ImageOutputOptions;\n /**\n * Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported:\n * \"1:1\" \"2:3\", \"3:2\" \"3:4\", \"4:3\" \"4:5\", \"5:4\" \"9:16\", \"16:9\" \"21:9\"\n * @maxLength 10\n */\n aspectRatio?: string | null;\n /** Optional. Controls whether the model can generate people. */\n personGeneration?: PersonGenerationWithLiterals;\n}\n\nexport interface ImageOutputOptions {\n /**\n * Optional. The image format that the output should be saved as.\n * @maxLength 100\n */\n mimeType?: string | null;\n /** Optional. The compression quality of the output image. */\n compressionQuality?: string | null;\n}\n\nexport enum PersonGeneration {\n /** The default behavior is unspecified. The model will decide whether to generate images of people. */\n PERSON_GENERATION_UNSPECIFIED = 'PERSON_GENERATION_UNSPECIFIED',\n /** Allows the model to generate images of people, including adults and children. */\n ALLOW_ALL = 'ALLOW_ALL',\n /** Allows the model to generate images of adults, but not children. */\n ALLOW_ADULT = 'ALLOW_ADULT',\n /** Prevents the model from generating images of people. */\n ALLOW_NONE = 'ALLOW_NONE',\n}\n\n/** @enumType */\nexport type PersonGenerationWithLiterals =\n | PersonGeneration\n | 'PERSON_GENERATION_UNSPECIFIED'\n | 'ALLOW_ALL'\n | 'ALLOW_ADULT'\n | 'ALLOW_NONE';\n\nexport interface ToolConfig {\n /** Function calling config. */\n functionCallingConfig?: FunctionCallingConfig;\n}\n\nexport interface FunctionCallingConfig {\n /** Specifies the mode in which function calling should execute. If unspecified, the default value will be set to AUTO. */\n mode?: ModeWithLiterals;\n /**\n * A set of function names that, when provided, limits the functions the model will call.\n * This should only be set when the Mode is ANY or VALIDATED. Function names should match [FunctionDeclaration.name]. When set, model will predict a function call from only allowed function names.\n * @maxLength 64\n * @maxSize 100\n */\n allowedFunctionNames?: string[];\n}\n\nexport enum Mode {\n UNKNOWN = 'UNKNOWN',\n /** Default model behavior, model decides to predict either a function call or a natural language response. */\n AUTO = 'AUTO',\n /**\n * Model is constrained to always predicting a function call only. If \"allowedFunctionNames\" are set, the predicted function call will be limited to any one of \"allowedFunctionNames\",\n * else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n ANY = 'ANY',\n /** Model will not predict any function call. Model behavior is same as when not passing any function declarations. */\n NONE = 'NONE',\n /**\n * Model decides to predict either a function call or a natural language response, but will validate function calls with constrained decoding. 
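For image-producing Gemini models, the ImageConfig above can be filled in as in this sketch; the values are illustrative, and compressionQuality is string-typed in these typings.

import type { ImageConfig } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const imageConfig: ImageConfig = {
  aspectRatio: '16:9', // one of the supported ratios listed above
  personGeneration: 'ALLOW_ADULT', // adults but not children
  imageOutputOptions: { mimeType: 'image/jpeg', compressionQuality: '80' },
};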
If \"allowedFunctionNames\" are set, the predicted function call will be\n * limited to any one of \"allowedFunctionNames\", else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n VALIDATED = 'VALIDATED',\n}\n\n/** @enumType */\nexport type ModeWithLiterals =\n | Mode\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'NONE'\n | 'VALIDATED';\n\nexport interface FineTuningSpec {\n /**\n * Endpoint ID of the fine-tuning model to use.\n * @maxLength 100\n */\n _id?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: ModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport enum Model {\n UNKNOWN = 'UNKNOWN',\n /** anthropic.claude-3-sonnet-20240229-v1:0 */\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n /** anthropic.claude-3-haiku-20240307-v1:0 */\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n /** anthropic.claude-3-5-sonnet-20240620-v1:0 */\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n /** anthropic.claude-3-5-sonnet-20241022-v2:0 */\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n /** us.anthropic.claude-3-5-haiku-20241022-v1:0 */\n CLAUDE_3_5_HAIKU_1_0 = 'CLAUDE_3_5_HAIKU_1_0',\n /** us.anthropic.claude-3-7-sonnet-20250219-v1:0 */\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n /** us.anthropic.claude-sonnet-4-5-20250929-v1:0 */\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n /** us.anthropic.claude-haiku-4-5-20251001-v1:0 */\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ModelWithLiterals =\n | Model\n | 'UNKNOWN'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_5_HAIKU_1_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: RoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: ContentBlock[];\n}\n\nexport interface Tool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n}\n\nexport enum ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type ToolChoiceTypeWithLiterals =\n | ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: ToolConfiguration;\n}\n\nexport enum McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type McpServerTypeWithLiterals = McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface ToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: ClaudeModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: V1AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: GoogleproxyV1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: InvokeAnthropicClaudeModelRequestTool[];\n /**\n * How the model should use the provided tools. 
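Putting the Claude-over-Bedrock pieces together, the sketch below builds an InvokeAnthropicClaudeModelRequest with extended thinking enabled; the role literal and the text content block are assumptions, since RoleWithLiterals and ContentBlock are defined elsewhere in these typings.

import type { InvokeAnthropicClaudeModelRequest } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const claudeRequest: InvokeAnthropicClaudeModelRequest = {
  model: 'CLAUDE_4_5_SONNET_1_0',
  maxTokens: 4096,
  temperature: 0.2, // analytical task: keep near 0.0, and do not also set topP
  thinking: { enabled: true, budgetTokens: 2048 }, // >= 1024 and below maxTokens
  messages: [
    { role: 'USER', content: [{ text: 'Review this clause for ambiguity.' }] }, // role and block shapes assumed
  ],
};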
The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: GoogleproxyV1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: GoogleproxyV1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: GoogleproxyV1McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface GoogleproxyV1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport enum ClaudeModel {\n UNKNOWN_CLAUDE_MODEL = 'UNKNOWN_CLAUDE_MODEL',\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_OPUS_1_0 = 'CLAUDE_3_OPUS_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ClaudeModelWithLiterals =\n | ClaudeModel\n | 'UNKNOWN_CLAUDE_MODEL'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_OPUS_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface V1AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GoogleproxyV1ContentBlock[];\n}\n\nexport interface InvokeAnthropicClaudeModelRequestTool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: GoogleproxyV1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface GoogleproxyV1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. 
This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: GoogleproxyV1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum GoogleproxyV1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ToolChoiceTypeWithLiterals =\n | GoogleproxyV1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface GoogleproxyV1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface GoogleproxyV1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: GoogleproxyV1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: V1McpServerToolConfiguration;\n}\n\nexport enum GoogleproxyV1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1McpServerTypeWithLiterals =\n | GoogleproxyV1McpServerType\n | 'UNKNOWN'\n | 'URL';\n\nexport interface V1McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface InvokeAnthropicModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
*/\n model?: AnthropicModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicMessage[];\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: V1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: V1Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: V1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: V1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: V1McpServer[];\n /**\n * Container identifier for reuse across requests.\n * @maxLength 512\n */\n container?: string | null;\n /** An object describing metadata about the request. */\n metadata?: RequestMetadata;\n /** Desired output format. 
*/\n outputFormat?: Record<string, any> | null;\n}\n\nexport enum AnthropicModel {\n UNKNOWN_ANTHROPIC_MODEL = 'UNKNOWN_ANTHROPIC_MODEL',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_1_OPUS_1_0 = 'CLAUDE_4_1_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type AnthropicModelWithLiterals =\n | AnthropicModel\n | 'UNKNOWN_ANTHROPIC_MODEL'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_1_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicMessage {\n /** The role of the message author. */\n role?: MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\n/** Top-level tool wrapper. Exactly one branch is set. */\nexport interface V1Tool extends V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\n/** @oneof */\nexport interface V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool 
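Because V1Tool is a oneof wrapper, each array entry sets exactly one branch. A sketch mixing a server tool and a client tool, under the same assumed import:

import type { V1Tool } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const tools: V1Tool[] = [
  { webSearch: { maxUses: 3 } }, // server tool: real-time web results with citations
  { bash: { name: 'bash' } }, // client tool: persistent shell session; name must be "bash"
];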
(Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\nexport interface CustomTool {\n /**\n * The name of the tool. Must match the regex ^[a-zA-Z0-9_-]{1,64}$.\n * @maxLength 1000\n */\n name?: string;\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: V1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface ComputerUseTool {\n /** Display width in pixels, recommend ≤1280 */\n displayWidthPx?: number;\n /** Display height in pixels, recommend ≤800 */\n displayHeightPx?: number;\n /** Display number for X11 environments */\n displayNumber?: number | null;\n}\n\nexport interface TextEditorTool {\n /** Parameter to control truncation when viewing large files. Available only for text_editor_20250728 and later. */\n maxCharacters?: number | null;\n}\n\nexport interface BashTool {\n /**\n * Name must be \"bash\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebSearchTool {\n /** Optional: Limit the number of searches per request; exceeding -> error \"max_uses_exceeded\". */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only include results from these domains, e.g. 
\"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never include results from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Localize search results */\n userLocation?: WebSearchUserLocation;\n /** Optional: caches the tool definition only (it will not cache the results) */\n cacheControl?: V1CacheControl;\n}\n\nexport interface WebSearchUserLocation {\n /**\n * The type of location (must be \"approximate\")\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The city name\n * @maxLength 500\n */\n city?: string | null;\n /**\n * The region or state\n * @maxLength 500\n */\n region?: string | null;\n /**\n * The country\n * @maxLength 500\n */\n country?: string | null;\n /**\n * The IANA timezone ID, e.g. \"America/Los_Angeles\"\n * @maxLength 500\n */\n timezone?: string | null;\n}\n\nexport interface CodeExecutionTool {\n /**\n * Name must be \"code_execution\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebFetchTool {\n /** Optional: Limit the number of fetches per request */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only fetch from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never fetch from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Enable citations for fetched content */\n citations?: CitationsEnabled;\n /** Optional: Maximum content length in tokens */\n maxContentTokens?: number | null;\n}\n\nexport interface V1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: V1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum V1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n NONE = 'NONE',\n}\n\n/** @enumType */\nexport type V1ToolChoiceTypeWithLiterals =\n | V1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL'\n | 'NONE';\n\nexport interface V1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n * @min 1024\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface V1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: V1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: McpServerToolConfiguration;\n}\n\nexport enum V1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type V1McpServerTypeWithLiterals = V1McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface RequestMetadata {\n /**\n * An external identifier for the user who is associated with the request.\n * This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. Do not include any identifying information such as name, email address, or phone number.\n * Maximum length: 256\n * Examples: \"13803d75-b4b5-4c3e-b2a2-6f21399b021b\"\n * @maxLength 256\n */\n userId?: string | null;\n}\n\nexport interface InvokeLlamaModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: LlamaModelWithLiterals;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport enum LlamaModel {\n UNKNOWN_LLAMA_MODEL = 'UNKNOWN_LLAMA_MODEL',\n /** meta.llama3-8b-instruct-v1:0 */\n LLAMA_3_8B_INSTRUCT_1_0 = 'LLAMA_3_8B_INSTRUCT_1_0',\n /** meta.llama3-70b-instruct-v1:0 */\n LLAMA_3_70B_INSTRUCT_1_0 = 'LLAMA_3_70B_INSTRUCT_1_0',\n /** meta.llama3-1-8b-instruct-v1:0 */\n LLAMA_3_1_8B_INSTRUCT_1_0 = 'LLAMA_3_1_8B_INSTRUCT_1_0',\n /** meta.llama3-1-70b-instruct-v1:0 */\n LLAMA_3_1_70B_INSTRUCT_1_0 = 'LLAMA_3_1_70B_INSTRUCT_1_0',\n /** meta.llama3-2-1b-instruct-v1:0 */\n LLAMA_3_2_1B_INSTRUCT_1_0 = 'LLAMA_3_2_1B_INSTRUCT_1_0',\n /** meta.llama3-2-3b-instruct-v1:0 */\n LLAMA_3_2_3B_INSTRUCT_1_0 = 'LLAMA_3_2_3B_INSTRUCT_1_0',\n}\n\n/** @enumType */\nexport type LlamaModelWithLiterals =\n | LlamaModel\n | 'UNKNOWN_LLAMA_MODEL'\n | 'LLAMA_3_8B_INSTRUCT_1_0'\n | 'LLAMA_3_70B_INSTRUCT_1_0'\n | 'LLAMA_3_1_8B_INSTRUCT_1_0'\n | 'LLAMA_3_1_70B_INSTRUCT_1_0'\n | 'LLAMA_3_2_1B_INSTRUCT_1_0'\n | 'LLAMA_3_2_3B_INSTRUCT_1_0';\n\nexport interface CreateImageRequest {\n /**\n * A text description of the desired image(s). 
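A minimal Llama-over-Bedrock request against the typings above could look like this; the prompt is a plain placeholder (for Llama chat models the conversation should be formatted with the chat template mentioned in the field's doc, which is not reproduced here).

import type { InvokeLlamaModelRequest } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const llamaRequest: InvokeLlamaModelRequest = {
  model: 'LLAMA_3_1_70B_INSTRUCT_1_0',
  prompt: 'Explain what a sourcemap is in one paragraph.', // placeholder prompt
  maxGenLen: 512, // output is truncated beyond this
  temperature: 0.5, // lower = less random
};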
The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: V1ImageModelWithLiterals;\n /** The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.\n */\n quality?: ImageQualityWithLiterals;\n /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. */\n size?: ImageSizeWithLiterals;\n /**\n * The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images.\n * Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3.\n */\n style?: ImageStyleWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n}\n\nexport enum ImageQuality {\n UNKNOWN_IMAGE_QUALITY = 'UNKNOWN_IMAGE_QUALITY',\n STANDARD = 'STANDARD',\n HD = 'HD',\n}\n\n/** @enumType */\nexport type ImageQualityWithLiterals =\n | ImageQuality\n | 'UNKNOWN_IMAGE_QUALITY'\n | 'STANDARD'\n | 'HD';\n\nexport enum ImageSize {\n UNKNOWN_IMAGE_SIZE = 'UNKNOWN_IMAGE_SIZE',\n SIZE_256X256 = 'SIZE_256X256',\n SIZE_512X512 = 'SIZE_512X512',\n SIZE_1024X1024 = 'SIZE_1024X1024',\n SIZE_1792X1024 = 'SIZE_1792X1024',\n SIZE_1024X1792 = 'SIZE_1024X1792',\n}\n\n/** @enumType */\nexport type ImageSizeWithLiterals =\n | ImageSize\n | 'UNKNOWN_IMAGE_SIZE'\n | 'SIZE_256X256'\n | 'SIZE_512X512'\n | 'SIZE_1024X1024'\n | 'SIZE_1792X1024'\n | 'SIZE_1024X1792';\n\nexport enum ImageStyle {\n UNKNOWN_IMAGE_STYLE = 'UNKNOWN_IMAGE_STYLE',\n VIVID = 'VIVID',\n NATURAL = 'NATURAL',\n}\n\n/** @enumType */\nexport type ImageStyleWithLiterals =\n | ImageStyle\n | 'UNKNOWN_IMAGE_STYLE'\n | 'VIVID'\n | 'NATURAL';\n\nexport interface V1TextToImageRequest {\n /** The model to use for generating the image. */\n model?: ImageModelWithLiterals;\n /** Height of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n height?: number | null;\n /** Width of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n width?: number | null;\n /**\n * An array of text prompts to use for generation.\n * @minSize 1\n * @maxSize 10\n */\n textPrompts?: TextPrompt[];\n /** How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt. Default: 7 */\n cfgScale?: number | null;\n /**\n * CLIP Guidance is a technique that uses the CLIP neural network to guide the generation of images to be more in-line with your included prompt,\n * which often results in improved coherency.\n */\n clipGuidancePreset?: ClipGuidancePresetWithLiterals;\n /** Which sampler to use for the diffusion process. If this value is omitted we'll automatically select an appropriate sampler for you. */\n sampler?: SamplerWithLiterals;\n /** Number of images to generate. Default: 1 */\n samples?: number | null;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) 
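For DALL-E-style image generation, the enums above map onto a request like the following sketch; the model field is omitted because V1ImageModelWithLiterals is defined elsewhere in these typings, and the prompt is illustrative.

import type { CreateImageRequest } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed entry point

const imageRequest: CreateImageRequest = {
  prompt: 'A watercolor lighthouse at dawn', // up to 4000 chars for dall-e-3
  n: 1, // dall-e-3 supports only n=1
  quality: 'HD', // dall-e-3 only
  size: 'SIZE_1792X1024', // one of the dall-e-3 sizes
  style: 'NATURAL', // less hyper-real output
};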
*/\n seed?: string | null;\n /** Number of diffusion steps to run. Default: 30 */\n steps?: number | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: TextToImageRequestStylePresetWithLiterals;\n}\n\nexport interface TextPrompt {\n /**\n * The text to generate the image from.\n * @maxLength 4000\n */\n text?: string | null;\n /** The weight of the text prompt. */\n weight?: number | null;\n}\n\nexport enum ClipGuidancePreset {\n CLIP_GUIDANCE_PRESET_UNSPECIFIED = 'CLIP_GUIDANCE_PRESET_UNSPECIFIED',\n FAST_BLUE = 'FAST_BLUE',\n FAST_GREEN = 'FAST_GREEN',\n NONE = 'NONE',\n SIMPLE = 'SIMPLE',\n SLOW = 'SLOW',\n SLOWER = 'SLOWER',\n SLOWEST = 'SLOWEST',\n}\n\n/** @enumType */\nexport type ClipGuidancePresetWithLiterals =\n | ClipGuidancePreset\n | 'CLIP_GUIDANCE_PRESET_UNSPECIFIED'\n | 'FAST_BLUE'\n | 'FAST_GREEN'\n | 'NONE'\n | 'SIMPLE'\n | 'SLOW'\n | 'SLOWER'\n | 'SLOWEST';\n\nexport enum Sampler {\n SAMPLER_UNSPECIFIED = 'SAMPLER_UNSPECIFIED',\n DDIM = 'DDIM',\n DDPM = 'DDPM',\n K_DPMPP_2M = 'K_DPMPP_2M',\n K_DPMPP_2S_ANCESTRAL = 'K_DPMPP_2S_ANCESTRAL',\n K_DPM_2 = 'K_DPM_2',\n K_DPM_2_ANCESTRAL = 'K_DPM_2_ANCESTRAL',\n K_EULER = 'K_EULER',\n K_EULER_ANCESTRAL = 'K_EULER_ANCESTRAL',\n K_HEUN = 'K_HEUN',\n K_LMS = 'K_LMS',\n}\n\n/** @enumType */\nexport type SamplerWithLiterals =\n | Sampler\n | 'SAMPLER_UNSPECIFIED'\n | 'DDIM'\n | 'DDPM'\n | 'K_DPMPP_2M'\n | 'K_DPMPP_2S_ANCESTRAL'\n | 'K_DPM_2'\n | 'K_DPM_2_ANCESTRAL'\n | 'K_EULER'\n | 'K_EULER_ANCESTRAL'\n | 'K_HEUN'\n | 'K_LMS';\n\nexport enum TextToImageRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type TextToImageRequestStylePresetWithLiterals =\n | TextToImageRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateCoreRequest {\n /** The model to use for generating the image. 
will always be STABLE_IMAGE_CORE */\n model?: ImageCoreModelWithLiterals;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Default: 1:1\n * One of : 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Default: 0\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: GenerateCoreRequestStylePresetWithLiterals;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum GenerateCoreRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type GenerateCoreRequestStylePresetWithLiterals =\n | GenerateCoreRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateStableDiffusionRequest {\n /**\n * The text prompt to generate the image from.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Controls whether this is a text-to-image or image-to-image generation.\n * - TEXT_TO_IMAGE requires only the prompt parameter.\n * - IMAGE_TO_IMAGE requires prompt, image, and strength parameters.\n */\n mode?: GenerationModeWithLiterals;\n /**\n * The image to use as the starting point for the generation.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Controls how much influence the image parameter 
has on the output image.\n * A value of 0 yields an image identical to the input; 1 ignores the input image.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n */\n strength?: number | null;\n /**\n * Default: 1:1\n * One of : 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * This parameter is only valid for TEXT_TO_IMAGE mode.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /** The model to use for generation. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Dictates the content-type of the generated image. */\n outputFormat?: GenerateStableDiffusionRequestOutputFormatWithLiterals;\n /**\n * Keywords of what you do not wish to see in the output image.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n}\n\nexport enum GenerationMode {\n UNKNOWN_GENERATION_MODE = 'UNKNOWN_GENERATION_MODE',\n TEXT_TO_IMAGE = 'TEXT_TO_IMAGE',\n IMAGE_TO_IMAGE = 'IMAGE_TO_IMAGE',\n}\n\n/** @enumType */\nexport type GenerationModeWithLiterals =\n | GenerationMode\n | 'UNKNOWN_GENERATION_MODE'\n | 'TEXT_TO_IMAGE'\n | 'IMAGE_TO_IMAGE';\n\nexport enum GenerateStableDiffusionRequestOutputFormat {\n OUTPUT_FORMAT_UNSPECIFIED = 'OUTPUT_FORMAT_UNSPECIFIED',\n JPEG = 'JPEG',\n PNG = 'PNG',\n}\n\n/** @enumType */\nexport type GenerateStableDiffusionRequestOutputFormatWithLiterals =\n | GenerateStableDiffusionRequestOutputFormat\n | 'OUTPUT_FORMAT_UNSPECIFIED'\n | 'JPEG'\n | 'PNG';\n\n/** Request to generate an image */\nexport interface GenerateAnImageRequest {\n /** The model to use for generating the image. */\n model?: GenerateAnImageModelWithLiterals;\n /**\n * The prompt to use for image generation.\n * Relevant models : ALL\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Optional seed for reproducibility. If not provided, a random seed will be used.\n * Relevant models : ALL\n */\n seed?: number | null;\n /**\n * Aspect ratio of the image between 21:9 and 9:21\n * default: 16:9\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * Width of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n width?: number | null;\n /**\n * Height of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n height?: number | null;\n /**\n * Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.\n * Relevant models : ALL\n * @max 6\n */\n safetyTolerance?: number | null;\n /**\n * Output format for the generated image. 
Can be 'jpeg' or 'png'.\n * Relevant models : ALL\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Generate less processed, more natural-looking images\n * Relevant models : FLUX_PRO_1_1_ULTRA\n */\n raw?: boolean | null;\n /**\n * Optional image to remix\n * The URL must be a valid wix mp or wix static URL.\n * Relevant models FLUX_PRO_1_1_ULTRA, FLUX_1_DEV, FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Blend between the prompt and the image prompt\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @max 1\n */\n imagePromptStrength?: number | null;\n /**\n * Optional image to remix\n * Image to use as control input - relevant models FLUX_PRO_1_DEPTH and FLUX_PRO_1_CANNY\n * @maxLength 100000\n */\n controlImageUrl?: string | null;\n /**\n * Whether to perform upsampling on the prompt\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n */\n promptUpsampling?: boolean | null;\n /**\n * Number of steps for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @min 15\n * @max 50\n */\n steps?: number | null;\n /**\n * Guidance strength for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @max 100\n */\n guidance?: number | null;\n /**\n * Image Mask\n * A URL representing a mask for the areas you want to modify in the image.\n * The mask should be the same dimensions as the image and in black and white.\n * Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting.\n * Optional if you provide an alpha mask in the original image.\n * Validation: The endpoint verifies that the dimensions of the mask match the original image.\n * Relevant models FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageMaskUrl?: string | null;\n /**\n * Skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * The user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum GenerateAnImageModel {\n GEN_IMAGE_MODEL_UNSPECIFIED = 'GEN_IMAGE_MODEL_UNSPECIFIED',\n FLUX_PRO_1_1_ULTRA = 'FLUX_PRO_1_1_ULTRA',\n FLUX_1_DEV = 'FLUX_1_DEV',\n FLUX_PRO_1_CANNY = 'FLUX_PRO_1_CANNY',\n FLUX_PRO_1_DEPTH = 'FLUX_PRO_1_DEPTH',\n FLUX_PRO_1_FILL = 'FLUX_PRO_1_FILL',\n}\n\n/** @enumType */\nexport type GenerateAnImageModelWithLiterals =\n | GenerateAnImageModel\n | 'GEN_IMAGE_MODEL_UNSPECIFIED'\n | 'FLUX_PRO_1_1_ULTRA'\n | 'FLUX_1_DEV'\n | 'FLUX_PRO_1_CANNY'\n | 'FLUX_PRO_1_DEPTH'\n | 'FLUX_PRO_1_FILL';\n\nexport interface CreatePredictionRequest\n extends CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** The model version ID */\n model?: CreatePredictionModelWithLiterals;\n /**\n * Skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * The user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /**
Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n}\n\nexport enum CreatePredictionModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n /** Flux-dev-controlnet */\n FLUX_DEV_CONTROLNET = 'FLUX_DEV_CONTROLNET',\n /** https://replicate.com/reve/edit. Has a `prompt` field, routed through GenerateContent */\n REVE_EDIT = 'REVE_EDIT',\n /** https://replicate.com/lucataco/florence-2-large */\n LUCATACO_FLORENCE_2_LARGE = 'LUCATACO_FLORENCE_2_LARGE',\n /** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\n PERCEPTRON_ISAAC_01 = 'PERCEPTRON_ISAAC_01',\n}\n\n/** @enumType */\nexport type CreatePredictionModelWithLiterals =\n | CreatePredictionModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID'\n | 'FLUX_DEV_CONTROLNET'\n | 'REVE_EDIT'\n | 'LUCATACO_FLORENCE_2_LARGE'\n | 'PERCEPTRON_ISAAC_01';\n\nexport interface FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * Number of images to generate\n * @min 1\n * @max 4\n */\n numOutputs?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the quality of the output image for jpg and webp (1-100)\n * @min 1\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n}\n\nexport interface FluxDevControlnet {\n /** Set a seed for reproducibility. Random by default. 
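The `FluxPulid` input above pairs with the `CreatePredictionRequest` oneof from the previous block; a hedged sketch of a face-conditioned generation (the URL and values are placeholders):

```ts
import type { CreatePredictionRequest } from '@wix/auto_sdk_ai-gateway_prompts';

// Exactly one input of the oneof is set, matching the chosen model.
const pulidPrediction: CreatePredictionRequest = {
  model: 'FLUX_PULID',
  fluxPulid: {
    prompt: 'Portrait of the subject as a watercolor illustration',
    mainFaceImage: 'https://example.com/face.jpg', // placeholder URL
    width: 896,   // 256-1536 px
    height: 1152, // 256-1536 px
    numSteps: 20, // denoising steps, 1-20
    trueCfg: 1,   // 1.0 = standard CFG; >1.0 enables True CFG
    idWeight: 1,  // ID image influence, 0.0-3.0
    outputFormat: 'webp',
    seed: -1,     // -1 = random
  },
  skipPolling: true, // return immediately; fetch the image later via GetResult
};
```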
*/\n seed?: number | null;\n /**\n * Number of steps\n * @min 1\n * @max 50\n */\n steps?: number | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Optional LoRA model to use.\n * Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link.\n * @maxLength 2000\n */\n loraUrl?: string | null;\n /**\n * Type of control net\n * @maxLength 100\n */\n controlType?: string | null;\n /**\n * Image to use with control net\n * @maxLength 2000\n */\n controlImage?: string | null;\n /**\n * Strength of LoRA model\n * @min -1\n * @max 3\n */\n loraStrength?: number | null;\n /**\n * Format of the output images\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Guidance scale\n * @max 5\n */\n guidanceScale?: number | null;\n /**\n * Quality of the output images, from 0 to 100.\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Things you do not want to see in your image\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Strength of control net.\n * @max 3\n */\n controlStrength?: number | null;\n /**\n * Preprocessor to use with depth control net\n * @maxLength 100\n */\n depthPreprocessor?: string | null;\n /**\n * Preprocessor to use with soft edge control net\n * @maxLength 100\n */\n softEdgePreprocessor?: string | null;\n /**\n * Strength of image to image control.\n * @max 1\n */\n imageToImageStrength?: number | null;\n /** Return the preprocessed image used to control the generation process. */\n returnPreprocessedImage?: boolean | null;\n}\n\nexport interface ReveEdit {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Edit instructions\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Specific version to use. 
Default: \"latest\"\n * @maxLength 10000\n */\n version?: string | null;\n}\n\n/** https://replicate.com/lucataco/florence-2-large/readme */\nexport interface LucatacoFlorence2Large {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /** Which task to perform */\n taskInput?: TaskInputWithLiterals;\n /**\n * Optional input for some task types\n * @maxLength 10000\n */\n textInput?: string | null;\n}\n\nexport enum TaskInput {\n UNRECOGNIZED_TASK_INPUT = 'UNRECOGNIZED_TASK_INPUT',\n OBJECT_DETECTION = 'OBJECT_DETECTION',\n CAPTION = 'CAPTION',\n DETAILED_CAPTION = 'DETAILED_CAPTION',\n MORE_DETAILED_CAPTION = 'MORE_DETAILED_CAPTION',\n CAPTION_TO_PHRASE_GROUNDING = 'CAPTION_TO_PHRASE_GROUNDING',\n REGION_PROPOSAL = 'REGION_PROPOSAL',\n DENSE_REGION_CAPTION = 'DENSE_REGION_CAPTION',\n OCR = 'OCR',\n OCR_WITH_REGION = 'OCR_WITH_REGION',\n}\n\n/** @enumType */\nexport type TaskInputWithLiterals =\n | TaskInput\n | 'UNRECOGNIZED_TASK_INPUT'\n | 'OBJECT_DETECTION'\n | 'CAPTION'\n | 'DETAILED_CAPTION'\n | 'MORE_DETAILED_CAPTION'\n | 'CAPTION_TO_PHRASE_GROUNDING'\n | 'REGION_PROPOSAL'\n | 'DENSE_REGION_CAPTION'\n | 'OCR'\n | 'OCR_WITH_REGION';\n\n/** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\nexport interface PerceptronIsaac01 {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /** Which task to perform */\n response?: ResponseTypeWithLiterals;\n /** Max new tokens */\n maxNewTokens?: string | null;\n}\n\nexport enum ResponseType {\n UNRECOGNIZED_RESPONSE_TYPE = 'UNRECOGNIZED_RESPONSE_TYPE',\n TEXT = 'TEXT',\n BOX = 'BOX',\n POINT = 'POINT',\n POLYGON = 'POLYGON',\n}\n\n/** @enumType */\nexport type ResponseTypeWithLiterals =\n | ResponseType\n | 'UNRECOGNIZED_RESPONSE_TYPE'\n | 'TEXT'\n | 'BOX'\n | 'POINT'\n | 'POLYGON';\n\nexport interface EditImageWithPromptRequest {\n /** The model to use for generating the image. 
*/\n model?: EditImageWithPromptRequestModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * image format jpeg, png, webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * Optional for OUTPAINT model , and required for INPAINT model\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask\n * The image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support\n * If you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * Relevant only for INPAINT model\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * image mask format jpeg, png, webp\n * Relevant only for INPAINT model\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * Relevant only for INPAINT model\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. 
This list of style presets is subject to change.\n */\n stylePreset?: StylePresetWithLiterals;\n /**\n * The direction to outpaint the image\n * Relevant only for OUTPAINT model\n * At least one of the fields must be set\n */\n outpaintDirection?: OutpaintDirection;\n /**\n * Controls the likelihood of creating additional details not heavily conditioned by the init image [0..1]\n * Relevant only for OUTPAINT model\n * @max 1\n */\n creativity?: number | null;\n}\n\nexport enum StylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type StylePresetWithLiterals =\n | StylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface OutpaintDirection {\n /**\n * The number of pixels to outpaint on the left side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n left?: number | null;\n /**\n * The number of pixels to outpaint on the right side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n right?: number | null;\n /**\n * The number of pixels to outpaint on the top of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n up?: number | null;\n /**\n * The number of pixels to outpaint on the bottom of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n down?: number | null;\n}\n\nexport interface TextToImageRequest {\n /**\n * Specifies the format of the output image. Supported formats are: PNG, JPG and WEBP. Default: JPG.\n * @maxLength 4\n */\n outputFormat?: string | null;\n /**\n * Sets the compression quality of the output image. Higher values preserve more quality but increase file size, lower values reduce file size but decrease quality. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /** This parameter is used to enable or disable the NSFW check. */\n checkNsfw?: boolean | null;\n /**\n * A positive prompt is a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n * For example, if the positive prompt is \"dragon drinking coffee\", the model will generate an image of a dragon drinking coffee. The more detailed the prompt, the more accurate the results.\n * The length of the prompt must be between 2 and 3000 characters.\n * @maxLength 1000000\n */\n positivePrompt?: string;\n /**\n * Used to define the height dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n height?: number;\n /**\n * Used to define the width dimension of the generated image. 
Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n width?: number;\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /** Model to invoke. */\n model?: TextToImageRequestModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The number of steps is the number of iterations the model will perform to generate the image. Default: 28.\n * @min 1\n * @max 100\n */\n steps?: number | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /**\n * Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results. Default: 7.\n * @max 30\n */\n cfgScale?: number | null;\n /** The number of images to generate from the specified prompt. */\n numberResults?: number | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n /**\n * Used to determine the influence of the seedImage image in the generated output. A lower value results in more influence from the original image, while a higher value allows more creative deviation.\n * @max 1\n */\n strength?: number | null;\n /**\n * An array of LoRA models to be applied during the image generation process.\n * @maxSize 10\n */\n loraModels?: LoraModelSelect[];\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /** Inputs for the image generation process. 
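The `TextToImageRequest` above is the Runware-style image job; a minimal sketch with illustrative values (the model literals are enumerated in `TextToImageRequestModel` just below):

```ts
import type { TextToImageRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const runwareJob: TextToImageRequest = {
  model: 'FLUX_1_SCHNELL',
  positivePrompt: 'Flat-lay photo of a breakfast table, soft natural light',
  width: 1024,  // must be divisible by 64
  height: 768,  // must be divisible by 64
  steps: 28,    // 1-100, default 28
  cfgScale: 7,  // default 7; higher follows the prompt more closely
  numberResults: 1,
  outputFormat: 'JPG',
  outputQuality: 95, // 20-99
  checkNsfw: true,
};
```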
*/\n inputs?: Inputs;\n}\n\nexport enum TextToImageRequestModel {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n /** runware:101@1 */\n FLUX_1_DEV = 'FLUX_1_DEV',\n /** runware:100@1 */\n FLUX_1_SCHNELL = 'FLUX_1_SCHNELL',\n /** bfl:4@1 */\n FLUX_1_KONTEXT_MAX = 'FLUX_1_KONTEXT_MAX',\n /** bfl:3@1 */\n FLUX_1_KONTEXT_PRO = 'FLUX_1_KONTEXT_PRO',\n /** runware:108@20 */\n QWEN_IMAGE_EDIT = 'QWEN_IMAGE_EDIT',\n /** ideogram:4@1 */\n IDEOGRAM_3_0 = 'IDEOGRAM_3_0',\n /** ideogram:4@3 */\n IDEOGRAM_3_0_EDIT = 'IDEOGRAM_3_0_EDIT',\n /** bfl:2@2 */\n FLUX_1_1_PRO_ULTRA = 'FLUX_1_1_PRO_ULTRA',\n /** bfl:1@2 */\n FLUX_1_FILL_PRO = 'FLUX_1_FILL_PRO',\n /** bytedance:5@0 */\n SEEDREAM_4 = 'SEEDREAM_4',\n /** runware:102@1 */\n FLUX_DEV_FILL = 'FLUX_DEV_FILL',\n /** bfl:1@5 */\n FLUX_DEPTH_PRO = 'FLUX_DEPTH_PRO',\n /** bfl:1@4 */\n FLUX_CANNY_PRO = 'FLUX_CANNY_PRO',\n /** Should be used together with the model_id field from the allowed models list */\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type TextToImageRequestModelWithLiterals =\n | TextToImageRequestModel\n | 'UNKNOWN_MODEL'\n | 'FLUX_1_DEV'\n | 'FLUX_1_SCHNELL'\n | 'FLUX_1_KONTEXT_MAX'\n | 'FLUX_1_KONTEXT_PRO'\n | 'QWEN_IMAGE_EDIT'\n | 'IDEOGRAM_3_0'\n | 'IDEOGRAM_3_0_EDIT'\n | 'FLUX_1_1_PRO_ULTRA'\n | 'FLUX_1_FILL_PRO'\n | 'SEEDREAM_4'\n | 'FLUX_DEV_FILL'\n | 'FLUX_DEPTH_PRO'\n | 'FLUX_CANNY_PRO'\n | 'FROM_MODEL_ID';\n\nexport interface LoraModelSelect {\n /**\n * The unique identifier of the LoRA model, typically in the format \"wix:<id>@<version>\".\n * @minLength 1\n * @maxLength 255\n */\n model?: string | null;\n /**\n * The weight or influence of the LoRA model during the generation process.\n * A higher value indicates a stronger influence of the LoRA model on the output.\n * @min -4\n * @max 4\n */\n weight?: number | null;\n}\n\nexport interface Inputs {\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n}\n\nexport interface InvokeMlPlatformLlamaModelRequest {\n /**\n * The ML platform model id.\n * @minLength 1\n * @maxLength 50\n */\n modelId?: string;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options.
Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport interface InvokeChatCompletionRequest {\n /** Model to invoke */\n model?: PerplexityModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far\n * @maxSize 1000\n */\n messages?: PerplexityMessage[];\n /**\n * Max number of completion tokens.\n * Completion token count + prompt token count must not exceed the size of the context window\n * @max 200000\n */\n maxTokens?: number | null;\n /**\n * The amount of randomness in the response, valued between 0 inclusive and 2 exclusive.\n * Higher values are more random, and lower values are more deterministic.\n */\n temperature?: number | null;\n /**\n * The nucleus sampling threshold, valued between 0 and 1 inclusive.\n * For each subsequent token, the model considers the results of the tokens with top_p probability mass.\n * Perplexity recommends either altering top_k or top_p, but not both.\n */\n topP?: number | null;\n /**\n * Given a list of domains, limit the citations used by the online model to URLs from the specified domains.\n * Currently limited to only 3 domains for whitelisting and blacklisting.\n * For blacklisting add a - to the beginning of the domain string.\n * @maxLength 10000\n * @maxSize 3\n */\n searchDomainFilter?: string[];\n /** Determines whether or not a request to an online model should return images. */\n returnImages?: boolean | null;\n /** Determines whether or not a request to an online model should return related questions. */\n returnRelatedQuestions?: boolean | null;\n /**\n * Returns search results within the specified time interval - does not apply to images.\n * Must be one of \"month\", \"week\", \"day\", \"hour\"\n * @maxLength 10\n */\n searchRecencyFilter?: string | null;\n /**\n * The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive.\n * If set to 0, top-k filtering is disabled. Perplexity recommends either altering top_k or top_p, but not both.\n */\n topK?: number | null;\n /**\n * A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics. Incompatible with `frequency_penalty`.\n */\n presencePenalty?: number | null;\n /**\n * A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing\n * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n * A value of 1.0 means no penalty. 
Incompatible with `presence_penalty`.\n */\n frequencyPenalty?: number | null;\n /**\n * Enable structured outputs with a JSON or Regex schema.\n * https://docs.perplexity.ai/guides/structured-outputs\n */\n responseFormat?: InvokeChatCompletionRequestResponseFormat;\n}\n\nexport interface InvokeChatCompletionRequestResponseFormat\n extends InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** @oneof */\nexport interface InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** mimics https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api */\nexport interface GenerateImageRequest {\n /** ID of the model to use. */\n model?: ImagenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n instances?: Instance[];\n /** The configuration for the generation. */\n parameters?: Parameters;\n}\n\nexport enum ImagenModel {\n UNKNOWN_IMAGEN_MODEL = 'UNKNOWN_IMAGEN_MODEL',\n IMAGEN_3_0_GENERATE_002 = 'IMAGEN_3_0_GENERATE_002',\n IMAGEN_3_0_FAST_GENERATE_001 = 'IMAGEN_3_0_FAST_GENERATE_001',\n IMAGEN_4_0_GENERATE_001 = 'IMAGEN_4_0_GENERATE_001',\n IMAGEN_4_0_FAST_GENERATE_001 = 'IMAGEN_4_0_FAST_GENERATE_001',\n IMAGEN_4_0_ULTRA_GENERATE_001 = 'IMAGEN_4_0_ULTRA_GENERATE_001',\n}\n\n/** @enumType */\nexport type ImagenModelWithLiterals =\n | ImagenModel\n | 'UNKNOWN_IMAGEN_MODEL'\n | 'IMAGEN_3_0_GENERATE_002'\n | 'IMAGEN_3_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_GENERATE_001'\n | 'IMAGEN_4_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_ULTRA_GENERATE_001';\n\nexport interface Instance {\n /**\n * The text prompt for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n}\n\nexport interface Parameters {\n /**\n * The number of images to generate (1-4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /** Optional random seed for image generation */\n seed?: string | null;\n /** Optional parameter to use LLM-based prompt rewriting for higher quality images */\n enhancePrompt?: boolean | null;\n /**\n * Optional text to discourage in the generated images\n * @maxLength 480\n */\n negativePrompt?: string | null;\n /**\n * Optional aspect ratio for the image (1:1, 9:16, 16:9, 3:4, 4:3)\n * @maxLength 5\n */\n aspectRatio?: string | null;\n /** Optional output image format options */\n outputOptions?: OutputOptions;\n /**\n * Optional setting for allowing/disallowing generation of people\n * @maxLength 20\n */\n personGeneration?: string | null;\n /**\n * Optional safety filtering level\n * @maxLength 50\n */\n safetySetting?: string | null;\n /** Optional flag to add invisible watermark */\n addWatermark?: boolean | null;\n}\n\nexport interface OutputOptions {\n /**\n * Image format (image/png or image/jpeg)\n * @maxLength 20\n */\n mimeType?: string | null;\n /**\n * Compression quality for JPEG (0-100)\n * @max 100\n */\n compressionQuality?: number | null;\n}\n\nexport interface GenerateImageMlPlatformRequest\n extends GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n /** The model version ID */\n 
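Mapping the Imagen `GenerateImageRequest` fields above onto a concrete call, a sketch with illustrative values:

```ts
import type { GenerateImageRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const imagenRequest: GenerateImageRequest = {
  model: 'IMAGEN_4_0_GENERATE_001',
  instances: [{ prompt: 'Watercolor map of a small coastal village' }],
  parameters: {
    sampleCount: 2,            // 1-4 images
    aspectRatio: '4:3',        // one of 1:1, 9:16, 16:9, 3:4, 4:3
    negativePrompt: 'text, watermark',
    enhancePrompt: true,       // LLM-based prompt rewriting
    outputOptions: { mimeType: 'image/jpeg', compressionQuality: 90 },
  },
};
```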
model?: GenerateImageMlPlatformModelWithLiterals;\n}\n\n/** @oneof */\nexport interface GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n}\n\nexport enum GenerateImageMlPlatformModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n}\n\n/** @enumType */\nexport type GenerateImageMlPlatformModelWithLiterals =\n | GenerateImageMlPlatformModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID';\n\nexport interface V1FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n /** Time step to start CFG - new field for ml platform */\n timestepToStartCfg?: number | null;\n /** Option to disable the NSFW safety checker */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface CreateImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. 
*/\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence and self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n}\n\nexport interface EditImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * The image to be edited.\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * The image mask to be edited.\n * @maxLength 10000\n */\n imageMaskUrl?: string | null;\n /**\n * Additional images to be edited.\n * @maxSize 10\n * @maxLength 10000\n */\n imageUrls?: string[] | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence and self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n /**\n * Control how much effort the model will exert to match the style and features, especially facial features, of input images.\n * This parameter is only supported for gpt-image-1. Supports high and low.
Defaults to low.\n * @maxLength 10\n */\n inputFidelity?: string | null;\n}\n\n/** Mirrors https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo-video-generation */\nexport interface GenerateVideoRequest {\n /** ID of the Video generation model to use. */\n model?: VideoGenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 100\n */\n instances?: GenerateVideoInstance[];\n /** Generation-time settings. */\n parameters?: GenerateVideoParameters;\n}\n\nexport enum VideoGenModel {\n UNKNOWN_VIDEO_GEN_MODEL = 'UNKNOWN_VIDEO_GEN_MODEL',\n VEO_2_0_GENERATE_001 = 'VEO_2_0_GENERATE_001',\n VEO_3_0_GENERATE_001 = 'VEO_3_0_GENERATE_001',\n VEO_3_0_FAST_GENERATE_001 = 'VEO_3_0_FAST_GENERATE_001',\n}\n\n/** @enumType */\nexport type VideoGenModelWithLiterals =\n | VideoGenModel\n | 'UNKNOWN_VIDEO_GEN_MODEL'\n | 'VEO_2_0_GENERATE_001'\n | 'VEO_3_0_GENERATE_001'\n | 'VEO_3_0_FAST_GENERATE_001';\n\nexport interface GenerateVideoInstance {\n /**\n * Mandatory (text-to-video), optional if an input image prompt is provided (image-to-video)\n * Text input for guiding video generation.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Mandatory (image-to-video), optional if a text prompt is provided (text-to-video)\n * Image input for guiding video generation.\n */\n image?: V1ImageInput;\n}\n\nexport interface V1ImageInput {\n /**\n * A publicly available image URL\n * @format WEB_URL\n */\n imageUrl?: string | null;\n /**\n * MIME type of the image (image/jpeg or image/png)\n * @maxLength 20\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateVideoParameters {\n /**\n * Requested video length in seconds (4, 6, or 8. The default is 8)\n * @min 4\n * @max 8\n */\n durationSeconds?: number | null;\n /**\n * A text string that describes anything you want to discourage the model from generating.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /** Use gemini to enhance your prompts (default is True) */\n enhancePrompt?: boolean | null;\n /**\n * A number to request to make generated videos deterministic.\n * Adding a seed number with your request without changing other parameters will cause the model to produce the same videos.\n */\n seed?: string | null;\n /**\n * Number of videos to generate (1–4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /**\n * Aspect ratio: 16:9 (default, landscape) or 9:16 (portrait)\n * @maxLength 50\n */\n aspectRatio?: string | null;\n /**\n * The safety setting that controls whether people or face generation is allowed:\n * \"allow_adult\" (default value): allow generation of adults only\n * \"disallow\": disallows inclusion of people/faces in images\n * @maxLength 50\n */\n personGeneration?: string | null;\n /** Whether to generate audio for the video */\n generateAudio?: boolean | null;\n /**\n * The resolution of the generated video. Supported values: 720p, 1080p. 
Default: 1080p\n * @maxLength 50\n */\n resolution?: string | null;\n}\n\n/** Add to your existing proto file */\nexport interface V1CreateChatCompletionRequest {\n /** Model identifier */\n model?: ChatCompletionModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: GoogleproxyV1ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: V1CreateChatCompletionRequestResponseFormat;\n}\n\nexport interface V1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawRequest {\n /**\n * ML Platform model identifier\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: ResponseFormat;\n}\n\nexport interface ResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface VideoInferenceRequest {\n /** Specifies the format of the output video. Supported formats are: MP4 and WEBM. Default: MP4. */\n outputFormat?: OutputFormatWithLiterals;\n /**\n * Sets the compression quality of the output video. Higher values preserve more quality but increase file size. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /**\n * The text description that guides the video generation process. This prompt defines what you want to see in the video.\n * The length of the prompt must be at least 2 characters.\n * @minLength 2\n * @maxLength 100000\n */\n positivePrompt?: string | null;\n /**\n * Specifies what you want to avoid in the generated video.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * An array of objects that define key frames to guide video generation.\n * @maxSize 100\n */\n frameImages?: FrameImage[];\n /**\n * An array containing reference images used to condition the generation process. Must be URLs pointing to the images. The images must be accessible publicly.\n * @maxSize 10\n * @maxLength 100000\n */\n referenceImages?: string[] | null;\n /**\n * The width of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n width?: number | null;\n /**\n * The height of the generated video in pixels. 
Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n height?: number | null;\n /** The AI model to use for video generation. */\n model?: VideoModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The length of the generated video in seconds.\n * @min 1\n * @max 10\n */\n duration?: number | null;\n /**\n * The frame rate (frames per second) of the generated video. Default: 24.\n * @min 15\n * @max 60\n */\n fps?: number | null;\n /**\n * The number of denoising steps the model performs during video generation.\n * @min 10\n * @max 50\n */\n steps?: number | null;\n /** A seed is a value used to randomize the video generation. */\n seed?: string | null;\n /**\n * Controls how closely the video generation follows your prompt. Recommended range is 6.0-10.0 for most video models.\n * @max 50\n */\n cfgScale?: number | null;\n /**\n * Specifies how many videos to generate for the given parameters. Default: 1.\n * @min 1\n * @max 4\n */\n numberResults?: number | null;\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /**\n * Skip polling flag - if set to false, the call will poll until video generation is complete.\n * If not set or true, it returns immediately with a task UUID for manual polling.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum OutputFormat {\n UNKNOWN_OUTPUT_FORMAT = 'UNKNOWN_OUTPUT_FORMAT',\n /** MPEG-4 video format, widely compatible and recommended for most use cases. */\n MP4 = 'MP4',\n /** WebM video format, optimized for web delivery and smaller file sizes. */\n WEBM = 'WEBM',\n}\n\n/** @enumType */\nexport type OutputFormatWithLiterals =\n | OutputFormat\n | 'UNKNOWN_OUTPUT_FORMAT'\n | 'MP4'\n | 'WEBM';\n\nexport interface FrameImage {\n /**\n * Specifies the input image that will be used to constrain the video content at the specified frame position.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 100000\n */\n inputImage?: string;\n /**\n * Specifies the position of this frame constraint within the video timeline.\n * Can be \"first\", \"last\", or a numeric frame number.\n * @maxLength 20\n */\n frame?: string | null;\n}\n\nexport enum VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SEEDANCE_1_0_PRO = 'SEEDANCE_1_0_PRO',\n SEEDANCE_1_0_LITE = 'SEEDANCE_1_0_LITE',\n SEEDANCE_1_0_PRO_FAST = 'SEEDANCE_1_0_PRO_FAST',\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type VideoModelWithLiterals =\n | VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SEEDANCE_1_0_PRO'\n | 'SEEDANCE_1_0_LITE'\n | 'SEEDANCE_1_0_PRO_FAST'\n | 'FROM_MODEL_ID';\n\nexport interface V1OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: V1ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response.
Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: V1ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: V1ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: V1ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: V1ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: V1ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. 
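Putting the `V1OpenAiResponsesRequest` fields together, a stateless call might look like the sketch below (the model literal is an assumption, since `V1ResponsesModelWithLiterals` is not shown in this diff):

```ts
import type { V1OpenAiResponsesRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const responsesRequest: V1OpenAiResponsesRequest = {
  model: 'GPT_5' as V1OpenAiResponsesRequest['model'], // assumed literal
  instructions: 'You are a terse assistant.',
  input: [
    {
      message: {
        role: 'USER',
        content: [{ type: 'input_text', text: 'Summarize this changelog in one line.' }],
      },
    },
  ],
  maxOutputTokens: 200,
  truncation: 'auto', // drop middle input items instead of failing on context overflow
  store: false,       // stateless; pair with reasoning.encrypted_content when needed
};
```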
*/\n store?: boolean | null;\n}\n\nexport interface V1ResponsesInputItem extends V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. 
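Editor's note: a minimal sketch of a V1OpenAiResponsesRequest-shaped payload with one user input item, based only on the fields above. The `model` literal is hypothetical, since the V1ResponsesModel enum values are not visible in this hunk:

```ts
// Sketch only; not the package's documented call sequence.
const responsesRequest = {
  model: 'GPT_4_1' as any,          // hypothetical enum literal (values not shown here)
  instructions: 'Answer briefly.',  // system/developer context
  input: [
    {
      message: {
        role: 'USER', // ResponsesInputMessageResponsesMessageRole literal
        content: [{ text: 'What changed in v1.0.32?' }],
      },
    },
  ],
  maxOutputTokens: 512,
  temperature: 0.2,
  store: false, // stateless use; see `include` for reasoning.encrypted_content
};
```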
*/\n role?: ResponsesInputMessageResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: V1ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesInputMessageResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesInputMessageResponsesMessageRoleWithLiterals =\n | ResponsesInputMessageResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface V1ResponsesInputMessageContent\n extends V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n}\n\nexport interface ResponsesInputMessageContentImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface ResponsesInputMessageContentFileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface V1ResponsesTextFormat\n extends V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n}\n\nexport interface ResponsesTextFormatJsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. 
Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the\n */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface V1ResponsesTool extends V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\nexport interface V1ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: ResponsesWebSearchUserLocation;\n}\n\nexport interface ResponsesWebSearchUserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface V1ResponsesFunction {\n /**\n * The type of the function tool. 
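Editor's note: a minimal sketch of the structured-output configuration per the ResponsesTextFormatJsonSchema and V1ResponsesToolChoice typings above; the schema contents are illustrative, not taken from the package:

```ts
// Constrain the text response to a JSON Schema (Structured Outputs style).
const text = {
  jsonSchema: {
    name: 'order_summary',  // a-z, A-Z, 0-9, underscores/dashes, max 64 chars
    type: 'json_schema',    // always 'json_schema' per the JSDoc
    strict: true,           // exact adherence; only a subset of JSON Schema allowed
    schema: {
      type: 'object',
      properties: { total: { type: 'number' }, currency: { type: 'string' } },
      required: ['total', 'currency'],
      additionalProperties: false,
    },
  },
};

// Tool-choice mode per the JSDoc: 'none' | 'auto' | 'required'. Setting `name`
// instead would target a specific function (assumed from the field docs).
const toolChoice = { mode: 'auto' };
```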
Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: V1ResponsesCodeInterpreterContainer;\n}\n\nexport interface V1ResponsesCodeInterpreterContainer\n extends V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface V1ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\nexport interface OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. 
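Editor's note: a sketch of two V1ResponsesTool variants from the oneof above (exactly one of webSearch / function / codeInterpreter should be set per item); names and parameters are illustrative:

```ts
// A callable function tool with a JSON Schema parameter description.
const orderTool = {
  function: {
    type: 'function', // always 'function' per the JSDoc
    name: 'get_order',
    description: 'Look up an order by its ID.',
    parameters: {
      type: 'object',
      properties: { orderId: { type: 'string' } },
      required: ['orderId'],
    },
    strict: true, // structured-outputs-style adherence to `parameters`
  },
};

// A code interpreter tool using the auto container variant.
const sandboxTool = {
  codeInterpreter: {
    type: 'code_interpreter',
    container: { autoContainer: { type: 'auto' } }, // always "auto"
  },
};
```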
Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. */\n store?: boolean | null;\n}\n\nexport interface ResponsesInputItem extends ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. 
*/\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. */\n role?: ResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesMessageRoleWithLiterals =\n | ResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface ResponsesInputMessageContent\n extends ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n}\n\nexport interface ImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface FileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface ResponsesTextFormat extends ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n}\n\nexport interface JsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. 
*/\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the\n */\n strict?: boolean | null;\n}\n\nexport interface ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface ResponsesTool extends ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\nexport interface ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: UserLocation;\n}\n\nexport interface UserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface ResponsesFunction {\n /**\n * The type of the function tool. 
Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: ResponsesCodeInterpreterContainer;\n}\n\nexport interface ResponsesCodeInterpreterContainer\n extends ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\n/** More info and default values at https://platform.openai.com/docs/api-reference/videos/create */\nexport interface CreateVideoRequest {\n /**\n * Text prompt that describes the video to generate.\n * @maxLength 10000\n */\n prompt?: string;\n /** The video generation model to use. */\n model?: V1VideoModelWithLiterals;\n /**\n * Size of the generated video (width x height in pixels). Examples: \"720x1280\", \"1280x720\".\n * @maxLength 50\n */\n size?: string | null;\n /**\n * Clip duration in seconds. 
Default is 4 seconds if not specified.\n * @min 1\n * @max 180\n */\n seconds?: number | null;\n /**\n * Optional publicly accessible URL to an image reference that guides generation.\n * @maxLength 5000\n * @format WEB_URL\n */\n inputReferenceUrl?: string | null;\n}\n\nexport enum V1VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SORA_2 = 'SORA_2',\n SORA_2_PRO = 'SORA_2_PRO',\n}\n\n/** @enumType */\nexport type V1VideoModelWithLiterals =\n | V1VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SORA_2'\n | 'SORA_2_PRO';\n\nexport interface UserRequestInfo {\n /**\n * Interaction id\n * @maxLength 100\n */\n interactionId?: string | null;\n /**\n * Additional tags; use comma-separated format for multiple tags.\n * @maxLength 1000\n */\n additionalTags?: string | null;\n /**\n * GenAI feature name, required by FinOps for evaluation\n * @maxLength 1000\n */\n featureName?: string | null;\n /**\n * AppDefId to which the cost will be attributed instead of the one that signs the request.\n * Will not work unless your application is explicitly allowed to override costs attribution.\n * Please reach out to #ai-tools-support if you think you need this field.\n * @format GUID\n */\n costAttributionOverrideId?: string | null;\n}\n\nexport interface FallbackProperties {\n /**\n * Flag to indicate whether to opt out of the request forwarding as a fallback.\n * Currently, only the fallback from OpenAI to Azure is supported for certain OpenAI models.\n * If set to true, the request will not be redirected to Azure in the event of a server failure by OpenAI.\n */\n optOut?: boolean | null;\n /** FallbackPromptConfig object that describes an optional second Prompt that can be invoked in case the main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n}\n\nexport interface AsyncGenerationConfig {\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n /** SPI generation configuration. */\n spiGenerationConfig?: SpiGenerationConfig;\n}\n\nexport interface SpiGenerationConfig {\n /**\n * SPI client app_id.\n * @maxLength 100\n */\n appId?: string | null;\n /**\n * SPI client component_id.\n * @maxLength 100\n */\n componentId?: string | null;\n}\n\nexport interface DynamicRequestConfig {\n /**\n * List of GatewayToolDefinition's, used to overwrite tools in the prompt.\n * @maxSize 100\n */\n gatewayToolDefinitions?: GatewayToolDefinition[];\n /**\n * List of GatewayMessageDefinition's, which will be converted to model-specific format and appended to the messages saved in the prompt.\n * @maxSize 100\n */\n gatewayMessageDefinitions?: GatewayMessageDefinition[];\n}\n\nexport interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\n/** @oneof */\nexport interface GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\nexport interface GatewayToolDefinitionCustomTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the tool does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the tool accepts, described as a JSON Schema object. 
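Editor's note: a minimal sketch of a CreateVideoRequest (Sora) together with the UserRequestInfo attribution metadata documented above; all values are illustrative:

```ts
const createVideoRequest = {
  prompt: 'A slow pan across a mountain lake at dawn',
  model: 'SORA_2',   // V1VideoModelWithLiterals
  size: '1280x720',  // width x height in pixels, per the JSDoc examples
  seconds: 4,        // documented default when omitted
};

const userRequestInfo = {
  interactionId: 'demo-interaction-1',     // illustrative free-form ID
  additionalTags: 'editor,video-preview',  // comma-separated
  featureName: 'site-video-generator',     // required by FinOps for evaluation
};
```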
*/\n parameters?: Record<string, any> | null;\n}\n\nexport interface BuiltInTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional parameters specific to the built-in tool. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GatewayMessageDefinition {\n /** The role of the message author. */\n role?: GatewayMessageDefinitionRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport enum GatewayMessageDefinitionRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n TOOL = 'TOOL',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type GatewayMessageDefinitionRoleWithLiterals =\n | GatewayMessageDefinitionRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents the model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\n/** @oneof */\nexport interface GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents the model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\nexport interface ToolResultContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Whether the tool result is an error. */\n error?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport interface GenerateContentByPromptObjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateTextByPromptObjectRequest {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. 
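Editor's note: a sketch of a DynamicRequestConfig that overrides prompt tools and appends a user message. The TextContent shape is assumed here to be `{ text: string }`; its actual fields are not visible in this hunk:

```ts
const dynamicRequestConfig = {
  gatewayToolDefinitions: [
    {
      customTool: {
        name: 'lookup_faq',
        description: 'Search the FAQ index for an answer.',
        parameters: {
          type: 'object',
          properties: { query: { type: 'string' } },
          required: ['query'],
        },
      },
    },
  ],
  gatewayMessageDefinitions: [
    {
      role: 'USER', // GatewayMessageDefinitionRole literal
      content: [{ text: { text: 'Where is my order?' } }], // TextContent shape assumed
    },
  ],
};
```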
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n /**\n * Extracted text content from the chunk.\n * @maxLength 100\n */\n content?: string | null;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\n/** @oneof */\nexport interface GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n}\n\nexport interface ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. 
*/\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 100\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ToolCall[];\n}\n\nexport interface ChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkDelta;\n /**\n * The reason the model stopped generating tokens. This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface V1ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChatCompletionChunkChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkChoiceChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 1000\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n}\n\nexport interface ChatCompletionChunkChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkChoiceChunkDelta;\n /**\n * The reason the model stopped generating tokens. 
This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface GoogleproxyV1AnthropicStreamChunk\n extends GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1ContentBlockDelta\n extends GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface V1AnthropicStreamChunkMessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: GoogleproxyV1Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface AnthropicStreamChunk extends AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n /**\n * The unique identifier for the response. 
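Editor's note: the chunk typings above note that streamed function arguments "can be partial jsons and have to be assembled manually." A sketch of that folding step; the local DeltaChunk type is a simplified stand-in for ChatCompletionChunk, since the package's ToolCall fields are not shown in this hunk:

```ts
interface DeltaChunk {
  choices?: {
    delta?: {
      content?: string | null;
      toolCalls?: { index?: number; function?: { name?: string; arguments?: string } }[];
    };
    finishReason?: string | null;
  }[];
}

// Accumulate text and per-tool-call argument fragments across the stream.
function foldStream(chunks: DeltaChunk[]): { text: string; toolArgs: Map<number, string> } {
  let text = '';
  const toolArgs = new Map<number, string>();
  for (const chunk of chunks) {
    for (const choice of chunk.choices ?? []) {
      text += choice.delta?.content ?? '';
      for (const call of choice.delta?.toolCalls ?? []) {
        const i = call.index ?? 0;
        toolArgs.set(i, (toolArgs.get(i) ?? '') + (call.function?.arguments ?? ''));
      }
    }
  }
  return { text, toolArgs }; // JSON.parse each toolArgs entry once the stream ends
}
```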
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n}\n\nexport interface ContentBlockDelta extends ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface MessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface V1AnthropicStreamChunk\n extends V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * The unique identifier for the response. 
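Editor's note: for the Anthropic stream variants, deltas for one content block share a block `index`. A sketch of accumulating those deltas, narrowed to the fields shown in the typings above:

```ts
type AnthropicChunk = {
  index?: number | null;
  contentBlockDelta?: { text?: string; partialJson?: string; thinking?: string };
  messageDelta?: { stopReason?: string | null; microcentsSpent?: string | null };
};

function consume(chunks: AnthropicChunk[]): Map<number, string> {
  const blocks = new Map<number, string>(); // accumulated content per block index
  for (const c of chunks) {
    const i = c.index ?? 0;
    const delta = c.contentBlockDelta;
    if (delta?.text) {
      blocks.set(i, (blocks.get(i) ?? '') + delta.text);
    } else if (delta?.partialJson) {
      // tool input streams as JSON fragments for the same index
      blocks.set(i, (blocks.get(i) ?? '') + delta.partialJson);
    } else if (c.messageDelta?.stopReason) {
      console.log('stopped:', c.messageDelta.stopReason); // e.g. end_turn, max_tokens
    }
  }
  return blocks;
}
```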
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * Index of the content block this chunk refers to (when relevant).\n * For example, text and tool-input deltas apply to the block at this index.\n */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n}\n\nexport interface V1ContentBlockDelta extends V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\n/** @oneof */\nexport interface V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\nexport interface AnthropicStreamChunkMessageDelta {\n /**\n * Why generation concluded for this assistant message, when 
applicable:\n * \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" | \"pause_turn\" | \"refusal\".\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * The specific custom stop sequence that was produced, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Cumulative token usage at this point in the stream. */\n usage?: V1Usage;\n /** Cost of the request so far, in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateAudioRequest\n extends GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n\nexport interface CreateSpeechRequest {\n /** One of the available TTS models: https://platform.openai.com/docs/models#tts */\n model?: SpeechModelWithLiterals;\n /**\n * The text to generate audio for. The maximum length is 4096 characters.\n * @maxLength 4096\n */\n input?: string;\n /**\n * The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.\n * @maxLength 100\n */\n voice?: string;\n /**\n * The format to return audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.\n * @maxLength 100\n */\n responseFormat?: string | null;\n /**\n * The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n * @min 0.25\n * @max 4\n */\n speed?: number | null;\n}\n\nexport enum SpeechModel {\n UNKNOWN_SPEECH_MODEL = 'UNKNOWN_SPEECH_MODEL',\n TTS_1 = 'TTS_1',\n TTS_1_HD = 'TTS_1_HD',\n}\n\n/** @enumType */\nexport type SpeechModelWithLiterals =\n | SpeechModel\n | 'UNKNOWN_SPEECH_MODEL'\n | 'TTS_1'\n | 'TTS_1_HD';\n\nexport interface TextToSpeechRequest {\n /**\n * Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.\n * @maxLength 100\n */\n voiceId?: string;\n /**\n * The output format of the generated audio. List of supported values: mp3_22050_32, mp3_44100_32, mp3_44100_64, mp3_44100_96, mp3_44100_128, mp3_44100_192, pcm_16000, pcm_22050, pcm_24000, pcm_44100, ulaw_8000\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * When enable_logging is set to false, full privacy mode will be used for the request.\n * This will mean history features are unavailable for this request, including request stitching.\n * Full privacy mode may only be used by enterprise customers.\n */\n enableLogging?: boolean;\n /**\n * The text that will get converted into speech.\n * @maxLength 10000000\n */\n text?: string;\n /** Identifier of the model that will be used; you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property. */\n modelId?: ElevenLabsTextToSpeechModelWithLiterals;\n /**\n * Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. 
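Editor's note: a minimal sketch of a GenerateAudioRequest using the OpenAI speech arm of the oneof (exactly one of openAiCreateSpeechRequest / elevenlabsTextToSpeechRequest is set); values follow the JSDoc above:

```ts
const audioRequest = {
  openAiCreateSpeechRequest: {
    model: 'TTS_1_HD',       // SpeechModelWithLiterals
    input: 'Your order has shipped.',
    voice: 'alloy',          // alloy | echo | fable | onyx | nova | shimmer
    responseFormat: 'mp3',   // mp3 | opus | aac | flac | wav | pcm
    speed: 1.0,              // 0.25-4.0, default 1.0
  },
};
```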
For other models, an error will be returned if language code is provided.\n * @maxLength 100\n */\n languageCode?: string | null;\n /** Voice settings overriding stored settings for the given voice. They are applied only on the given request. */\n voiceSettings?: VoiceSettings;\n /**\n * A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.\n * @maxSize 10\n */\n pronunciationDictionaryLocators?: PronunciationDictionaryLocator[];\n /** If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295. */\n seed?: string | null;\n /**\n * The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n previousText?: string | null;\n /**\n * The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n nextText?: string | null;\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n previousRequestIds?: string[];\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n nextRequestIds?: string[];\n /**\n * This parameter controls text normalization with three modes: ‘auto’, ‘on’, and ‘off’. When set to ‘auto’, the system will automatically decide whether to apply text normalization (e.g., spelling out numbers).\n * With ‘on’, text normalization will always be applied, while with ‘off’, it will be skipped. Cannot be turned on for ‘eleven_turbo_v2_5’ model.\n * Defaults to ‘auto’.\n * @maxLength 100\n */\n applyTextNormalization?: string | null;\n /** When set to true, response chunks will include precise character-level timing information for audio-text synchronization. */\n withTimings?: boolean;\n}\n\nexport enum ElevenLabsTextToSpeechModel {\n UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL = 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL',\n ELEVEN_MULTILINGUAL_V2 = 'ELEVEN_MULTILINGUAL_V2',\n ELEVEN_FLASH_V2_5 = 'ELEVEN_FLASH_V2_5',\n ELEVEN_FLASH_V2 = 'ELEVEN_FLASH_V2',\n}\n\n/** @enumType */\nexport type ElevenLabsTextToSpeechModelWithLiterals =\n | ElevenLabsTextToSpeechModel\n | 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL'\n | 'ELEVEN_MULTILINGUAL_V2'\n | 'ELEVEN_FLASH_V2_5'\n | 'ELEVEN_FLASH_V2';\n\nexport interface VoiceSettings {\n /** Defines the stability for voice settings. 
*/\n stability?: number;\n /** Defines the similarity boost for voice settings. */\n similarityBoost?: number;\n /** Defines the style for voice settings. This parameter is available on V2+ models. */\n style?: number | null;\n /** Defines the use speaker boost for voice settings. This parameter is available on V2+ models. */\n useSpeakerBoost?: boolean;\n}\n\nexport interface PronunciationDictionaryLocator {\n /**\n * pronunciation_dictionary_id\n * @maxLength 100\n */\n pronunciationDictionaryId?: string;\n /**\n * version_id\n * @maxLength 100\n */\n versionId?: string;\n}\n\nexport interface GeneratedAudioChunk\n extends GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\n/** @oneof */\nexport interface GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\nexport interface SpeechChunk {\n /** Partial audio file bytes. */\n content?: Uint8Array;\n}\n\nexport interface TextToSpeechChunk {\n /** Base64 encoded audio chunk */\n audioBase64?: Uint8Array;\n /** Alignment information for the generated audio given the input text sequence. */\n alignment?: AlignmentInfoInChunk;\n /** Alignment information for the generated audio given the input normalized text sequence. */\n normalizedAlignment?: AlignmentInfoInChunk;\n}\n\nexport interface AlignmentInfoInChunk {\n /**\n * Array of start times (in seconds) for each character\n * @maxSize 1000000\n */\n characterStartTimesSeconds?: number[];\n /**\n * Array of end times (in seconds) for each character\n * @maxSize 1000000\n */\n characterEndTimesSeconds?: number[];\n /**\n * Array of individual characters from the input or normalized text\n * @maxSize 1000000\n * @maxLength 1\n */\n characters?: string[];\n}\n\nexport interface DomainEvent extends DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. */\n _id?: string;\n /**\n * Fully Qualified Domain Name of an entity. This is a unique identifier assigned to the API main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order. 
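Editor's note: the AlignmentInfoInChunk arrays are index-aligned per character. A small sketch that zips them into per-character timing tuples for audio-text synchronization (e.g., caption highlighting):

```ts
function toTimedChars(a: {
  characters?: string[];
  characterStartTimesSeconds?: number[];
  characterEndTimesSeconds?: number[];
}): { char: string; start: number; end: number }[] {
  // Arrays share indices: characters[i] spans start[i]..end[i] seconds.
  return (a.characters ?? []).map((char, i) => ({
    char,
    start: a.characterStartTimesSeconds?.[i] ?? 0,
    end: a.characterEndTimesSeconds?.[i] ?? 0,
  }));
}
```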
Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\n/** @oneof */\nexport interface DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n}\n\nexport interface EntityCreatedEvent {\n entity?: string;\n}\n\nexport interface RestoreInfo {\n deletedDate?: Date | null;\n}\n\nexport interface EntityUpdatedEvent {\n /**\n * Since platformized APIs only expose PATCH and not PUT, we can't assume that the fields sent from the client are the actual diff.\n * This means that to generate a list of changed fields (as opposed to sent fields) one needs to traverse both objects.\n * We don't want to impose this on all developers and so we leave this traversal to the notification recipients that need it.\n */\n currentEntity?: string;\n}\n\nexport interface EntityDeletedEvent {\n /** Entity that was deleted. */\n deletedEntity?: string | null;\n}\n\nexport interface ActionEvent {\n body?: string;\n}\n\nexport interface MessageEnvelope {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n /** Stringified payload. */\n data?: string;\n}\n\nexport interface IdentificationData extends IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n /** @readonly */\n identityType?: WebhookIdentityTypeWithLiterals;\n}\n\n/** @oneof */\nexport interface IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n}\n\nexport enum WebhookIdentityType {\n UNKNOWN = 'UNKNOWN',\n ANONYMOUS_VISITOR = 'ANONYMOUS_VISITOR',\n MEMBER = 'MEMBER',\n WIX_USER = 'WIX_USER',\n APP = 'APP',\n}\n\n/** @enumType */\nexport type WebhookIdentityTypeWithLiterals =\n | WebhookIdentityType\n | 'UNKNOWN'\n | 'ANONYMOUS_VISITOR'\n | 'MEMBER'\n | 'WIX_USER'\n | 'APP';\n\nexport interface BaseEventMetadata {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n}\n\nexport interface EventMetadata extends BaseEventMetadata {\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. */\n _id?: string;\n /**\n * Fully Qualified Domain Name of an entity. 
This is a unique identifier assigned to the API main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order. Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\nexport interface PromptProxyCompletedEnvelope {\n data: GenerationCompletedResultEvent;\n metadata: EventMetadata;\n}\n\n/** @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @webhook\n * @eventType wix.api_infra.v1.prompt_proxy_completed\n * @slug completed\n * @documentationMaturity preview\n */\nexport declare function onPromptProxyCompleted(\n handler: (event: PromptProxyCompletedEnvelope) => void | Promise<void>\n): void;\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n * @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObject\n */\nexport async function generateContentByPromptObject(\n options?: GenerateContentByPromptObjectOptions\n): Promise<\n NonNullablePaths<\n GenerateContentByPromptObjectResponse,\n | `response.openAiChatCompletionResponse.model`\n | `response.openAiChatCompletionResponse.choices`\n | `response.openAiChatCompletionResponse.choices.${number}.message.role`\n | `response.googleTextBisonResponse.predictions`\n | `response.googleChatBisonResponse.predictions`\n | `response.azureChatCompletionResponse.model`\n | `response.azureChatCompletionResponse.choices`\n | `response.azureChatCompletionResponse.choices.${number}.message.role`\n | `response.googleGeminiGenerateContentResponse.candidates`\n | `response.googleGeminiGenerateContentResponse.candidates.${number}.finishReason`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails.${number}.modality`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.candidatesTokensDetails`\n | `response.anthropicClaudeResponse.responseId`\n | `response.anthropicClaudeResponse.model`\n | `response.anthropicClaudeResponse.responseType`\n | 
`response.anthropicClaudeResponse.role`\n | `response.anthropicClaudeResponse.content`\n | `response.anthropicClaudeResponse.usage.inputTokens`\n | `response.anthropicClaudeResponse.usage.outputTokens`\n | `response.anthropicClaudeResponse.contentBlocks`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.googleAnthropicClaudeResponse.responseId`\n | `response.googleAnthropicClaudeResponse.model`\n | `response.googleAnthropicClaudeResponse.responseType`\n | `response.googleAnthropicClaudeResponse.role`\n | `response.googleAnthropicClaudeResponse.content`\n | `response.googleAnthropicClaudeResponse.usage.inputTokens`\n | `response.googleAnthropicClaudeResponse.usage.outputTokens`\n | `response.googleAnthropicClaudeResponse.contentBlocks`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.responseId`\n | `response.invokeAnthropicModelResponse.model`\n | `response.invokeAnthropicModelResponse.type`\n | `response.invokeAnthropicModelResponse.role`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral1hInputTokens`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral5mInputTokens`\n | `response.invokeAnthropicModelResponse.usage.inputTokens`\n | `response.invokeAnthropicModelResponse.usage.outputTokens`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webSearchRequests`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webFetchRequests`\n | `response.invokeAnthropicModelResponse.container.expiresAt`\n | `response.invokeAnthropicModelResponse.container._id`\n | `response.invokeAnthropicModelResponse.content`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.text`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.cacheControl.type`\n | `response.invokeAnthropicModelResponse.content.${number}.image.mediaType`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.signature`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.thinking`\n | `response.invokeAnthropicModelResponse.content.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.content.${number}.document.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentSuccess.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentError.type`\n | 
`response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.type`\n | `response.perplexityChatCompletionResponse.model`\n | `response.perplexityChatCompletionResponse.citations`\n | `response.perplexityChatCompletionResponse.choices`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.content`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.role`\n | `response.perplexityChatCompletionResponse.images`\n | `response.perplexityChatCompletionResponse.relatedQuestions`\n | `response.openAiCreateImageResponse.data`\n | `response.openAiCreateImageResponse.model`\n | `response.stabilityAiTextToImageResponse.data`\n | `response.stabilityAiTextToImageResponse.model`\n | `response.stabilityAiGenerateCoreResponse.data`\n | `response.stabilityAiGenerateCoreResponse.model`\n | `response.stabilityAiStableDiffusionResponse.data`\n | `response.stabilityAiStableDiffusionResponse.model`\n | `response.replicateCreatePredictionResponse.output`\n | `response.replicateCreatePredictionResponse.textOutput`\n | `response.stabilityAiEditImageWithPromptResponse.data`\n | `response.stabilityAiEditImageWithPromptResponse.model`\n | `response.runwareTextToImageResponse.data`\n | `response.runwareTextToImageResponse.data.${number}.taskUuid`\n | `response.runwareTextToImageResponse.data.${number}.imageUuid`\n | `response.runwareTextToImageResponse.data.${number}.nsfwContent`\n | `response.googleGenerateImageResponse.predictions`\n | `response.googleGenerateVideoResponse.videos`\n | `response.mlPlatformGenerateImageResponse.output`\n | `response.openAiCreateOpenAiImageResponse.data`\n | `response.openAiCreateOpenAiImageResponse.model`\n | `response.openAiEditOpenAiImageResponse.data`\n | `response.openAiEditOpenAiImageResponse.model`\n | `response.googleCreateChatCompletionResponse.model`\n | `response.googleCreateChatCompletionResponse.choices`\n | `response.googleCreateChatCompletionResponse.choices.${number}.message.role`\n | `response.mlPlatformOpenAiRawResponse.modelId`\n | `response.mlPlatformOpenAiRawResponse.choices`\n | `response.mlPlatformOpenAiRawResponse.choices.${number}.message.role`\n | `response.runwareVideoInferenceResponse.data`\n | `response.runwareVideoInferenceResponse.data.${number}.taskType`\n | `response.runwareVideoInferenceResponse.data.${number}.taskUuid`\n | `response.openAiResponsesResponse.model`\n | `response.openAiResponsesResponse.output`\n | `response.azureOpenAiResponsesResponse.model`\n | `response.azureOpenAiResponsesResponse.output`\n | `response.generatedContent.texts`\n | `response.generatedContent.images`\n | `response.generatedContent.images.${number}.url`\n | `response.generatedContent.videos`\n | `response.generatedContent.thinkingTexts`\n | `response.generatedContent.tools`\n | `response.generatedContent.tools.${number}.name`\n | `materializedPrompt.openAiChatCompletionRequest.model`\n | `materializedPrompt.openAiChatCompletionRequest.messages`\n | `materializedPrompt.openAiChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.openAiChatCompletionRequest.functions`\n | `materializedPrompt.openAiChatCompletionRequest.stop`\n | `materializedPrompt.openAiChatCompletionRequest.tools`\n | `materializedPrompt.openAiChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleTextBisonRequest.instances`\n | `materializedPrompt.googleTextBisonRequest.parameters.stopSequences`\n | `materializedPrompt.googleTextBisonRequest.model`\n | `materializedPrompt.googleChatBisonRequest.instances`\n | 
`materializedPrompt.googleChatBisonRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.messages`\n | `materializedPrompt.azureChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.azureChatCompletionRequest.functions`\n | `materializedPrompt.azureChatCompletionRequest.stop`\n | `materializedPrompt.azureChatCompletionRequest.tools`\n | `materializedPrompt.azureChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleGeminiGenerateContentRequest.model`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents.${number}.role`\n | `materializedPrompt.googleGeminiGenerateContentRequest.systemInstruction.parts`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.googleSearchRetrieval.dynamicRetrievalConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.category`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.threshold`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.stopSequences`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.responseModalities`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.imageConfig.personGeneration`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.mediaResolution`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.allowedFunctionNames`\n | `materializedPrompt.anthropicClaudeRequest.model`\n | `materializedPrompt.anthropicClaudeRequest.messages`\n | `materializedPrompt.anthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.anthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.anthropicClaudeRequest.stopSequences`\n | `materializedPrompt.anthropicClaudeRequest.tools`\n | `materializedPrompt.anthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.anthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.anthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.model`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.googleAnthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.googleAnthropicClaudeRequest.stopSequences`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.invokeAnthropicModelRequest.model`\n | 
`materializedPrompt.invokeAnthropicModelRequest.messages`\n | `materializedPrompt.invokeAnthropicModelRequest.messages.${number}.role`\n | `materializedPrompt.invokeAnthropicModelRequest.systemPrompt`\n | `materializedPrompt.invokeAnthropicModelRequest.stopSequences`\n | `materializedPrompt.invokeAnthropicModelRequest.tools`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.name`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.cacheControl.type`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayWidthPx`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayHeightPx`\n | `materializedPrompt.invokeAnthropicModelRequest.toolChoice.type`\n | `materializedPrompt.invokeAnthropicModelRequest.thinking.budgetTokens`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.name`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.type`\n | `materializedPrompt.llamaModelRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.quality`\n | `materializedPrompt.openAiCreateImageRequest.size`\n | `materializedPrompt.openAiCreateImageRequest.style`\n | `materializedPrompt.stabilityAiTextToImageRequest.model`\n | `materializedPrompt.stabilityAiTextToImageRequest.textPrompts`\n | `materializedPrompt.stabilityAiTextToImageRequest.clipGuidancePreset`\n | `materializedPrompt.stabilityAiTextToImageRequest.sampler`\n | `materializedPrompt.stabilityAiTextToImageRequest.stylePreset`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.model`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.stylePreset`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.mode`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.model`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.outputFormat`\n | `materializedPrompt.blackForestLabsGenerateImageRequest.model`\n | `materializedPrompt.replicateCreatePredictionRequest.lucatacoFlorence2Large.taskInput`\n | `materializedPrompt.replicateCreatePredictionRequest.perceptronIsaac01.response`\n | `materializedPrompt.replicateCreatePredictionRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.stylePreset`\n | `materializedPrompt.runwareTextToImageRequest.positivePrompt`\n | `materializedPrompt.runwareTextToImageRequest.height`\n | `materializedPrompt.runwareTextToImageRequest.width`\n | `materializedPrompt.runwareTextToImageRequest.referenceImages`\n | `materializedPrompt.runwareTextToImageRequest.model`\n | `materializedPrompt.runwareTextToImageRequest.loraModels`\n | `materializedPrompt.runwareTextToImageRequest.inputs.referenceImages`\n | `materializedPrompt.mlPlatformLlamaModelRequest.modelId`\n | `materializedPrompt.perplexityChatCompletionRequest.model`\n | `materializedPrompt.perplexityChatCompletionRequest.messages`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.content`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.perplexityChatCompletionRequest.searchDomainFilter`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.jsonSchema`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.regex`\n | `materializedPrompt.googleGenerateImageRequest.model`\n | 
`materializedPrompt.googleGenerateImageRequest.instances`\n | `materializedPrompt.mlPlatformGenerateImageRequest.model`\n | `materializedPrompt.openAiCreateOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.imageUrls`\n | `materializedPrompt.googleGenerateVideoRequest.model`\n | `materializedPrompt.googleGenerateVideoRequest.instances`\n | `materializedPrompt.googleCreateChatCompletionRequest.model`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.modelId`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages.${number}.role`\n | `materializedPrompt.runwareVideoInferenceRequest.outputFormat`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages.${number}.inputImage`\n | `materializedPrompt.runwareVideoInferenceRequest.referenceImages`\n | `materializedPrompt.runwareVideoInferenceRequest.model`\n | `materializedPrompt.openAiResponsesRequest.model`\n | `materializedPrompt.openAiResponsesRequest.include`\n | `materializedPrompt.openAiResponsesRequest.input`\n | `materializedPrompt.openAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.openAiResponsesRequest.tools`\n | `materializedPrompt.azureOpenAiResponsesRequest.model`\n | `materializedPrompt.azureOpenAiResponsesRequest.include`\n | `materializedPrompt.azureOpenAiResponsesRequest.input`\n | `materializedPrompt.azureOpenAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.azureOpenAiResponsesRequest.tools`\n | `materializedPrompt.openAiCreateVideoRequest.prompt`\n | `materializedPrompt.openAiCreateVideoRequest.model`\n | `materializedPrompt.templatedParameterNames`\n | `materializedPrompt.templatedDynamicPropertiesNames`\n | `predictionId`,\n 8\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n asyncGenerationConfig: options?.asyncGenerationConfig,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateContentByPromptObject(payload);\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n asyncGenerationConfig: '$[0].asyncGenerationConfig',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateContentByPromptObjectOptions {\n /** Prompt object that describes the content generation request. 
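\n * Illustrative sketch only (added commentary, not the package's generated docs): calling the method with an OpenAI-style chat prompt.\n * The `messages[].content` field and the exact `Prompt` payload accepted by the gateway are assumptions inferred from the `materializedPrompt` paths above.\n * @example\n * const { response, predictionId } = await generateContentByPromptObject({\n *   prompt: {\n *     openAiChatCompletionRequest: {\n *       model: 'gpt-4o', // model name is an assumption, not taken from this file\n *       messages: [{ role: 'user', content: 'Write a tagline for an espresso bar.' }],\n *     },\n *   },\n * });\n 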
*/\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n * @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GENERATE_TEXT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateTextByPromptObjectStreamed\n */\nexport async function generateTextByPromptObjectStreamed(\n options?: GenerateTextByPromptObjectStreamedOptions\n): Promise<\n NonNullablePaths<\n GeneratedTextChunk,\n | `azureChatCompletionChunk.choices`\n | `azureChatCompletionChunk.choices.${number}.delta.role`\n | `azureChatCompletionChunk.model`\n | `openaiChatCompletionChunk.choices`\n | `openaiChatCompletionChunk.choices.${number}.delta.role`\n | `openaiChatCompletionChunk.model`\n | `googleAnthropicStreamChunk.toolUse.cacheControl.type`\n | `googleAnthropicStreamChunk.contentBlockDelta.text`\n | `googleAnthropicStreamChunk.contentBlockDelta.partialJson`\n | `googleAnthropicStreamChunk.contentBlockDelta.thinking`\n | `googleAnthropicStreamChunk.contentBlockDelta.signature`\n | `googleAnthropicStreamChunk.messageDelta.usage.inputTokens`\n | `googleAnthropicStreamChunk.messageDelta.usage.outputTokens`\n | `googleAnthropicStreamChunk.redactedThinking.data`\n | `googleAnthropicStreamChunk.responseId`\n | `googleAnthropicStreamChunk.model`\n | `googleGeminiStreamChunk.candidates`\n | `googleGeminiStreamChunk.candidates.${number}.finishReason`\n | `googleGeminiStreamChunk.usageMetadata.promptTokensDetails`\n | `googleGeminiStreamChunk.usageMetadata.promptTokensDetails.${number}.modality`\n | `googleGeminiStreamChunk.usageMetadata.candidatesTokensDetails`\n | `amazonAnthropicStreamChunk.toolUse.cacheControl.type`\n | `amazonAnthropicStreamChunk.contentBlockDelta.text`\n | `amazonAnthropicStreamChunk.contentBlockDelta.partialJson`\n | `amazonAnthropicStreamChunk.contentBlockDelta.thinking`\n | `amazonAnthropicStreamChunk.contentBlockDelta.signature`\n | `amazonAnthropicStreamChunk.messageDelta.usage.inputTokens`\n | `amazonAnthropicStreamChunk.messageDelta.usage.outputTokens`\n | `amazonAnthropicStreamChunk.redactedThinking.data`\n | `amazonAnthropicStreamChunk.responseId`\n | `amazonAnthropicStreamChunk.model`\n | `anthropicStreamChunk.toolUse.cacheControl.type`\n | `anthropicStreamChunk.webSearchToolResult.contentResults.items`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.type`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.type`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content`\n | 
`anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.textContent.text`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.image.mediaType`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.thinking.signature`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.thinking.thinking`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.redactedThinking.data`\n | `anthropicStreamChunk.webFetchToolResult.contentError.type`\n | `anthropicStreamChunk.webFetchToolResult.type`\n | `anthropicStreamChunk.contentBlockDelta.text`\n | `anthropicStreamChunk.contentBlockDelta.partialJson`\n | `anthropicStreamChunk.contentBlockDelta.thinking`\n | `anthropicStreamChunk.contentBlockDelta.signature`\n | `anthropicStreamChunk.messageDelta.usage.cacheCreation.ephemeral1hInputTokens`\n | `anthropicStreamChunk.messageDelta.usage.cacheCreation.ephemeral5mInputTokens`\n | `anthropicStreamChunk.messageDelta.usage.inputTokens`\n | `anthropicStreamChunk.messageDelta.usage.outputTokens`\n | `anthropicStreamChunk.messageDelta.usage.serverToolUse.webSearchRequests`\n | `anthropicStreamChunk.messageDelta.usage.serverToolUse.webFetchRequests`\n | `anthropicStreamChunk.responseId`\n | `anthropicStreamChunk.model`\n | `predictionId`,\n 10\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n asyncResultTopic: options?.asyncResultTopic,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateTextByPromptObjectStreamed(\n payload\n );\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n asyncResultTopic: '$[0].asyncResultTopic',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateTextByPromptObjectStreamedOptions {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. 
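\n * Illustrative sketch only (added commentary, not the package's generated docs): reading one streamed text delta.\n * How chunks are delivered depends on the SDK transport; `delta.content` is an assumption based on the OpenAI chunk shape, since only `delta.role` is named in the return type above.\n * @example\n * const chunk = await generateTextByPromptObjectStreamed({ prompt }); // `prompt` as in the earlier sketch\n * const delta = chunk.openaiChatCompletionChunk?.choices?.[0]?.delta;\n * if (delta?.content) process.stdout.write(delta.content);\n 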
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\n/** @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObjectAsync\n */\nexport async function generateContentByPromptObjectAsync(\n options?: GenerateContentByPromptObjectAsyncOptions\n): Promise<\n NonNullablePaths<\n GenerateContentByPromptObjectResponse,\n | `response.openAiChatCompletionResponse.model`\n | `response.openAiChatCompletionResponse.choices`\n | `response.openAiChatCompletionResponse.choices.${number}.message.role`\n | `response.googleTextBisonResponse.predictions`\n | `response.googleChatBisonResponse.predictions`\n | `response.azureChatCompletionResponse.model`\n | `response.azureChatCompletionResponse.choices`\n | `response.azureChatCompletionResponse.choices.${number}.message.role`\n | `response.googleGeminiGenerateContentResponse.candidates`\n | `response.googleGeminiGenerateContentResponse.candidates.${number}.finishReason`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails.${number}.modality`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.candidatesTokensDetails`\n | `response.anthropicClaudeResponse.responseId`\n | `response.anthropicClaudeResponse.model`\n | `response.anthropicClaudeResponse.responseType`\n | `response.anthropicClaudeResponse.role`\n | `response.anthropicClaudeResponse.content`\n | `response.anthropicClaudeResponse.usage.inputTokens`\n | `response.anthropicClaudeResponse.usage.outputTokens`\n | `response.anthropicClaudeResponse.contentBlocks`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.googleAnthropicClaudeResponse.responseId`\n | `response.googleAnthropicClaudeResponse.model`\n | `response.googleAnthropicClaudeResponse.responseType`\n | `response.googleAnthropicClaudeResponse.role`\n | `response.googleAnthropicClaudeResponse.content`\n | `response.googleAnthropicClaudeResponse.usage.inputTokens`\n | `response.googleAnthropicClaudeResponse.usage.outputTokens`\n | `response.googleAnthropicClaudeResponse.contentBlocks`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | 
`response.googleAnthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.responseId`\n | `response.invokeAnthropicModelResponse.model`\n | `response.invokeAnthropicModelResponse.type`\n | `response.invokeAnthropicModelResponse.role`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral1hInputTokens`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral5mInputTokens`\n | `response.invokeAnthropicModelResponse.usage.inputTokens`\n | `response.invokeAnthropicModelResponse.usage.outputTokens`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webSearchRequests`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webFetchRequests`\n | `response.invokeAnthropicModelResponse.container.expiresAt`\n | `response.invokeAnthropicModelResponse.container._id`\n | `response.invokeAnthropicModelResponse.content`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.text`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.cacheControl.type`\n | `response.invokeAnthropicModelResponse.content.${number}.image.mediaType`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.signature`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.thinking`\n | `response.invokeAnthropicModelResponse.content.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.content.${number}.document.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentSuccess.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentError.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.type`\n | `response.perplexityChatCompletionResponse.model`\n | `response.perplexityChatCompletionResponse.citations`\n | `response.perplexityChatCompletionResponse.choices`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.content`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.role`\n | `response.perplexityChatCompletionResponse.images`\n | `response.perplexityChatCompletionResponse.relatedQuestions`\n | `response.openAiCreateImageResponse.data`\n | `response.openAiCreateImageResponse.model`\n | `response.stabilityAiTextToImageResponse.data`\n | `response.stabilityAiTextToImageResponse.model`\n | `response.stabilityAiGenerateCoreResponse.data`\n | `response.stabilityAiGenerateCoreResponse.model`\n | `response.stabilityAiStableDiffusionResponse.data`\n | `response.stabilityAiStableDiffusionResponse.model`\n | `response.replicateCreatePredictionResponse.output`\n | `response.replicateCreatePredictionResponse.textOutput`\n | `response.stabilityAiEditImageWithPromptResponse.data`\n | `response.stabilityAiEditImageWithPromptResponse.model`\n | `response.runwareTextToImageResponse.data`\n | `response.runwareTextToImageResponse.data.${number}.taskUuid`\n | `response.runwareTextToImageResponse.data.${number}.imageUuid`\n | `response.runwareTextToImageResponse.data.${number}.nsfwContent`\n | `response.googleGenerateImageResponse.predictions`\n | `response.googleGenerateVideoResponse.videos`\n | `response.mlPlatformGenerateImageResponse.output`\n | `response.openAiCreateOpenAiImageResponse.data`\n | `response.openAiCreateOpenAiImageResponse.model`\n | `response.openAiEditOpenAiImageResponse.data`\n | `response.openAiEditOpenAiImageResponse.model`\n | `response.googleCreateChatCompletionResponse.model`\n | 
`response.googleCreateChatCompletionResponse.choices`\n | `response.googleCreateChatCompletionResponse.choices.${number}.message.role`\n | `response.mlPlatformOpenAiRawResponse.modelId`\n | `response.mlPlatformOpenAiRawResponse.choices`\n | `response.mlPlatformOpenAiRawResponse.choices.${number}.message.role`\n | `response.runwareVideoInferenceResponse.data`\n | `response.runwareVideoInferenceResponse.data.${number}.taskType`\n | `response.runwareVideoInferenceResponse.data.${number}.taskUuid`\n | `response.openAiResponsesResponse.model`\n | `response.openAiResponsesResponse.output`\n | `response.azureOpenAiResponsesResponse.model`\n | `response.azureOpenAiResponsesResponse.output`\n | `response.generatedContent.texts`\n | `response.generatedContent.images`\n | `response.generatedContent.images.${number}.url`\n | `response.generatedContent.videos`\n | `response.generatedContent.thinkingTexts`\n | `response.generatedContent.tools`\n | `response.generatedContent.tools.${number}.name`\n | `materializedPrompt.openAiChatCompletionRequest.model`\n | `materializedPrompt.openAiChatCompletionRequest.messages`\n | `materializedPrompt.openAiChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.openAiChatCompletionRequest.functions`\n | `materializedPrompt.openAiChatCompletionRequest.stop`\n | `materializedPrompt.openAiChatCompletionRequest.tools`\n | `materializedPrompt.openAiChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleTextBisonRequest.instances`\n | `materializedPrompt.googleTextBisonRequest.parameters.stopSequences`\n | `materializedPrompt.googleTextBisonRequest.model`\n | `materializedPrompt.googleChatBisonRequest.instances`\n | `materializedPrompt.googleChatBisonRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.messages`\n | `materializedPrompt.azureChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.azureChatCompletionRequest.functions`\n | `materializedPrompt.azureChatCompletionRequest.stop`\n | `materializedPrompt.azureChatCompletionRequest.tools`\n | `materializedPrompt.azureChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleGeminiGenerateContentRequest.model`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents.${number}.role`\n | `materializedPrompt.googleGeminiGenerateContentRequest.systemInstruction.parts`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.googleSearchRetrieval.dynamicRetrievalConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.category`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.threshold`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.stopSequences`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.responseModalities`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.imageConfig.personGeneration`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.mediaResolution`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.allowedFunctionNames`\n | 
`materializedPrompt.anthropicClaudeRequest.model`\n | `materializedPrompt.anthropicClaudeRequest.messages`\n | `materializedPrompt.anthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.anthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.anthropicClaudeRequest.stopSequences`\n | `materializedPrompt.anthropicClaudeRequest.tools`\n | `materializedPrompt.anthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.anthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.anthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.model`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.googleAnthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.googleAnthropicClaudeRequest.stopSequences`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.invokeAnthropicModelRequest.model`\n | `materializedPrompt.invokeAnthropicModelRequest.messages`\n | `materializedPrompt.invokeAnthropicModelRequest.messages.${number}.role`\n | `materializedPrompt.invokeAnthropicModelRequest.systemPrompt`\n | `materializedPrompt.invokeAnthropicModelRequest.stopSequences`\n | `materializedPrompt.invokeAnthropicModelRequest.tools`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.name`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.cacheControl.type`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayWidthPx`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayHeightPx`\n | `materializedPrompt.invokeAnthropicModelRequest.toolChoice.type`\n | `materializedPrompt.invokeAnthropicModelRequest.thinking.budgetTokens`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.name`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.type`\n | `materializedPrompt.llamaModelRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.quality`\n | `materializedPrompt.openAiCreateImageRequest.size`\n | `materializedPrompt.openAiCreateImageRequest.style`\n | `materializedPrompt.stabilityAiTextToImageRequest.model`\n | `materializedPrompt.stabilityAiTextToImageRequest.textPrompts`\n | `materializedPrompt.stabilityAiTextToImageRequest.clipGuidancePreset`\n | `materializedPrompt.stabilityAiTextToImageRequest.sampler`\n | `materializedPrompt.stabilityAiTextToImageRequest.stylePreset`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.model`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.stylePreset`\n | 
`materializedPrompt.stabilityAiStableDiffusionRequest.mode`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.model`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.outputFormat`\n | `materializedPrompt.blackForestLabsGenerateImageRequest.model`\n | `materializedPrompt.replicateCreatePredictionRequest.lucatacoFlorence2Large.taskInput`\n | `materializedPrompt.replicateCreatePredictionRequest.perceptronIsaac01.response`\n | `materializedPrompt.replicateCreatePredictionRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.stylePreset`\n | `materializedPrompt.runwareTextToImageRequest.positivePrompt`\n | `materializedPrompt.runwareTextToImageRequest.height`\n | `materializedPrompt.runwareTextToImageRequest.width`\n | `materializedPrompt.runwareTextToImageRequest.referenceImages`\n | `materializedPrompt.runwareTextToImageRequest.model`\n | `materializedPrompt.runwareTextToImageRequest.loraModels`\n | `materializedPrompt.runwareTextToImageRequest.inputs.referenceImages`\n | `materializedPrompt.mlPlatformLlamaModelRequest.modelId`\n | `materializedPrompt.perplexityChatCompletionRequest.model`\n | `materializedPrompt.perplexityChatCompletionRequest.messages`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.content`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.perplexityChatCompletionRequest.searchDomainFilter`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.jsonSchema`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.regex`\n | `materializedPrompt.googleGenerateImageRequest.model`\n | `materializedPrompt.googleGenerateImageRequest.instances`\n | `materializedPrompt.mlPlatformGenerateImageRequest.model`\n | `materializedPrompt.openAiCreateOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.imageUrls`\n | `materializedPrompt.googleGenerateVideoRequest.model`\n | `materializedPrompt.googleGenerateVideoRequest.instances`\n | `materializedPrompt.googleCreateChatCompletionRequest.model`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.modelId`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages.${number}.role`\n | `materializedPrompt.runwareVideoInferenceRequest.outputFormat`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages.${number}.inputImage`\n | `materializedPrompt.runwareVideoInferenceRequest.referenceImages`\n | `materializedPrompt.runwareVideoInferenceRequest.model`\n | `materializedPrompt.openAiResponsesRequest.model`\n | `materializedPrompt.openAiResponsesRequest.include`\n | `materializedPrompt.openAiResponsesRequest.input`\n | `materializedPrompt.openAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.openAiResponsesRequest.tools`\n | `materializedPrompt.azureOpenAiResponsesRequest.model`\n | `materializedPrompt.azureOpenAiResponsesRequest.include`\n | `materializedPrompt.azureOpenAiResponsesRequest.input`\n | `materializedPrompt.azureOpenAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.azureOpenAiResponsesRequest.tools`\n | 
`materializedPrompt.openAiCreateVideoRequest.prompt`\n | `materializedPrompt.openAiCreateVideoRequest.model`\n | `materializedPrompt.templatedParameterNames`\n | `materializedPrompt.templatedDynamicPropertiesNames`\n | `predictionId`,\n 8\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n asyncGenerationConfig: options?.asyncGenerationConfig,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateContentByPromptObjectAsync(\n payload\n );\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n asyncGenerationConfig: '$[0].asyncGenerationConfig',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateContentByPromptObjectAsyncOptions {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
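\n * Illustrative sketch only (added commentary, not the package's generated docs): start an async generation and correlate\n * the completed event with the returned `predictionId`. The empty `asyncGenerationConfig` payload is an assumption; its real shape is defined elsewhere in the package.\n * @example\n * const { predictionId } = await generateContentByPromptObjectAsync({\n *   prompt, // a Prompt as in the earlier sketch\n *   asyncGenerationConfig: {},\n * });\n * onPromptProxyCompleted(({ data }) => {\n *   if (data.predictionId === predictionId) {\n *     console.log(data.errorMessage ?? data.generationResult);\n *   }\n * });\n 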
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\n/** @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GENERATE_TEXT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateAudioStreamed\n */\nexport async function generateAudioStreamed(\n options?: GenerateAudioStreamedOptions\n): Promise<\n NonNullablePaths<\n GeneratedAudioChunk,\n | `openAiSpeechChunk.content`\n | `elevenlabsSpeechChunk.audioBase64`\n | `elevenlabsSpeechChunk.alignment.characterStartTimesSeconds`\n | `elevenlabsSpeechChunk.alignment.characterEndTimesSeconds`\n | `elevenlabsSpeechChunk.alignment.characters`,\n 4\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n openAiCreateSpeechRequest: options?.openAiCreateSpeechRequest,\n elevenlabsTextToSpeechRequest: options?.elevenlabsTextToSpeechRequest,\n userRequestInfo: options?.userRequestInfo,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateAudioStreamed(payload);\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n openAiCreateSpeechRequest: '$[0].openAiCreateSpeechRequest',\n elevenlabsTextToSpeechRequest: '$[0].elevenlabsTextToSpeechRequest',\n userRequestInfo: '$[0].userRequestInfo',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateAudioStreamedOptions\n extends GenerateAudioStreamedOptionsAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. 
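\n * Illustrative sketch only (added commentary, not the package's generated docs): the one-of audio request with the ElevenLabs variant.\n * The `text` field name is an assumption (the request type's leading fields fall outside this excerpt); `voiceSettings` follows the `VoiceSettings` interface above.\n * @example\n * const chunk = await generateAudioStreamed({\n *   elevenlabsTextToSpeechRequest: {\n *     text: 'Hello from the gateway',\n *     voiceSettings: { stability: 0.5, similarityBoost: 0.75 },\n *   },\n * });\n * const audio = chunk.elevenlabsSpeechChunk?.audioBase64;\n 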
*/\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioStreamedOptionsAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n","import { transformSDKFloatToRESTFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTFloatToSDKFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTBytesToSDKBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformPaths } from '@wix/sdk-runtime/transformations/transform-paths';\nimport { resolveUrl } from '@wix/sdk-runtime/rest-modules';\nimport { ResolveUrlOpts } from '@wix/sdk-runtime/rest-modules';\nimport { RequestOptionsFactory } from '@wix/sdk-types';\n\nfunction resolveWixApiInfraV1WixAiExternalGatewayUrl(\n opts: Omit<ResolveUrlOpts, 'domainToMappings'>\n) {\n const domainToMappings = {\n 'www.wixapis.com': [\n {\n srcPath: '/ai-external-gateway-poc',\n destPath: '',\n },\n ],\n };\n\n return resolveUrl(Object.assign(opts, { domainToMappings }));\n}\n\nconst PACKAGE_NAME = '@wix/auto_sdk_ai-gateway_prompts';\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n 
{\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 
'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 
'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObject;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptObjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObjectStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n 
{ path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateTextByPromptObjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 
'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptObjectStreamed;\n}\n\nexport function generateContentByPromptObjectAsync(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObjectAsync({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 
'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObjectAsync',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object-async',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 
'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 
'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObjectAsync;\n}\n\nexport function generateAudioStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateAudioStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn: 'wix.api_infra.v1.WixAiExternalGateway.GenerateAudioStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-audio-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n { path: 'openAiSpeechChunk.content' },\n { path: 'elevenlabsSpeechChunk.audioBase64' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'elevenlabsSpeechChunk.alignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.alignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return 
__generateAudioStreamed;\n}\n"],"mappings":";AAAA,SAAS,kBAAkB,yBAAyB;AACpD;AAAA,EACE;AAAA,EACA;AAAA,OACK;;;ACJP,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,sBAAsB;AAC/B,SAAS,kBAAkB;AAI3B,SAAS,4CACP,MACA;AACA,QAAM,mBAAmB;AAAA,IACvB,mBAAmB;AAAA,MACjB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AAEA,SAAO,WAAW,OAAO,OAAO,MAAM,EAAE,iBAAiB,CAAC,CAAC;AAC7D;AAEA,IAAM,eAAe;AAMd,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAA
A,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;A
AAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAEO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YAC
A,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAEO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,4BAA4B;AAAA,YACpC,EAAE,MAAM,oCAAoC;AAAA,UAC9C;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;;;ADneO,IAAK,gDAAL,kBAAKC,mDAAL;AACL,EAAAA,+CAAA,aAAU;AACV,EAAAA,+CAAA,UAAO;AACP,EAAAA,+CAAA,eAAY;AACZ,EAAAA,+CAAA,YAAS;AACT,EAAAA,+CAAA,cAAW;AACX,EAAAA,+CAAA,UAAO;AAKP,EAAAA,+CAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAqFL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,WAAQ;AACR,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,yBAAsB;AACtB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oB
AAA,6BAA0B;AAC1B,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,2BAAwB;AAxCd,SAAAA;AAAA,GAAA;AAuWL,IAAK,mCAAL,kBAAKC,sCAAL;AACL,EAAAA,kCAAA,aAAU;AACV,EAAAA,kCAAA,UAAO;AACP,EAAAA,kCAAA,eAAY;AACZ,EAAAA,kCAAA,YAAS;AACT,EAAAA,kCAAA,cAAW;AACX,EAAAA,kCAAA,UAAO;AAKP,EAAAA,kCAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAqFL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,mBAAgB;AAChB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AAEpB,EAAAA,SAAA,4BAAyB;AACzB,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,wBAAqB;AAbX,SAAAA;AAAA,GAAA;AAiHL,IAAK,WAAL,kBAAKC,cAAL;AAEL,EAAAA,UAAA,0BAAuB;AAEvB,EAAAA,UAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAoBL,IAAK,UAAL,kBAAKC,aAAL;AAEL,EAAAA,SAAA,yBAAsB;AAEtB,EAAAA,SAAA,gBAAa;AAEb,EAAAA,SAAA,oBAAiB;AAEjB,EAAAA,SAAA,+BAA4B;AARlB,SAAAA;AAAA,GAAA;AAuEL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,iBAAc;AAEd,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,gBAAa;AAKb,EAAAA,cAAA,YAAS;AAET,EAAAA,cAAA,gBAAa;AAEb,EAAAA,cAAA,WAAQ;AAER,EAAAA,cAAA,cAAW;AAEX,EAAAA,cAAA,eAAY;AAEZ,EAAAA,cAAA,wBAAqB;AAErB,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,6BAA0B;AAE1B,EAAAA,cAAA,kBAAe;AAEf,EAAAA,cAAA,0BAAuB;AAEvB,EAAAA,cAAA,yBAAsB;AAhCZ,SAAAA;AAAA,GAAA;AA4EL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,sBAAmB;AACnB,EAAAA,cAAA,qCAAkC;AAClC,EAAAA,cAAA,+BAA4B;AAC5B,EAAAA,cAAA,8BAA2B;AAC3B,EAAAA,cAAA,qCAAkC;AALxB,SAAAA;AAAA,GAAA;AAiBL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,gBAAa;AACb,EAAAA,iBAAA,SAAM;AACN,EAAAA,iBAAA,YAAS;AACT,EAAAA,iBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAgOL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,sBAAmB;AAEnB,EAAAA,UAAA,UAAO;AAEP,EAAAA,UAAA,WAAQ;AAER,EAAAA,UAAA,WAAQ;AAPE,SAAAA;AAAA,GAAA;AAoEL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,UAAO;AACP,EAAAA,MAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAsFL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoBL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,aAAU;AAEV,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AAEZ,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA6JL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,aAAU;AACV,EAAAA,mBAAA,UAAO;AACP,EAAAA,mBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AA2FL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAuBL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,aAAU;AAEV,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA0JL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAqKL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoLL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,aAAU;AAEV,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AAEZ,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AAikBL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAeL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,8BAA2B;AAC3B,EAAAA,iBAAA,WAAQ;AACR,EAAAA,iBAAA,eAAY;AACZ,EAAAA,iBAAA,qBAAkB;AAClB,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,yBAA
sB;AANZ,SAAAA;AAAA,GAAA;AA4FL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oCAAiC;AACjC,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AA0CL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,uCAAoC;AAEpC,EAAAA,YAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAwBL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,4CAAyC;AACzC,EAAAA,gBAAA,uBAAoB;AAFV,SAAAA;AAAA,GAAA;AAuBL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,wCAAqC;AAErC,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,qBAAkB;AAElB,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,iBAAc;AAEd,EAAAA,2BAAA,uBAAoB;AAEpB,EAAAA,2BAAA,kBAAe;AAbL,SAAAA;AAAA,GAAA;AA0HL,IAAK,kCAAL,kBAAKC,qCAAL;AACL,EAAAA,iCAAA,mDAAgD;AAChD,EAAAA,iCAAA,aAAU;AACV,EAAAA,iCAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAqKL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,kCAA+B;AAC/B,EAAAA,kBAAA,kBAAe;AACf,EAAAA,kBAAA,iBAAc;AAHJ,SAAAA;AAAA,GAAA;AAoGL,IAAK,qCAAL,kBAAKC,wCAAL;AACL,EAAAA,oCAAA,aAAU;AACV,EAAAA,oCAAA,UAAO;AACP,EAAAA,oCAAA,eAAY;AACZ,EAAAA,oCAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA0CL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,mCAAgC;AAKhC,EAAAA,qBAAA,yCAAsC;AAKtC,EAAAA,qBAAA,6CAA0C;AAXhC,SAAAA;AAAA,GAAA;AAuGL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAiML,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,gCAA6B;AAC7B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,iCAA8B;AAC9B,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,wBAAqB;AAVX,SAAAA;AAAA,GAAA;AAocL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,gCAA6B;AAC7B,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,qCAAkC;AAJxB,SAAAA;AAAA,GAAA;AA80BL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA2DL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA2NL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,0BAAuB;AAdb,SAAAA;AAAA,GAAA;AAmDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,WAAQ;AAHE,SAAAA;AAAA,GAAA;AAgFL,IAAK,uBAAL,kBAAKC,0BAAL;AAEL,EAAAA,sBAAA,kCAA+B;AAE/B,EAAAA,sBAAA,0BAAuB;AAEvB,EAAAA,sBAAA,6BAA0B;AAE1B,EAAAA,sBAAA,2BAAwB;AARd,SAAAA;AAAA,GAAA;AAiDL,IAAK,6BAAL,kBAAKC,gCAAL;AAEL,EAAAA,4BAAA,sBAAmB;AAEnB,EAAAA,4BAAA,kBAAe;AAJL,SAAAA;AAAA,GAAA;AAwDL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,uBAAoB;AACpB,EAAAA,WAAA,gBAAa;AACb,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,qBAAkB;AALR,SAAAA;AAAA,GAAA;AAyKL,IAAK,mBAAL,kBAAKC,sBAAL;AAEL,EAAAA,kBAAA,mCAAgC;AAEhC,EAAAA,kBAAA,eAAY;AAEZ,EAAAA,kBAAA,iBAAc;AAEd,EAAAA,kBAAA,gBAAa;AARH,SAAAA;AAAA,GAAA;AAoCL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AAEV,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,SAAM;AAEN,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,eAAY;AAfF,SAAAA;AAAA,GAAA;AAmIL,IAAK,QAAL,kBAAKC,WAAL;AACL,EAAAA,OAAA,aAAU;AAEV,EAAAA,OAAA,yBAAsB;AAEtB,EAAAA,OAAA,wBAAqB;AAErB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAEvB,EAAAA,OAAA,2BAAwB;AACxB,EAAAA,OAAA,yBAAsB;AACtB,EAAAA,OAAA,uBAAoB;AAEpB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB
;AAnBb,SAAAA;AAAA,GAAA;AA2FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,aAAU;AACV,EAAAA,gBAAA,UAAO;AACP,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,aAAU;AACV,EAAAA,eAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAmHL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,wBAAqB;AACrB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,0BAAuB;AAXb,SAAAA;AAAA,GAAA;AAyFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,aAAU;AACV,EAAAA,6BAAA,UAAO;AACP,EAAAA,6BAAA,SAAM;AACN,EAAAA,6BAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,6BAAL,kBAAKC,gCAAL;AACL,EAAAA,4BAAA,aAAU;AACV,EAAAA,4BAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAqGL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,wBAAqB;AACrB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,0BAAuB;AAVb,SAAAA;AAAA,GAAA;AAgSL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,SAAM;AACN,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkDL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAwDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AAEtB,EAAAA,YAAA,6BAA0B;AAE1B,EAAAA,YAAA,8BAA2B;AAE3B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,gCAA6B;AAE7B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,+BAA4B;AAblB,SAAAA;AAAA,GAAA;AAwDL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AACxB,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,QAAK;AAHK,SAAAA;AAAA,GAAA;AAaL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,wBAAqB;AACrB,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AANP,SAAAA;AAAA,GAAA;AAmBL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,WAAQ;AACR,EAAAA,YAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AA0DL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,sCAAmC;AACnC,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AAuBL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,yBAAsB;AACtB,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,0BAAuB;AACvB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,YAAS;AACT,EAAAA,SAAA,WAAQ;AAXE,SAAAA;AAAA,GAAA;AA6BL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,8BAA2B;AAC3B,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,WAAQ;AACR,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,gBAAa;AACb,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,uBAAoB;AACpB,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAwFL,IAAK,iCAAL,kBAAKC,oCAAL;AACL,EAAAA,gCAAA,8BAA2B;AAC3B,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,WAAQ;AACR,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,gBAAa;AACb,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,uBAAoB;AACpB,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AA0FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,mBAAgB;AAChB,EAAAA,gBAAA,oBAAiB;AAHP,SAAAA;A
AAA,GAAA;AAaL,IAAK,6CAAL,kBAAKC,gDAAL;AACL,EAAAA,4CAAA,+BAA4B;AAC5B,EAAAA,4CAAA,UAAO;AACP,EAAAA,4CAAA,SAAM;AAHI,SAAAA;AAAA,GAAA;AAyHL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,iCAA8B;AAC9B,EAAAA,sBAAA,wBAAqB;AACrB,EAAAA,sBAAA,gBAAa;AACb,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,qBAAkB;AANR,SAAAA;AAAA,GAAA;AAsDL,IAAK,wBAAL,kBAAKC,2BAAL;AAEL,EAAAA,uBAAA,qCAAkC;AAElC,EAAAA,uBAAA,gBAAa;AAEb,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,eAAY;AAEZ,EAAAA,uBAAA,+BAA4B;AAE5B,EAAAA,uBAAA,yBAAsB;AAZZ,SAAAA;AAAA,GAAA;AAgOL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,6BAA0B;AAC1B,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,aAAU;AACV,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,2BAAwB;AACxB,EAAAA,WAAA,iCAA8B;AAC9B,EAAAA,WAAA,qBAAkB;AAClB,EAAAA,WAAA,0BAAuB;AACvB,EAAAA,WAAA,SAAM;AACN,EAAAA,WAAA,qBAAkB;AAVR,SAAAA;AAAA,GAAA;AA6CL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,gCAA6B;AAC7B,EAAAA,cAAA,UAAO;AACP,EAAAA,cAAA,SAAM;AACN,EAAAA,cAAA,WAAQ;AACR,EAAAA,cAAA,aAAU;AALA,SAAAA;AAAA,GAAA;AAgHL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,8BAA2B;AAC3B,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,gBAAa;AACb,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAmKL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,kBAAe;AAEf,EAAAA,yBAAA,uBAAoB;AAEpB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,mBAAgB;AA7BN,SAAAA;AAAA,GAAA;AAoOL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,mCAAgC;AANtB,SAAAA;AAAA,GAAA;AA2FL,IAAK,+BAAL,kBAAKC,kCAAL;AAEL,EAAAA,8BAAA,qCAAkC;AAElC,EAAAA,8BAAA,gBAAa;AAJH,SAAAA;AAAA,GAAA;AAyOL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,6BAA0B;AAC1B,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,+BAA4B;AAJlB,SAAAA;AAAA,GAAA;AA+TL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,SAAM;AAEN,EAAAA,cAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AA8BL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,2BAAwB;AACxB,EAAAA,YAAA,mBAAgB;AALN,SAAAA;AAAA,GAAA;AAgJL,IAAK,4CAAL,kBAAKC,+CAAL;AACL,EAAAA,2CAAA,sBAAmB;AACnB,EAAAA,2CAAA,UAAO;AACP,EAAAA,2CAAA,YAAS;AACT,EAAAA,2CAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAyaL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,YAAS;AACT,EAAAA,sBAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAqUL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,yBAAsB;AACtB,EAAAA,cAAA,YAAS;AACT,EAAAA,cAAA,gBAAa;AAHH,SAAAA;AAAA,GAAA;AAyIL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AACZ,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AANF,SAAAA;AAAA,GAAA;AA+lBL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAsFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,8CAA2C;AAC3C,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,uBAAoB;AACpB,EAAAA,6BAAA,qBAAkB;AAJR,SAAAA;AAAA,GAAA;AA+NL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,aAAU;AACV,EAAAA,qBAAA,uBAAoB;AACpB,EAAAA,qBAAA,YAAS;AACT,EAAAA,qBAAA,cAAW;AACX,EAAAA,qBAAA,SAAM;AALI,SAAAA;AAA
A,GAAA;AAsFZ,eAAsBC,+BACpB,SAyQA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,uBAAuB,SAAS;AAAA,IAChC,sBAAsB,SAAS;AAAA,EACjC,CAAC;AAED,QAAM,UAC+B,8BAA8B,OAAO;AAE1E,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,uBAAuB;AAAA,UACvB,sBAAsB;AAAA,QACxB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AA4BA,eAAsBC,oCACpB,SA8DA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,sBAAsB,SAAS;AAAA,IAC/B,kBAAkB,SAAS;AAAA,EAC7B,CAAC;AAED,QAAM,UAC+B;AAAA,IACjC;AAAA,EACF;AAEF,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,sBAAsB;AAAA,UACtB,kBAAkB;AAAA,QACpB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AA4BA,eAAsBC,oCACpB,SAyQA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,uBAAuB,SAAS;AAAA,IAChC,sBAAsB,SAAS;AAAA,EACjC,CAAC;AAED,QAAM,UAC+B;AAAA,IACjC;AAAA,EACF;AAEF,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,uBAAuB;AAAA,UACvB,sBAAsB;AAAA,QACxB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AAwBA,eAAsBC,uBACpB,SAWA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,2BAA2B,SAAS;AAAA,IACpC,+BAA+B,SAAS;AAAA,IACxC,iBAAiB,SAAS;AAAA,EAC5B,CAAC;AAED,QAAM,UAC+B,sBAAsB,OAAO;AAElE,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,2BAA2B;AAAA,UAC3B,+BAA+B;AAAA,UAC/B,iBAAiB;AAAA,QACnB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;","names":["payload","OpenaiproxyV1ChatCompletionMessageMessageRole","OpenaiproxyV1Model","ChatCompletionMessageMessageRole","V1Model","Language","Outcome","FinishReason","HarmCategory","HarmProbability","Modality","ResponseTypeType","Role","Type","MediaType","GoogleproxyV1ResponseTypeType","V1MessageRoleRole","V1CacheControlType","V1ImageMediaTypeMediaType","V1ResponseTypeType","MessageRoleRole","CacheControlType","ImageMediaTypeMediaType","PerplexityMessageMessageRole","PerplexityModel","V1ImageModel","ImageModel","ImageCoreModel","ImageStableDiffusionModel","EditImageWithPromptRequestModel","OpenAiImageModel","V1ChatCompletionMessageMessageRole","ChatCompletionModel","MessageRole","V1ResponsesModel","ResponsesModel","TextB
isonModel","ChatBisonModel","GoogleproxyV1Model","ContentRole","MediaResolutionLevel","DynamicRetrievalConfigMode","Threshold","PersonGeneration","Mode","Model","ToolChoiceType","McpServerType","ClaudeModel","GoogleproxyV1ToolChoiceType","GoogleproxyV1McpServerType","AnthropicModel","V1ToolChoiceType","V1McpServerType","LlamaModel","ImageQuality","ImageSize","ImageStyle","ClipGuidancePreset","Sampler","TextToImageRequestStylePreset","GenerateCoreRequestStylePreset","GenerationMode","GenerateStableDiffusionRequestOutputFormat","GenerateAnImageModel","CreatePredictionModel","TaskInput","ResponseType","StylePreset","TextToImageRequestModel","ImagenModel","GenerateImageMlPlatformModel","VideoGenModel","OutputFormat","VideoModel","ResponsesInputMessageResponsesMessageRole","ResponsesMessageRole","V1VideoModel","GatewayMessageDefinitionRole","SpeechModel","ElevenLabsTextToSpeechModel","WebhookIdentityType","generateContentByPromptObject","generateTextByPromptObjectStreamed","generateContentByPromptObjectAsync","generateAudioStreamed"]}
|
+
{"version":3,"sources":["../../src/api-infra-v1-prompt-proxy-prompts.universal.ts","../../src/api-infra-v1-prompt-proxy-prompts.http.ts"],"sourcesContent":["import { transformError as sdkTransformError } from '@wix/sdk-runtime/transform-error';\nimport {\n renameKeysFromSDKRequestToRESTRequest,\n renameKeysFromRESTResponseToSDKResponse,\n} from '@wix/sdk-runtime/rename-all-nested-keys';\nimport { HttpClient, NonNullablePaths } from '@wix/sdk-types';\nimport * as ambassadorWixApiInfraV1PromptProxy from './api-infra-v1-prompt-proxy-prompts.http.js';\n\n/**\n * A Prompt is a ...\n * You can ...\n * Read more about Prompts\n * in this [article](<LINK_TO_KB_ARTICLE>).\n */\nexport interface PromptProxy {\n /**\n * @format GUID\n * @readonly\n */\n _id?: string;\n}\n\nexport interface GenerationCompletedResultEvent {\n /**\n * @format GUID\n * @readonly\n */\n predictionId?: string;\n generationResult?: GenerateContentModelResponse;\n /** @maxLength 10000 */\n errorMessage?: string | null;\n}\n\nexport interface GenerateContentModelResponse\n extends GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. 
*/\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n /** Extracted generated content data from the model's response. */\n generatedContent?: GeneratedContent;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n /** Token usage information. */\n tokenUsage?: V1TokenUsage;\n /** Metadata about the response, such as finish reason. */\n responseMetadata?: ResponseMetadata;\n}\n\n/** @oneof */\nexport interface GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. 
*/\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n}\n\n/** Model generation result, at least one of the fields should be present */\nexport interface GeneratedContent {\n /**\n * Zero or more textual results. Only present when the model returned a text.\n * @maxSize 1000\n */\n texts?: TextContent[];\n /**\n * Zero or more images. Only present when the model returned an image.\n * @maxSize 1000\n */\n images?: MediaContent[];\n /**\n * Zero or more videos. Only present when the model returned a video.\n * @maxSize 1000\n */\n videos?: MediaContent[];\n /**\n * Zero or more thinking texts. Only present when the model returned a thought.\n * @maxSize 1000\n */\n thinkingTexts?: ThinkingTextContent[];\n /**\n * Zero or more tool call requests. Only present when the model requested to call a tool.\n * @maxSize 1000\n */\n tools?: ToolUseContent[];\n}\n\nexport interface TextContent {\n /**\n * Generated text\n * @maxLength 1000000\n */\n generatedText?: string | null;\n}\n\nexport interface MediaContent {\n /**\n * Mime type, e.g. \"image/jpeg\" or \"video/mp4\"\n * @maxLength 500\n */\n mimeType?: string | null;\n /**\n * Wix Media Platform (WixMP) url where the image or video is stored.\n * @maxLength 5000\n */\n url?: string;\n}\n\nexport interface ThinkingTextContent {\n /**\n * The thought text of the model thinking\n * @maxLength 1000000\n */\n thoughtText?: string | null;\n}\n\nexport interface ToolUseContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string;\n /** Tool use input */\n input?: Record<string, any> | null;\n}\n\nexport interface V1TokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n /** Total number of tokens used in the request. 
*/\n totalTokens?: number | null;\n /** cache creation token usage */\n cacheCreationTokens?: number | null;\n /** cache read token usage */\n cacheReadTokens?: number | null;\n /** thought tokens usage */\n thoughtsTokens?: number | null;\n /** tool use tokens usage */\n toolUseTokens?: number | null;\n}\n\nexport interface ResponseMetadata {\n /**\n * Finish reason of the model response.\n * @maxLength 1000\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: OpenaiproxyV1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessage {\n /** The role of the message author. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: ChatCompletionMessageFunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: OpenaiproxyV1ChatCompletionMessageContentPart[];\n}\n\nexport interface ChatCompletionMessageFunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. 
Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum OpenaiproxyV1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals =\n | OpenaiproxyV1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ChatCompletionMessageToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n _id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: ChatCompletionMessageFunctionWithArgs;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageContentPart\n extends OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface CreateChatCompletionResponsePromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseCompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. 
*/\n rejectedPredictionTokens?: number | null;\n}\n\nexport enum OpenaiproxyV1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_0301 = 'GPT_3_5_TURBO_0301',\n GPT_4 = 'GPT_4',\n GPT_4_0314 = 'GPT_4_0314',\n GPT_4_32K = 'GPT_4_32K',\n GPT_4_32K_0314 = 'GPT_4_32K_0314',\n GPT_3_5_TURBO_0613 = 'GPT_3_5_TURBO_0613',\n GPT_3_5_TURBO_16K = 'GPT_3_5_TURBO_16K',\n GPT_3_5_TURBO_16K_0613 = 'GPT_3_5_TURBO_16K_0613',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_4_32K_0613 = 'GPT_4_32K_0613',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_1106_PREVIEW = 'GPT_4_1106_PREVIEW',\n GPT_4_VISION_PREVIEW = 'GPT_4_VISION_PREVIEW',\n GPT_4_TURBO_PREVIEW = 'GPT_4_TURBO_PREVIEW',\n GPT_4_0125_PREVIEW = 'GPT_4_0125_PREVIEW',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4_TURBO_2024_04_09 = 'GPT_4_TURBO_2024_04_09',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4O_2024_08_06 = 'GPT_4O_2024_08_06',\n O1_PREVIEW = 'O1_PREVIEW',\n O1_PREVIEW_2024_09_12 = 'O1_PREVIEW_2024_09_12',\n O1_MINI = 'O1_MINI',\n O1_MINI_2024_09_12 = 'O1_MINI_2024_09_12',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O1_2024_12_17 = 'O1_2024_12_17',\n O3_MINI_2025_01_31 = 'O3_MINI_2025_01_31',\n GPT_4_OLD = 'GPT_4_OLD',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n O3_2025_04_16 = 'O3_2025_04_16',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_EXP = 'GPT_EXP',\n GPT_EXP_2 = 'GPT_EXP_2',\n GPT_5_2025_08_07 = 'GPT_5_2025_08_07',\n GPT_5_MINI_2025_08_07 = 'GPT_5_MINI_2025_08_07',\n GPT_5_NANO_2025_08_07 = 'GPT_5_NANO_2025_08_07',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ModelWithLiterals =\n | OpenaiproxyV1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_0301'\n | 'GPT_4'\n | 'GPT_4_0314'\n | 'GPT_4_32K'\n | 'GPT_4_32K_0314'\n | 'GPT_3_5_TURBO_0613'\n | 'GPT_3_5_TURBO_16K'\n | 'GPT_3_5_TURBO_16K_0613'\n | 'GPT_4_0613'\n | 'GPT_4_32K_0613'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_1106_PREVIEW'\n | 'GPT_4_VISION_PREVIEW'\n | 'GPT_4_TURBO_PREVIEW'\n | 'GPT_4_0125_PREVIEW'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4_TURBO_2024_04_09'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4O_2024_08_06'\n | 'O1_PREVIEW'\n | 'O1_PREVIEW_2024_09_12'\n | 'O1_MINI'\n | 'O1_MINI_2024_09_12'\n | 'GPT_4O_2024_11_20'\n | 'O1_2024_12_17'\n | 'O3_MINI_2025_01_31'\n | 'GPT_4_OLD'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'O3_2025_04_16'\n | 'O4_MINI_2025_04_16'\n | 'GPT_EXP'\n | 'GPT_EXP_2'\n | 'GPT_5_2025_08_07'\n | 'GPT_5_MINI_2025_08_07'\n | 'GPT_5_NANO_2025_08_07';\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: OpenaiproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. 
*/\n promptTokenDetails?: CreateChatCompletionResponsePromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CreateChatCompletionResponseCompletionTokenDetails;\n}\n\nexport interface TextBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: TextBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface TextBisonPrediction {\n /**\n * The result generated from input text.\n * @maxLength 100000\n */\n content?: string | null;\n /** Citation metadata */\n citationMetadata?: CitationMetadata;\n /** A collection of categories and their associated confidence scores. */\n safetyAttributes?: SafetyAttribute;\n}\n\nexport interface CitationMetadata {\n /**\n * Citations array\n * @maxSize 1000\n */\n citations?: V1Citation[];\n}\n\nexport interface V1Citation {\n /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */\n startIndex?: number | null;\n /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and < len(output). */\n endIndex?: number | null;\n /**\n * URL associated with this citation. If present, this URL links to the webpage of the source of this citation.\n * Possible URLs include news websites, GitHub repos, etc.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * Title associated with this citation. If present, it refers to the title of the source of this citation.\n * Possible titles include news titles, book titles, etc.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * License associated with this recitation. If present, it refers to the license of the source of this citation.\n * Possible licenses include code licenses, e.g., mit license.\n * @maxLength 100\n */\n license?: string | null;\n /**\n * Publication date associated with this citation. If present, it refers to the date at which the source of this citation was published.\n * Possible formats are YYYY, YYYY-MM, YYYY-MM-DD.\n * @maxLength 100\n */\n publicationDate?: string | null;\n}\n\nexport interface SafetyAttribute {\n /**\n * The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /** A flag indicating if the model's input or output was blocked. */\n blocked?: boolean | null;\n /**\n * The confidence scores of the each category, higher value means higher confidence.\n * @maxSize 100\n */\n scores?: number[] | null;\n /**\n * An error code that identifies why the input or output was blocked.\n * For a list of error codes, see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai#safety_filters_and_attributes.\n * @maxSize 100\n */\n errors?: string[] | null;\n}\n\nexport interface Metadata {\n /** TokenMetadata object */\n tokenMetadata?: TokenMetadata;\n}\n\nexport interface TokenMetadata {\n /** Number of input tokens. This is the total number of tokens across all messages, examples, and context. */\n inputTokenCount?: TokenCount;\n /** Number of output tokens. This is the total number of tokens in content across all candidates in the response. 
*/\n outputTokenCount?: TokenCount;\n}\n\nexport interface TokenCount {\n /** Number of tokens */\n totalTokens?: number | null;\n /** Number of billable characters */\n totalBillableCharacters?: number | null;\n}\n\nexport interface ChatBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: ChatBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface ChatBisonPrediction {\n /**\n * The chat result generated from given message.\n * @maxSize 100\n */\n candidates?: ChatMessage[];\n /**\n * Citation metadata\n * @maxSize 100\n */\n citationMetadata?: CitationMetadata[];\n /**\n * An array of collections of categories and their associated confidence scores. 1-1 mapping to candidates.\n * @maxSize 100\n */\n safetyAttributes?: SafetyAttribute[];\n}\n\nexport interface ChatMessage {\n /**\n * Author tag for the turn.\n * @maxLength 100000\n */\n author?: string | null;\n /**\n * Text content of the chat message.\n * @maxLength 100000\n */\n content?: string;\n}\n\nexport interface CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface V1ChatCompletionMessage {\n /** The role of the message author. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: FunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: ChatCompletionMessageContentPart[];\n}\n\nexport interface FunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ChatCompletionMessageMessageRoleWithLiterals =\n | ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n _id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: FunctionWithArgs;\n}\n\nexport interface ChatCompletionMessageContentPart\n extends ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface PromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport enum V1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n /** New models for Migration */\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n}\n\n/** @enumType */\nexport type V1ModelWithLiterals =\n | V1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_0613'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4O_2024_11_20'\n | 'O4_MINI_2025_04_16';\n\nexport interface CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: V1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: PromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CompletionTokenDetails;\n}\n\nexport interface GenerateContentResponse {\n /**\n * The generated response.\n * @maxSize 1000\n */\n candidates?: Candidate[];\n /** The usage metadata. */\n usageMetadata?: UsageMetadata;\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Candidate {\n /** The generated response content. */\n content?: CandidateContent;\n /** The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */\n finishReason?: FinishReasonWithLiterals;\n /**\n * The safety ratings of the response.\n * @maxSize 100\n */\n safetyRatings?: SafetyRating[];\n /** The citation metadata of the response. */\n citationMetadata?: CandidateCitationMetadata;\n /** Output only. Metadata specifies sources used to ground generated content. 
*/\n groundingMetadata?: GroundingMetadata;\n}\n\nexport interface CandidateContent {\n /**\n * The generated response content.\n * @maxSize 1000\n */\n parts?: CandidateContentPart[];\n}\n\nexport interface FunctionCall {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional. The function parameters and values in JSON object format. */\n args?: Record<string, any> | null;\n}\n\nexport interface ExecutableCode {\n /** Required. Programming language of the code. */\n language?: LanguageWithLiterals;\n /**\n * Required. The code to be executed.\n * @maxLength 100000\n */\n code?: string;\n}\n\nexport enum Language {\n /** Unspecified language. This value should not be used. */\n LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED',\n /** Python >= 3.10, with numpy and simpy available. */\n PYTHON = 'PYTHON',\n}\n\n/** @enumType */\nexport type LanguageWithLiterals = Language | 'LANGUAGE_UNSPECIFIED' | 'PYTHON';\n\nexport interface V1CodeExecutionResult {\n /** Required. Outcome of the code execution. */\n outcome?: OutcomeWithLiterals;\n /**\n * Optional. Contains stdout when code execution is successful, stderr or other description otherwise.\n * @maxLength 100000\n */\n output?: string | null;\n}\n\nexport enum Outcome {\n /** Unspecified status. This value should not be used. */\n OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED',\n /** Code execution completed successfully. */\n OUTCOME_OK = 'OUTCOME_OK',\n /** Code execution finished but with a failure. stderr should contain the reason. */\n OUTCOME_FAILED = 'OUTCOME_FAILED',\n /** Code execution ran for too long, and was cancelled. There may or may not be a partial output present. */\n OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED',\n}\n\n/** @enumType */\nexport type OutcomeWithLiterals =\n | Outcome\n | 'OUTCOME_UNSPECIFIED'\n | 'OUTCOME_OK'\n | 'OUTCOME_FAILED'\n | 'OUTCOME_DEADLINE_EXCEEDED';\n\n/**\n * Raw media bytes.\n * Text should not be sent as raw bytes, use the 'text' field.\n */\nexport interface Blob {\n /**\n * The IANA standard MIME type of the source data.\n * Examples: - image/png - image/jpeg\n * If an unsupported MIME type is provided, an error will be returned.\n * For a complete list of supported types, see https://ai.google.dev/gemini-api/docs/file-prompting-strategies#supported_file_formats.\n * @maxLength 100\n */\n mimeType?: string;\n /**\n * Represents raw bytes for media formats. Will be fetched from the passed URL in request, and uploaded to WixMP URL in response.\n * @format WEB_URL\n */\n data?: string;\n}\n\nexport interface CandidateContentPart {\n /**\n * The text generated by the model.\n * @maxLength 100000\n */\n text?: string | null;\n /** function call */\n functionCall?: FunctionCall;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. 
*/\n inlineData?: Blob;\n /**\n * Thought flag indicates that the content part is a thought.\n * @readonly\n */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 1000000\n */\n thoughtSignature?: string | null;\n}\n\nexport enum FinishReason {\n UNKNOWN_FINISH_REASON = 'UNKNOWN_FINISH_REASON',\n /** The finish reason is unspecified. */\n UNSPECIFIED = 'UNSPECIFIED',\n /** Natural stop point of the model or provided stop sequence. */\n STOP = 'STOP',\n /** The maximum number of tokens as specified in the request was reached. */\n MAX_TOKENS = 'MAX_TOKENS',\n /**\n * The token generation was stopped as the response was flagged for safety reasons.\n * Note that Candidate.content is empty if content filters block the output.\n */\n SAFETY = 'SAFETY',\n /** The token generation was stopped as the response was flagged for unauthorized citations. */\n RECITATION = 'RECITATION',\n /** All other reasons that stopped the token */\n OTHER = 'OTHER',\n /** The response candidate content was flagged for using an unsupported language. */\n LANGUAGE = 'LANGUAGE',\n /** Token generation stopped because the content contains forbidden terms. */\n BLOCKLIST = 'BLOCKLIST',\n /** Token generation stopped for potentially containing prohibited content. */\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n /** Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII). */\n SPII = 'SPII',\n /** The function call generated by the model is invalid. */\n MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',\n /** Token generation stopped because generated images contain safety violations. */\n IMAGE_SAFETY = 'IMAGE_SAFETY',\n /** Model generated a tool call but no tools were enabled in the request. */\n UNEXPECTED_TOOL_CALL = 'UNEXPECTED_TOOL_CALL',\n /** Model called too many tools consecutively, thus the system exited execution. */\n TOO_MANY_TOOL_CALLS = 'TOO_MANY_TOOL_CALLS',\n}\n\n/** @enumType */\nexport type FinishReasonWithLiterals =\n | FinishReason\n | 'UNKNOWN_FINISH_REASON'\n | 'UNSPECIFIED'\n | 'STOP'\n | 'MAX_TOKENS'\n | 'SAFETY'\n | 'RECITATION'\n | 'OTHER'\n | 'LANGUAGE'\n | 'BLOCKLIST'\n | 'PROHIBITED_CONTENT'\n | 'SPII'\n | 'MALFORMED_FUNCTION_CALL'\n | 'IMAGE_SAFETY'\n | 'UNEXPECTED_TOOL_CALL'\n | 'TOO_MANY_TOOL_CALLS';\n\nexport interface SafetyRating {\n /** The safety category that the response belongs to. */\n category?: HarmCategoryWithLiterals;\n /** The probability that the response belongs to the specified safety category. */\n probability?: HarmProbabilityWithLiterals;\n /** The probability score that the response belongs to the specified safety category. */\n probabilityScore?: number | null;\n /**\n * The severity of the response's safety rating.\n * @maxLength 100\n */\n severity?: string | null;\n /** the severity score of the response's safety rating. 
*/\n severityScore?: number | null;\n /**\n * A boolean flag associated with a safety attribute that indicates if the model's input or output was blocked.\n * If blocked is true, then the errors field in the response contains one or more error codes.\n * If blocked is false, then the response doesn't include the errors field.\n */\n blocked?: boolean | null;\n}\n\nexport enum HarmCategory {\n UNKNOWN_CATEGORY = 'UNKNOWN_CATEGORY',\n HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',\n}\n\n/** @enumType */\nexport type HarmCategoryWithLiterals =\n | HarmCategory\n | 'UNKNOWN_CATEGORY'\n | 'HARM_CATEGORY_SEXUALLY_EXPLICIT'\n | 'HARM_CATEGORY_HATE_SPEECH'\n | 'HARM_CATEGORY_HARASSMENT'\n | 'HARM_CATEGORY_DANGEROUS_CONTENT';\n\nexport enum HarmProbability {\n UNKNOWN_PROBABILITY = 'UNKNOWN_PROBABILITY',\n NEGLIGIBLE = 'NEGLIGIBLE',\n LOW = 'LOW',\n MEDIUM = 'MEDIUM',\n HIGH = 'HIGH',\n}\n\n/** @enumType */\nexport type HarmProbabilityWithLiterals =\n | HarmProbability\n | 'UNKNOWN_PROBABILITY'\n | 'NEGLIGIBLE'\n | 'LOW'\n | 'MEDIUM'\n | 'HIGH';\n\nexport interface CandidateCitationMetadata {\n /**\n * The citations of the response.\n * @maxSize 1000\n */\n citations?: CandidateCitationMetadataCitation[];\n}\n\nexport interface PublicationDate {\n /** The year of the publication date. */\n year?: number | null;\n /** The month of the publication date. */\n month?: number | null;\n /** The day of the publication date. */\n day?: number | null;\n}\n\nexport interface CandidateCitationMetadataCitation {\n /** An integer that specifies where a citation starts in the content. */\n startIndex?: number | null;\n /** An integer that specifies where a citation ends in the content. */\n endIndex?: number | null;\n /**\n * The URI of a citation source. Examples of a URI source might be a news website or a GitHub repository.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * The title of a citation source. Examples of source titles might be that of a news article or a book.\n * @maxLength 500\n */\n title?: string | null;\n /**\n * The license associated with a citation.\n * @maxLength 500\n */\n license?: string | null;\n /** The date a citation was published. Its valid formats are YYYY, YYYY-MM, and YYYY-MM-DD. */\n publicationDate?: PublicationDate;\n}\n\n/** Metadata returned to client when grounding is enabled. */\nexport interface GroundingMetadata {\n /**\n * Optional. Web search queries for the following-up web search.\n * @maxSize 1000\n * @maxLength 1000\n */\n webSearchQueries?: string[];\n /** Optional. Google search entry for the following-up web searches. */\n searchEntryPoint?: SearchEntryPoint;\n /**\n * List of supporting references retrieved from specified grounding source.\n * @maxSize 1000\n */\n groundingChunks?: GroundingChunk[];\n /**\n * Optional. List of grounding support.\n * @maxSize 1000\n */\n groundingSupports?: GroundingSupport[];\n /** Optional. Output only. Retrieval metadata. */\n retrievalMetadata?: RetrievalMetadata;\n}\n\n/** Google search entry point. */\nexport interface SearchEntryPoint {\n /**\n * Optional. Web content snippet that can be embedded in a web page or an app webview.\n * @maxLength 10000000\n */\n renderedContent?: string | null;\n /** Optional. Base64 encoded JSON representing array of <search term, search url> tuple. 
*/\n sdkBlob?: Uint8Array | null;\n}\n\n/** Grounding chunk. */\nexport interface GroundingChunk extends GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** @oneof */\nexport interface GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** Chunk from the web. */\nexport interface Web {\n /**\n * URI reference of the chunk.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the chunk.\n * @maxLength 1000\n */\n title?: string | null;\n}\n\n/** Chunk from context retrieved by the retrieval tools. */\nexport interface RetrievedContext {\n /**\n * URI reference of the attribution.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the attribution.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * Text of the attribution.\n * @maxLength 100000\n */\n text?: string | null;\n}\n\n/** Grounding support. */\nexport interface GroundingSupport {\n /** Segment of the content this support belongs to. */\n segment?: Segment;\n /**\n * A list of indices (into 'grounding_chunk') specifying the\n * citations associated with the claim. For instance [1,3,4] means\n * that grounding_chunk[1], grounding_chunk[3],\n * grounding_chunk[4] are the retrieved content attributed to the claim.\n * @maxSize 1000\n */\n groundingChunkIndices?: number[];\n /**\n * Confidence score of the support references. Ranges from 0 to 1. 1 is the\n * most confident. This list must have the same size as the\n * grounding_chunk_indices.\n * @maxSize 1000\n */\n confidenceScores?: number[];\n}\n\n/** Segment of the content. */\nexport interface Segment {\n /** Output only. The index of a Part object within its parent Content object. */\n partIndex?: number | null;\n /**\n * Output only. Start index in the given Part, measured in bytes. Offset from\n * the start of the Part, inclusive, starting at zero.\n */\n startIndex?: number;\n /**\n * Output only. End index in the given Part, measured in bytes. Offset from\n * the start of the Part, exclusive, starting at zero.\n */\n endIndex?: number;\n /**\n * Output only. The text corresponding to the segment from the response.\n * @maxLength 100000\n */\n text?: string;\n}\n\n/** Metadata related to retrieval in the grounding flow. */\nexport interface RetrievalMetadata {\n /**\n * Optional. Score indicating how likely information from Google Search could\n * help answer the prompt. The score is in the range `[0, 1]`, where 0 is the\n * least likely and 1 is the most likely. This score is only populated when\n * Google Search grounding and dynamic retrieval is enabled. It will be\n * compared to the threshold to determine whether to trigger Google Search.\n */\n googleSearchDynamicRetrievalScore?: number | null;\n}\n\nexport interface UsageMetadata {\n /** Number of tokens in the request. */\n promptTokenCount?: number | null;\n /** Number of tokens in the response. */\n candidatesTokenCount?: number | null;\n /** Number of tokens in the request and response(s). */\n totalTokenCount?: number | null;\n /** Optional. Number of tokens of thoughts for thinking models. */\n thoughtsTokenCount?: number | null;\n /**\n * Output only. 
List of modalities that were processed in the request input.\n * @maxSize 10\n */\n promptTokensDetails?: ModalityTokenCount[];\n /**\n * Output only. List of modalities that were returned in the response.\n * @maxSize 10\n */\n candidatesTokensDetails?: ModalityTokenCount[];\n}\n\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality?: ModalityWithLiterals;\n /** Number of tokens. */\n tokenCount?: string | null;\n}\n\nexport enum Modality {\n UNKNOWN_MODALITY = 'UNKNOWN_MODALITY',\n /** Indicates the model should return text. */\n TEXT = 'TEXT',\n /** Indicates the model should return images. */\n IMAGE = 'IMAGE',\n /** Indicates the model should return audio. */\n AUDIO = 'AUDIO',\n}\n\n/** @enumType */\nexport type ModalityWithLiterals =\n | Modality\n | 'UNKNOWN_MODALITY'\n | 'TEXT'\n | 'IMAGE'\n | 'AUDIO';\n\nexport interface InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: RoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number tokens of that the model generated in the response. */\n usage?: Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n */\n contentBlocks?: ContentBlock[];\n}\n\nexport enum ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type ResponseTypeTypeWithLiterals =\n | ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum Role {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type RoleWithLiterals = Role | 'UNKNOWN' | 'USER' | 'ASSISTANT';\n\nexport interface Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number tokens of that the model generated in the response. 
*/\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface ContentBlock extends ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\n/** @oneof */\nexport interface ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\nexport interface Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: TypeWithLiterals;\n}\n\nexport enum Type {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type TypeWithLiterals = Type | 'UNKNOWN' | 'EPHEMERAL';\n\nexport interface ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: MediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. 
*/\n cacheControl?: CacheControl;\n}\n\nexport enum MediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type MediaTypeWithLiterals =\n | MediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface SimpleContentBlock extends SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\n/** @oneof */\nexport interface SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\nexport interface Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface V1InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: GoogleproxyV1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number tokens of that the model generated in the response. */\n usage?: GoogleproxyV1Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 1000\n */\n contentBlocks?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum GoogleproxyV1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ResponseTypeTypeWithLiterals =\n | GoogleproxyV1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum V1MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type V1MessageRoleRoleWithLiterals =\n | V1MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface GoogleproxyV1Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number tokens of that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface GoogleproxyV1ContentBlock\n extends GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image image = 2; // Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. 
*/\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: V1CacheControlTypeWithLiterals;\n}\n\nexport enum V1CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type V1CacheControlTypeWithLiterals =\n | V1CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\nexport interface GoogleproxyV1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: V1ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport enum V1ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type V1ImageMediaTypeMediaTypeWithLiterals =\n | V1ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface GoogleproxyV1ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. 
*/\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: V1SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface V1SimpleContentBlock extends V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\n/** @oneof */\nexport interface V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\nexport interface GoogleproxyV1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface GoogleproxyV1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface InvokeAnthropicModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * \"end_turn\": the model reached a natural stopping point\n * \"max_tokens\": we exceeded the requested max_tokens or the model's maximum\n * \"stop_sequence\": one of your provided custom stop_sequences was generated\n * \"tool_use\": the model invoked one or more tools\n * \"pause_turn\": we paused a long-running turn. You may provide the response back as-is in a subsequent request to let the model continue.\n * \"refusal\": when streaming classifiers intervene to handle potential policy violations\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n type?: V1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: MessageRoleRoleWithLiterals;\n /** Container for the number of tokens that you supplied in the request and the number tokens of that the model generated in the response. 
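The `stopReason` here is a plain string rather than an enum, so callers branch on the documented values directly; per the field's own description, only `pause_turn` invites resending the response as-is. A sketch (function name illustrative, import path assumed):

```ts
import type { InvokeAnthropicModelResponse } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// 'pause_turn' is the only documented stop reason that asks the caller to
// send the response back unchanged so the model can continue its turn.
function shouldResumeTurn(res: InvokeAnthropicModelResponse): boolean {
  return res.stopReason === 'pause_turn';
}
```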
*/\n usage?: V1Usage;\n /**\n * Information about the container used in this request.\n * This will be non-null if a container tool (e.g. code execution) was used.\n */\n container?: Container;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum V1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type V1ResponseTypeTypeWithLiterals =\n | V1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport enum MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type MessageRoleRoleWithLiterals =\n | MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface V1Usage {\n /** Breakdown of cached tokens by TTL */\n cacheCreation?: UsageCacheCreation;\n /** The number of input tokens used to create the cache entry. */\n cacheCreationInputTokens?: number | null;\n /** The number of input tokens read from the cache. */\n cacheReadInputTokens?: number | null;\n /** The number of input tokens which were used. */\n inputTokens?: number;\n /** The number of output tokens which were used. */\n outputTokens?: number;\n /** The number of server tool requests. */\n serverToolUse?: UsageServerToolUse;\n /**\n * If the request used the priority, standard, or batch tier.\n * Available options: standard, priority, batch\n * @maxLength 500\n */\n serviceTier?: string | null;\n}\n\nexport interface UsageCacheCreation {\n /** The number of input tokens used to create the 1 hour cache entry. */\n ephemeral1hInputTokens?: number;\n /** The number of input tokens used to create the 5 minute cache entry. */\n ephemeral5mInputTokens?: number;\n}\n\nexport interface UsageServerToolUse {\n /** The number of web search tool requests. */\n webSearchRequests?: number;\n /** The number of web fetch tool requests. */\n webFetchRequests?: number;\n}\n\nexport interface Container {\n /**\n * The time at which the container will expire.\n * @maxLength 100\n */\n expiresAt?: string;\n /**\n * Identifier for the container used in this request\n * @maxLength 512\n */\n _id?: string;\n}\n\n/** Content object used in both request and response */\nexport interface V1ContentBlock extends V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
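Given the `V1Usage` breakdown above, the total input-side token count is the sum of fresh input tokens plus cache writes and cache reads; the package reports the buckets but leaves aggregation to the caller. A sketch under that assumption (each bucket may be priced differently by the provider):

```ts
import type { V1Usage } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Total input-side tokens: fresh input plus tokens written to and read
// back from the prompt cache.
function totalInputTokens(usage: V1Usage): number {
  return (
    (usage.inputTokens ?? 0) +
    (usage.cacheCreationInputTokens ?? 0) +
    (usage.cacheReadInputTokens ?? 0)
  );
}
```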
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\n/** @oneof */\nexport interface V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\nexport interface V1Text {\n /**\n * Text content.\n * @maxLength 1000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. 
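Because the `@oneof` contract is modeled as a bag of optional fields rather than a tagged union, consumers discriminate a `V1ContentBlock` by checking which field is populated. A sketch (labels illustrative):

```ts
import type { V1ContentBlock } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Route a content block by whichever oneof field is present.
function describeBlock(block: V1ContentBlock): string {
  if (block.textContent) return `text: ${block.textContent.text ?? ''}`;
  if (block.image) return `image: ${block.image.url ?? ''}`;
  if (block.toolUse) return `tool call: ${block.toolUse.name ?? 'unknown'}`;
  if (block.toolResult) return `tool result for ${block.toolResult.toolUseId ?? '?'}`;
  if (block.serverToolUse) return `server tool: ${block.serverToolUse.name ?? '?'}`;
  if (block.mcpToolUse) return `MCP tool call: ${block.mcpToolUse.name ?? '?'}`;
  if (block.thinking || block.redactedThinking) return 'thinking block';
  return 'other block (document, upload, or server tool result)';
}
```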
*/\n cacheControl?: V1CacheControl;\n /**\n * Structured citations for this text block.\n * Populated by the model when citations are enabled.\n * @maxSize 256\n */\n citations?: Citation[];\n}\n\nexport interface V1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: CacheControlTypeWithLiterals;\n /**\n * The time-to-live for the cache control breakpoint. This may be one of the following values:\n * 5m: 5 minutes (default)\n * 1h: 1 hour\n * @maxLength 50\n */\n ttl?: string | null;\n}\n\nexport enum CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type CacheControlTypeWithLiterals =\n | CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\n/** Unified wrapper for all citation kinds (attach to Text.citations). */\nexport interface Citation extends CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\n/** @oneof */\nexport interface CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\nexport interface CharLocationCitation {\n /**\n * Should be \"char_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start character index (inclusive) within the document text. */\n startCharIndex?: number | null;\n /** 0-based end character index (exclusive) within the document text. */\n endCharIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface PageLocationCitation {\n /**\n * Should be \"page_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 1-based start page number (inclusive). */\n startPageNumber?: number | null;\n /** 1-based end page number (exclusive). */\n endPageNumber?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface ContentBlockLocationCitation {\n /**\n * Should be \"content_block_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start content-block index (inclusive) within the custom document. 
*/\n startBlockIndex?: number | null;\n /** 0-based end content-block index (exclusive) within the custom document. */\n endBlockIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface WebSearchResultLocationCitation {\n /**\n * Should be \"web_search_result_location\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the cited source\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the cited source\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * A reference that must be passed back for multi-turn conversations.\n * @maxLength 1000000\n */\n encryptedIndex?: string | null;\n /**\n * Up to 150 characters of the cited content\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface SearchResultLocationCitation {\n /**\n * Should be \"search_result_location\".\n * @maxLength 500\n */\n type?: string;\n /** Index of the search_result within the current turn (0-based). */\n searchResultIndex?: number | null;\n /** 0-based start block indices within that search_result's content. */\n startBlockIndex?: number | null;\n /** 0-based end block indices within that search_result's content. */\n endBlockIndex?: number | null;\n /**\n * Source string\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Optional title (same as search_result.title).\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional quoted snippet\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface V1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport enum ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type ImageMediaTypeMediaTypeWithLiterals =\n | ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface V1ToolUse {\n /**\n * Tool use id\n * @maxLength 512\n */\n _id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Optional: enable tool use caching */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1ToolResult {\n /**\n * Tool use id\n * @maxLength 512\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: ToolResultContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface ToolResultContentBlock\n extends ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. 
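A `Citation` is likewise a oneof wrapper, so rendering code probes each location kind in turn. A sketch that turns the variants above into short labels (the label format is illustrative):

```ts
import type { Citation } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Produce a short human-readable label for any citation location kind.
function citationLabel(c: Citation): string {
  if (c.charLocation)
    return `chars ${c.charLocation.startCharIndex}..${c.charLocation.endCharIndex} of doc #${c.charLocation.documentIndex}`;
  if (c.pageLocation)
    return `pages ${c.pageLocation.startPageNumber}..${c.pageLocation.endPageNumber} of doc #${c.pageLocation.documentIndex}`;
  if (c.contentBlockLocation)
    return `blocks ${c.contentBlockLocation.startBlockIndex}..${c.contentBlockLocation.endBlockIndex} of doc #${c.contentBlockLocation.documentIndex}`;
  if (c.webSearchResultLocation)
    return `web: ${c.webSearchResultLocation.url ?? 'unknown URL'}`;
  if (c.searchResultLocation)
    return `search result #${c.searchResultLocation.searchResultIndex}`;
  return 'unknown citation kind';
}
```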
*/\n searchResult?: ToolResultSearchResult;\n}\n\n/** @oneof */\nexport interface ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\nexport interface DocumentContent {\n /**\n * Should be \"document\"\n * @maxLength 500\n */\n type?: string;\n /** Citable payload or reference. */\n source?: DocumentSource;\n /**\n * Optional: Document title\n * Can be passed to the model but not used towards cited content.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional: Any document metadata as text or stringified json.\n * Can be passed to the model but not used towards cited content.\n * @maxLength 1000000\n */\n context?: string | null;\n /** Enable citations for this doc */\n citations?: CitationsEnabled;\n /** Optional: Cache the document content */\n cacheControl?: V1CacheControl;\n}\n\nexport interface DocumentSource {\n /**\n * One of: \"text\" | \"base64\" | \"content\" | \"file\" | \"url\".\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Required types \"text\"/\"base64\" (e.g., \"text/plain\", \"application/pdf\").\n * @maxLength 500\n */\n mediaType?: string | null;\n /**\n * For type \"text\": raw text. For \"base64\": bytes as base64.\n * @maxLength 10000000\n */\n data?: string | null;\n /**\n * For type \"file\": Files API id (e.g., \"file_01...\")\n * @maxLength 5000\n */\n fileId?: string | null;\n /**\n * For type \"url\": absolute URL to the document\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * For type \"content\": custom content block; Only text blocks are citable\n * @maxSize 500\n */\n content?: V1ContentBlock[];\n}\n\nexport interface CitationsEnabled {\n /** Whether to enable citations */\n enabled?: boolean | null;\n}\n\nexport interface ToolResultSearchResult {\n /**\n * Should be \"search_result\".\n * @maxLength 500\n */\n type?: string;\n /**\n * Where this result came from (URL or source label).\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Human-readable title for the result.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Inline text snippets that summarize/support the result.\n * @maxSize 1000\n */\n content?: V1Text[];\n /**\n * Enable/disable citations for this result's content.\n * Matches Anthropic \"citations\" on search_result blocks.\n */\n citations?: CitationsEnabled;\n}\n\nexport interface V1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface V1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
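Putting `DocumentContent`, `DocumentSource`, and `CitationsEnabled` together, a citable plain-text document looks roughly like the literal below (all field values are illustrative). Per the `document` comment above, the same block should be resent on later turns so citations can keep referring to it.

```ts
import type { DocumentContent } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// A citable plain-text document; citations.enabled opts this block
// into the model's citation output.
const policyDoc: DocumentContent = {
  type: 'document',
  title: 'Refund policy', // illustrative
  source: {
    type: 'text',
    mediaType: 'text/plain',
    data: 'Refunds are issued within 30 days of purchase.',
  },
  citations: { enabled: true },
};
```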
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\n/**\n * Assistant requests a Model Context Protocol (MCP) tool call.\n * Pair with ToolResult using the same `id`.\n */\nexport interface McpToolUse {\n /**\n * Unique id for this tool call; must match McpToolResult.tool_use_id.\n * @maxLength 512\n */\n _id?: string | null;\n /**\n * Tool name as exposed by the MCP server.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Which MCP server to call (must match a server in the request).\n * @maxLength 1000\n */\n serverName?: string | null;\n /** JSON arguments for the tool (object per the tool's schema). */\n input?: Record<string, any> | null;\n}\n\n/**\n * Server-tool invocation announced by the ASSISTANT for Anthropic-run tools\n * (e.g., \"web_search\", \"code_execution\").\n */\nexport interface ServerToolUse {\n /**\n * Should be \"server_tool_use\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n _id?: string | null;\n /**\n * The tool name. Available options: \"web_search\" | \"web_fetch\" | \"code_execution\" | \"bash_code_execution\" | \"text_editor_code_execution\"\n * @maxLength 500\n */\n name?: string | null;\n /**\n * Tool-specific parameters object:\n * web_search → { \"query\": \"<string>\" }\n * web_fetch → { \"url\": \"<string>\" }\n * code_execution→ { \"code\": \"<python source>\" }\n */\n input?: Record<string, any> | null;\n}\n\n/** Server tool result (web search). Either results[] OR error. */\nexport interface WebSearchToolResult extends WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n /**\n * Should be \"web_search_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n}\n\n/** Success payload: the JSON `content` ARRAY of result items. 
*/\nexport interface WebSearchResultList {\n /**\n * Results items\n * @maxSize 1000\n */\n items?: WebSearchResult[];\n}\n\n/**\n * One search result item.\n * Docs (“Search results include”): url, title, page_age, encrypted_content.\n * Each item also has the literal type.\n */\nexport interface WebSearchResult {\n /**\n * Should be \"web_search_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the source page.\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the source page.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * When the site was last updated (e.g., \"April 30, 2025\").\n * @maxLength 100\n */\n pageAge?: string | null;\n /**\n * Encrypted content that must be passed back in multi-turn conversations for citations.\n * @maxLength 1000000\n */\n encryptedContent?: string | null;\n}\n\n/**\n * Error payload\n * Possible error codes: too_many_requests | invalid_input | max_uses_exceeded | query_too_long | unavailable\n */\nexport interface WebSearchToolResultError {\n /**\n * Should be \"web_search_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface CodeExecutionToolResult\n extends CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n /**\n * Should be \"code_execution_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n}\n\n/** Success payload for code execution. */\nexport interface CodeExecutionResult {\n /**\n * Should be \"code_execution_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Output from successful execution (print, etc.).\n * @maxLength 1000000\n */\n stdout?: string | null;\n /**\n * Error messages emitted by the program.\n * @maxLength 1000000\n */\n stderr?: string | null;\n /** 0 = success, non-zero = failure. */\n returnCode?: number | null;\n /**\n * Optional: Array of produced artifacts.\n * Example item (typical): { \"file_id\": \"file_abc123\", ... }\n * @maxSize 4096\n */\n content?: Record<string, any>[] | null;\n}\n\n/**\n * Error payload (HTTP 200; error lives in the result body).\n * Docs list: unavailable | code_execution_exceeded | container_expired\n */\nexport interface CodeExecutionToolResultError {\n /**\n * Should be \"code_execution_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value, e.g. 
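Since a `WebSearchToolResult` carries either `contentResults` or `contentError`, handling code branches on whichever side of the oneof is populated. A sketch:

```ts
import type { WebSearchToolResult } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Return result URLs on success; surface the documented error code otherwise.
function webSearchUrls(result: WebSearchToolResult): string[] {
  if (result.contentError) {
    // too_many_requests | invalid_input | max_uses_exceeded |
    // query_too_long | unavailable
    throw new Error(`web search failed: ${result.contentError.errorCode}`);
  }
  return (result.contentResults?.items ?? [])
    .map((item) => item.url ?? '')
    .filter((url) => url.length > 0);
}
```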
\"unavailable\", \"code_execution_exceeded\", \"container_expired\".\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface ContainerUpload {\n /**\n * Should be \"container_upload\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * File identifier returned by the Files API (e.g., \"file_01abc...\").\n * @maxLength 5000\n */\n fileId?: string | null;\n}\n\n/** Web fetch tool result */\nexport interface WebFetchToolResult extends WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n /**\n * Should be \"web_fetch_tool_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n}\n\nexport interface WebFetchToolResultContentSuccess {\n /**\n * Should be \"web_fetch_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * The URL that was fetched\n * @maxLength 10000\n */\n url?: string | null;\n /** A document block containing the fetched content */\n content?: DocumentContent;\n /**\n * Timestamp when the content was retrieved\n * @maxLength 256\n */\n retrievedAt?: string | null;\n}\n\nexport interface WebFetchToolResultContentError {\n /**\n * Should be \"web_fetch_tool_result_error\"\n * @maxLength 500\n */\n type?: string;\n /**\n * These are the possible error codes:\n * - invalid_tool_input: Invalid URL format\n * - url_too_long: URL exceeds maximum length (250 characters)\n * - url_not_allowed: URL blocked by domain filtering rules and model restrictions\n * - url_not_accessible: Failed to fetch content (HTTP error)\n * - too_many_requests: Rate limit exceeded\n * - unsupported_content_type: Content type not supported (only text and PDF)\n * - max_uses_exceeded: Maximum web fetch tool uses exceeded\n * - unavailable: An internal error occurred\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface InvokeLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeMlPlatformLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. 
Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeChatCompletionResponse {\n /**\n * Unique response ID\n * @maxLength 100\n */\n _id?: string | null;\n /** The model used to generate the response */\n model?: PerplexityModelWithLiterals;\n /**\n * The object type, which always equals chat.completion\n * @maxLength 100\n */\n object?: string | null;\n /** The Unix timestamp (in seconds) of when the completion was created */\n created?: number | null;\n /**\n * Citations for the generated answer\n * @maxLength 10000\n * @maxSize 1000\n */\n citations?: string[];\n /** The list of completion choices the model generated for the input prompt */\n choices?: InvokeChatCompletionResponseChoice[];\n /** URLs and size metadata for returned images */\n images?: PerplexityImageDescriptor[];\n /**\n * Further questions related to the search\n * @maxLength 10000\n * @maxSize 1000\n */\n relatedQuestions?: string[];\n /** Usage statistics for the completion request. */\n usage?: InvokeChatCompletionResponseUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface PerplexityMessage {\n /**\n * The content of the message\n * @maxLength 200000\n */\n content?: string;\n /**\n * The role of the speaker in this turn of conversation. After the (optional) system message,\n * user and assistant roles should alternate with `user` then `assistant`, ending in `user`.\n */\n role?: PerplexityMessageMessageRoleWithLiterals;\n}\n\nexport enum PerplexityMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n SYSTEM = 'SYSTEM',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type PerplexityMessageMessageRoleWithLiterals =\n | PerplexityMessageMessageRole\n | 'UNKNOWN'\n | 'SYSTEM'\n | 'USER'\n | 'ASSISTANT';\n\nexport enum PerplexityModel {\n UNKNOWN_PERPLEXITY_MODEL = 'UNKNOWN_PERPLEXITY_MODEL',\n SONAR = 'SONAR',\n SONAR_PRO = 'SONAR_PRO',\n SONAR_REASONING = 'SONAR_REASONING',\n SONAR_REASONING_PRO = 'SONAR_REASONING_PRO',\n SONAR_DEEP_RESEARCH = 'SONAR_DEEP_RESEARCH',\n}\n\n/** @enumType */\nexport type PerplexityModelWithLiterals =\n | PerplexityModel\n | 'UNKNOWN_PERPLEXITY_MODEL'\n | 'SONAR'\n | 'SONAR_PRO'\n | 'SONAR_REASONING'\n | 'SONAR_REASONING_PRO'\n | 'SONAR_DEEP_RESEARCH';\n\n/** Structures the completion choice */\nexport interface InvokeChatCompletionResponseChoice {\n /** Choice index */\n index?: number | null;\n /**\n * Stop reason, can be `STOP` or `LENGTH`\n * @maxLength 10\n */\n finishReason?: string | null;\n /** Choice message, containing content and role */\n message?: PerplexityMessage;\n}\n\nexport interface PerplexityImageDescriptor {\n /**\n * Full image url\n * @maxLength 5000\n */\n imageUrl?: string | null;\n /**\n * Image origin website\n * @maxLength 5000\n */\n originUrl?: string | null;\n /** Height */\n height?: number | null;\n /** Width */\n width?: number | null;\n}\n\n/** Usage statistics for the completion request. 
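Reading a Perplexity completion then means taking a choice's message and pairing it with the parallel top-level `citations` array. A sketch (names illustrative):

```ts
import type { InvokeChatCompletionResponse } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Extract the first answer and its citation URLs from a Perplexity response.
function readAnswer(res: InvokeChatCompletionResponse): { text: string; sources: string[] } {
  return {
    text: res.choices?.[0]?.message?.content ?? '',
    sources: res.citations ?? [],
  };
}
```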
*/\nexport interface InvokeChatCompletionResponseUsage {\n /** The number of tokens provided in the request prompt. */\n promptTokens?: number | null;\n /** The number of tokens generated in the response output. */\n completionTokens?: number | null;\n /** The total number of tokens used in the chat completion (prompt + completion). */\n totalTokens?: number | null;\n /** Tokens passed into the input from citations found during search. Priced like `prompt_tokens` */\n citationTokens?: number | null;\n /** Reasoning tokens are used to reason through the research material before generating the final output via the CoTs */\n reasoningTokens?: number | null;\n /** Number of search queries executed. */\n numSearchQueries?: number | null;\n}\n\nexport interface CreateImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: V1ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * The prompt that was used to generate the image, if there was any revision to the prompt.\n * @maxLength 100000\n */\n revisedPrompt?: string | null;\n}\n\nexport enum V1ImageModel {\n UNKNOWN_IMAGE_GENERATION_MODEL = 'UNKNOWN_IMAGE_GENERATION_MODEL',\n DALL_E_2 = 'DALL_E_2',\n DALL_E_3 = 'DALL_E_3',\n}\n\n/** @enumType */\nexport type V1ImageModelWithLiterals =\n | V1ImageModel\n | 'UNKNOWN_IMAGE_GENERATION_MODEL'\n | 'DALL_E_2'\n | 'DALL_E_3';\n\nexport interface V1TextToImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /** A specific value [0 .. 4294967294] used to guide the 'randomness' of the generation. */\n seed?: string | null;\n /**\n * Finish reason by the model provider.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport enum ImageModel {\n STABILITY_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_IMAGE_MODEL_UNSPECIFIED',\n /** stable-diffusion-xl-1024-v1-0 - Stable Diffusion XL v1.0 */\n SDXL_1_0 = 'SDXL_1_0',\n}\n\n/** @enumType */\nexport type ImageModelWithLiterals =\n | ImageModel\n | 'STABILITY_IMAGE_MODEL_UNSPECIFIED'\n | 'SDXL_1_0';\n\nexport interface GenerateCoreResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageCoreModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport enum ImageCoreModel {\n STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED',\n STABLE_IMAGE_CORE = 'STABLE_IMAGE_CORE',\n}\n\n/** @enumType */\nexport type ImageCoreModelWithLiterals =\n | ImageCoreModel\n | 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED'\n | 'STABLE_IMAGE_CORE';\n\nexport interface GenerateStableDiffusionResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. 
*/\n model?: ImageStableDiffusionModelWithLiterals;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport enum ImageStableDiffusionModel {\n STABLE_DIFFUSION_MODEL_UNSPECIFIED = 'STABLE_DIFFUSION_MODEL_UNSPECIFIED',\n /** sd3-large */\n SD3_LARGE = 'SD3_LARGE',\n /** sd3-large-turbo */\n SD3_LARGE_TURBO = 'SD3_LARGE_TURBO',\n /** sd3-medium */\n SD3_MEDIUM = 'SD3_MEDIUM',\n /** sd3.5-large */\n SD3_5_LARGE = 'SD3_5_LARGE',\n /** sd3.5-large-turbo */\n SD3_5_LARGE_TURBO = 'SD3_5_LARGE_TURBO',\n /** sd3.5-medium */\n SD3_5_MEDIUM = 'SD3_5_MEDIUM',\n}\n\n/** @enumType */\nexport type ImageStableDiffusionModelWithLiterals =\n | ImageStableDiffusionModel\n | 'STABLE_DIFFUSION_MODEL_UNSPECIFIED'\n | 'SD3_LARGE'\n | 'SD3_LARGE_TURBO'\n | 'SD3_MEDIUM'\n | 'SD3_5_LARGE'\n | 'SD3_5_LARGE_TURBO'\n | 'SD3_5_MEDIUM';\n\nexport interface GenerateAnImageResponse {\n /**\n * The id of the task.\n * @format GUID\n */\n _id?: string | null;\n /**\n * status of the image generation\n * one of Task not found, Pending, Request Moderated, Content Moderated, Ready, Error\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface ResultObject {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * prompt used for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n /** seed used for image generation */\n seed?: string | null;\n}\n\nexport interface CreatePredictionResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n _id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Token counts */\n tokenUsage?: CreatePredictionResponseTokenUsage;\n}\n\nexport interface CreatePredictionResponseTokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n}\n\nexport interface EditImageWithPromptResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /** Cost of the request in micro cents. 
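Because the Black Forest Labs `status` is a free-form string with the listed values, polling code compares against them directly; a small sketch:

```ts
import type { GenerateAnImageResponse } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// Resolve the image URL only once the task reports 'Ready'.
function imageUrlIfReady(res: GenerateAnImageResponse): string | null {
  return res.status === 'Ready' ? res.result?.url ?? null : null;
}
```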
*/\n microcentsSpent?: string | null;\n}\n\nexport enum EditImageWithPromptRequestModel {\n UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL',\n INPAINT = 'INPAINT',\n OUTPAINT = 'OUTPAINT',\n}\n\n/** @enumType */\nexport type EditImageWithPromptRequestModelWithLiterals =\n | EditImageWithPromptRequestModel\n | 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL'\n | 'INPAINT'\n | 'OUTPAINT';\n\nexport interface TextToImageResponse {\n /**\n * Generation TextToImageTaskResult\n * @maxSize 1000\n */\n data?: TextToImageTaskResult[];\n}\n\nexport interface TextToImageTaskResult {\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * The unique identifier of the image.\n * @format GUID\n */\n imageUuid?: string;\n /**\n * If outputType is set to URL, this parameter contains the URL of the image to be downloaded.\n * @maxLength 2048\n */\n imageUrl?: string | null;\n /** If checkNSFW parameter is used, NSFWContent is included informing if the image has been flagged as potentially sensitive content. */\n nsfwContent?: boolean;\n /** A cost of generated image. */\n microcentsSpent?: string | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n}\n\nexport interface GenerateImageResponse {\n /**\n * Array of generated image results, one for each requested sampleCount\n * @maxSize 8\n */\n predictions?: Prediction[];\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Prediction {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * Enhanced prompt used for generation (only returned for models that support prompt enhancement)\n * @maxLength 1000\n */\n prompt?: string | null;\n /**\n * The responsible AI filter reason\n * Only returned if includeRaiReason is enabled and this image was filtered out\n * @maxLength 1000\n */\n raiFilteredReason?: string | null;\n /** Safety attributes information */\n safetyAttributes?: SafetyAttributes;\n}\n\nexport interface SafetyAttributes {\n /**\n * The safety attribute categories\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /**\n * The safety attribute scores\n * @maxSize 100\n */\n scores?: number[] | null;\n}\n\nexport interface GenerateVideoResponse {\n /**\n * Generated videos\n * @maxSize 4\n */\n videos?: GeneratedVideo[];\n /** Cost of the request in micro-cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedVideo {\n /**\n * The URL of the generated video.\n * @format WEB_URL\n */\n videoUrl?: string | null;\n /**\n * The video MIME type (currently only \"video/mp4\")\n * @maxLength 50\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateImageMlPlatformResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n _id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /** Cost of the request in microcents. 
*/\n microcentsSpent?: string | null;\n}\n\nexport interface CreateImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport enum OpenAiImageModel {\n UNKNOWN_IMAGE_CREATION_MODEL = 'UNKNOWN_IMAGE_CREATION_MODEL',\n GPT_4O_IMAGE = 'GPT_4O_IMAGE',\n GPT_IMAGE_1 = 'GPT_IMAGE_1',\n}\n\n/** @enumType */\nexport type OpenAiImageModelWithLiterals =\n | OpenAiImageModel\n | 'UNKNOWN_IMAGE_CREATION_MODEL'\n | 'GPT_4O_IMAGE'\n | 'GPT_IMAGE_1';\n\nexport interface ImageUsage {\n /** Number of tokens in the input */\n inputTokens?: number | null;\n /** Details about input tokens */\n inputTokensDetails?: OpenAiImageTokenDetails;\n /** Number of tokens in the output */\n outputTokens?: number | null;\n /** Total number of tokens used */\n totalTokens?: number | null;\n}\n\nexport interface OpenAiImageTokenDetails {\n /** Number of tokens used for image processing */\n imageTokens?: number | null;\n /** Number of tokens used for text processing */\n textTokens?: number | null;\n}\n\nexport interface EditImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface V1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: ChatCompletionModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: V1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface GoogleproxyV1ChatCompletionMessage {\n /** The role of the message author. 
*/\n role?: V1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: V1ChatCompletionMessageContentPart[];\n}\n\nexport interface V1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum V1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type V1ChatCompletionMessageMessageRoleWithLiterals =\n | V1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface V1ChatCompletionMessageContentPart\n extends V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport enum ChatCompletionModel {\n UNKNOWN_CHAT_COMPLETION_MODEL = 'UNKNOWN_CHAT_COMPLETION_MODEL',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-scout\n * llama-4-scout-17b-16e-instruct-maas\n */\n LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS = 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-maverick\n * llama-4-maverick-17b-128e-instruct-maas\n */\n LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS = 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS',\n}\n\n/** @enumType */\nexport type ChatCompletionModelWithLiterals =\n | ChatCompletionModel\n | 'UNKNOWN_CHAT_COMPLETION_MODEL'\n | 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS'\n | 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS';\n\nexport interface V1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: GoogleproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. 
*/\n created?: number | null;\n /**\n * Model that produced the completion.\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of chat completion choices. Can be more than one if n is greater than 1.\n * @maxSize 10000\n */\n choices?: Choice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: TokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface ChatCompletionMessage {\n /** The role of the message author. */\n role?: MessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: ContentPart[];\n}\n\nexport interface ImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum MessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type MessageRoleWithLiterals =\n | MessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface ContentPart extends ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface Choice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface TokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. 
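A `ChatCompletionMessage` is then a role plus up to five content parts; a text-plus-image user turn looks roughly like this (URL illustrative):

```ts
import type { ChatCompletionMessage } from '@wix/auto_sdk_ai-gateway_prompts'; // assumed import path

// A user turn mixing a text part and an image_url part (max 5 parts).
const userMessage: ChatCompletionMessage = {
  role: 'USER',
  contentParts: [
    { type: 'text', text: 'What is shown in this picture?' },
    {
      type: 'image_url',
      imageUrl: { url: 'https://example.com/photo.png', detail: 'low' },
    },
  ],
};
```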
*/\n totalTokens?: number | null;\n}\n\nexport interface VideoInferenceResponse {\n /**\n * Generation VideoInferenceTaskResult\n * @maxSize 1000\n */\n data?: VideoInferenceTaskResult[];\n}\n\nexport interface VideoInferenceTaskResult {\n /**\n * The API will return the taskType you sent in the request.\n * @maxLength 100\n */\n taskType?: string;\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * A unique identifier for the generated video.\n * @format GUID\n */\n videoUuid?: string | null;\n /**\n * If outputType is set to URL, this parameter contains the URL of the video to be downloaded.\n * @maxLength 10000\n */\n videoUrl?: string | null;\n /**\n * The seed value that was used to generate this video.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /** A cost of generated video. */\n microcentsSpent?: string | null;\n /**\n * The current processing status (for polling operations).\n * @maxLength 50\n */\n status?: string | null;\n}\n\nexport interface V1OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n _id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: OpenAiResponsesResponseIncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: V1ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: V1ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: V1ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface OpenAiResponsesResponseIncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport enum V1ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n O3_PRO_2025_06_10 = 'O3_PRO_2025_06_10',\n O3_DEEP_RESEARCH_2025_06_26 = 'O3_DEEP_RESEARCH_2025_06_26',\n GPT_5_CODEX = 'GPT_5_CODEX',\n GPT_5_1_2025_11_13 = 'GPT_5_1_2025_11_13',\n GPT_5_1_CODEX = 'GPT_5_1_CODEX',\n GPT_5_1_CODEX_MINI = 'GPT_5_1_CODEX_MINI',\n GPT_EXP_RESPONSES = 'GPT_EXP_RESPONSES',\n}\n\n/** @enumType */\nexport type V1ResponsesModelWithLiterals =\n | V1ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'O3_PRO_2025_06_10'\n | 'O3_DEEP_RESEARCH_2025_06_26'\n | 'GPT_5_CODEX'\n | 'GPT_5_1_2025_11_13'\n | 'GPT_5_1_CODEX'\n | 'GPT_5_1_CODEX_MINI'\n | 'GPT_EXP_RESPONSES';\n\nexport interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. 
*/\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: ResponsesOutputMessageOutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface V1OutputAnnotation\n extends V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\n/** @oneof */\nexport interface V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\nexport interface V1UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface ResponsesOutputMessageOutputContent {\n /**\n * The type of the output text output_text/refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: V1OutputAnnotation[];\n}\n\nexport interface V1ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: ResponsesWebSearchToolCallAction;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. 
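// --- Illustrative sketch, not part of the released sources ---
// V1ResponsesOutput is a @oneof: exactly one field is populated per item, and
// the docs above warn against assuming output[0] is the assistant message.
// A local structural type stands in for the generated interfaces.
type OutputLike = {
  outputMessage?: { content?: { type?: string | null; text?: string | null }[] };
  functionToolCall?: { name?: string | null; arguments?: string | null };
};

function collectOutputText(output: OutputLike[]): string {
  const chunks: string[] = [];
  for (const item of output) {
    if (item.outputMessage) {
      for (const part of item.outputMessage.content ?? []) {
        if (part.type === 'output_text' && part.text) chunks.push(part.text);
      }
    } else if (item.functionToolCall) {
      // A tool call instead: dispatch on `name` and JSON.parse `arguments`.
    }
  }
  return chunks.join('');
}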
Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface ResponsesWebSearchToolCallAction {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\" - Performs a web search query.\n * Action type \"open_page\" - Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface V1ResponsesReasoningOutput {\n /** @maxLength 100 */\n _id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: V1ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: V1ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface V1ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface V1ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface V1ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: V1ResponsesCodeInterpreterOutput[];\n}\n\nexport interface V1ResponsesCodeInterpreterOutput\n extends V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface V1ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. 
Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface V1ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface V1ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface V1ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: V1ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: V1ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface V1ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface V1ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n _id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: IncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. 
*/\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface IncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport enum ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n}\n\n/** @enumType */\nexport type ResponsesModelWithLiterals =\n | ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES';\n\nexport interface ResponsesOutput extends ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. 
*/\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: OutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface OutputAnnotation extends OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\n/** @oneof */\nexport interface OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\nexport interface UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface OutputContent {\n /**\n * The type of the output text output_text/refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: OutputAnnotation[];\n}\n\nexport interface ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: Action;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. 
Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface Action {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\" - Performs a web search query.\n * Action type \"open_page\" - Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface ResponsesReasoningOutput {\n /** @maxLength 100 */\n _id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n _id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: ResponsesCodeInterpreterOutput[];\n}\n\nexport interface ResponsesCodeInterpreterOutput\n extends ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. 
Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface CreateVideoResponse {\n videoJob?: VideoJob;\n}\n\nexport interface VideoJob {\n /**\n * The unique identifier for the video generation job.\n * @maxLength 200\n */\n _id?: string | null;\n /**\n * The status of the response generation.\n * @maxLength 50\n */\n status?: string | null;\n /**\n * The generated video result url. Only present when status is \"completed\".\n * @maxLength 5000\n * @format WEB_URL\n */\n url?: string | null;\n /** Error payload that explains why generation failed, if applicable. */\n error?: ErrorInfo;\n /** The progress of the video generation as a percentage (0-100) */\n progress?: number | null;\n}\n\nexport interface ErrorInfo {\n /**\n * code\n * @maxLength 50\n */\n code?: string | null;\n /**\n * message\n * @maxLength 1000\n */\n message?: string | null;\n}\n\nexport interface GenerateContentByPromptObjectRequest {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface Prompt extends PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit response. 
*/\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n /**\n * Prompt id.\n * @format GUID\n */\n _id?: string | null;\n /**\n * Names of template parameters, that will be checked and substituted during GenerateText requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedParameterNames?: string[];\n /** FallbackPromptConfig object that describes optional second Prompt that can be invoked in case main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n /**\n * Names of dynamic properties, that will be checked and substituted during requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedDynamicPropertiesNames?: string[];\n}\n\n/** @oneof */\nexport interface PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. 
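// --- Illustrative sketch, not part of the released sources ---
// A GenerateContentByPromptObjectRequest that invokes a stored Prompt by id
// and fills its templated parameters; `dynamicProperties` overwrites dynamic
// placeholders. Both GUIDs below are placeholders, not real prompts.
const request = {
  prompt: {
    _id: '00000000-0000-0000-0000-000000000000', // placeholder GUID
    templatedParameterNames: ['topic', 'tone'],
    // Optional second prompt invoked if the main invocation fails:
    fallbackPromptConfig: { fallbackPromptId: '00000000-0000-0000-0000-000000000001' },
  },
  params: { topic: 'site SEO', tone: 'friendly' },
  dynamicProperties: { audience: 'small businesses' },
};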
*/\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n}\n\nexport interface FallbackPromptConfig {\n /**\n * Id of the fallback Prompt. This Prompt will be used for text generation in case the invocation of original Prompt fails.\n * @format GUID\n */\n fallbackPromptId?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequest\n extends OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: OpenaiproxyV1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: CreateChatCompletionRequestFunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. 
Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: V1CreateChatCompletionRequestTool[];\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: V1FineTuningSpec;\n /**\n * An object specifying the format that the model must output. 
Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: OpenaiproxyV1CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * o1 models only\n * @maxLength 100\n */\n reasoningEffort?: string | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses.\n * Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface CreateChatCompletionRequestFunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. 
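// --- Hedged sketch, not part of the released sources ---
// A chat completion request in JSON mode, following the responseFormat notes
// above: the prompt itself must also instruct the model to emit JSON, or
// generation may run until the token limit. The model id is a placeholder,
// and the message shape is assumed to mirror ChatCompletionMessage above.
const jsonRequest = {
  model: 'MODEL_PLACEHOLDER', // stand-in for one of the model enum values
  messages: [
    { role: 'SYSTEM', contentParts: [{ type: 'text', text: 'Reply with a JSON object: {"title": string}.' }] },
    { role: 'USER', contentParts: [{ type: 'text', text: 'Name this article about beekeeping.' }] },
  ],
  // Per the field docs above, alter temperature or topP, not both.
  temperature: 0.2,
  maxTokens: 256,
  responseFormat: { type: 'json_object' },
};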
*/\n function?: CreateChatCompletionRequestFunctionSignature;\n}\n\nexport interface V1FineTuningSpec {\n /**\n * Organization field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:<my-org>:custom_suffix:id\n * @maxLength 100\n */\n org?: string | null;\n /**\n * Suffix field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:<custom_suffix>:id\n * @maxLength 100\n */\n suffix?: string | null;\n /**\n * Id field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:custom_suffix:<id>\n * @maxLength 100\n */\n _id?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface TextBisonPredictRequest {\n /**\n * TextInstance objects containing input prompts.\n * @maxSize 100\n */\n instances?: TextInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: TextBisonModelWithLiterals;\n}\n\nexport interface TextInstance {\n /**\n * Text input to generate model response. Prompts can include preamble, questions, suggestions, instructions, or examples.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface PredictParameters {\n /**\n * The temperature is used for sampling during response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that\n * require a less open-ended or creative response, while higher temperatures can lead to more diverse or creative results.\n * A temperature of 0 means that the highest probability tokens are always selected. In this case, responses for a\n * given prompt are mostly deterministic, but a small amount of variation is still possible.\n * For most use cases, try starting with a temperature of 0.2. If the model returns a response that's too generic,\n * too short, or the model gives a fallback response, try increasing the temperature.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for longer responses.\n * @min 1\n * @max 2048\n */\n maxOutputTokens?: number | null;\n /**\n * Top-K changes how the model selects tokens for output. A top-K of 1 means the next selected token is the most probable\n * among all tokens in the model's vocabulary (also called greedy decoding), while a top-K of 3 means that the next\n * token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled. Then tokens are further\n * filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-K is 40.\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output. 
Tokens are selected from the most (see top-K) to least\n * probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have a\n * probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next\n * token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-P is 0.95.\n * @max 1\n */\n topP?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in\n * the response. If a string appears multiple times in the response, then the response truncates where it's first\n * encountered. The strings are case-sensitive.\n * @maxSize 100\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * The number of response variations to return.\n * @min 1\n * @max 8\n */\n candidateCount?: number | null;\n}\n\nexport enum TextBisonModel {\n UNKNOWN_TEXT_BISON_MODEL = 'UNKNOWN_TEXT_BISON_MODEL',\n TEXT_BISON = 'TEXT_BISON',\n TEXT_BISON_001 = 'TEXT_BISON_001',\n TEXT_BISON_32K = 'TEXT_BISON_32K',\n TEXT_BISON_002 = 'TEXT_BISON_002',\n TEXT_BISON_32K_002 = 'TEXT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type TextBisonModelWithLiterals =\n | TextBisonModel\n | 'UNKNOWN_TEXT_BISON_MODEL'\n | 'TEXT_BISON'\n | 'TEXT_BISON_001'\n | 'TEXT_BISON_32K'\n | 'TEXT_BISON_002'\n | 'TEXT_BISON_32K_002';\n\nexport interface ChatBisonPredictRequest {\n /**\n * ChatInstance objects containing inputs.\n * @maxSize 100\n */\n instances?: ChatInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: ChatBisonModelWithLiterals;\n}\n\nexport interface ChatInstance {\n /**\n * Optional. Context shapes how the model responds throughout the conversation. For example, you can use context\n * to specify words the model can or cannot use, topics to focus on or avoid, or the response format or style.\n * @maxLength 100000\n */\n context?: string | null;\n /**\n * Optional. Examples for the model to learn how to respond to the conversation.\n * @maxSize 1000\n */\n examples?: Example[];\n /**\n * Required. Conversation history provided to the model in a structured alternate-author form. Messages appear in\n * chronological order: oldest first, newest last. When the history of messages causes the input to exceed the\n * maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.\n * @maxSize 1000\n */\n messages?: ChatMessage[];\n}\n\nexport interface Example {\n /** An example of an input Message from the user. */\n input?: ChatMessage;\n /** An example of what the model should output given the input. */\n output?: ChatMessage;\n}\n\nexport enum ChatBisonModel {\n UNKNOWN_CHAT_BISON_MODEL = 'UNKNOWN_CHAT_BISON_MODEL',\n CHAT_BISON = 'CHAT_BISON',\n CHAT_BISON_001 = 'CHAT_BISON_001',\n CHAT_BISON_32K = 'CHAT_BISON_32K',\n CHAT_BISON_002 = 'CHAT_BISON_002',\n CHAT_BISON_32K_002 = 'CHAT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type ChatBisonModelWithLiterals =\n | ChatBisonModel\n | 'UNKNOWN_CHAT_BISON_MODEL'\n | 'CHAT_BISON'\n | 'CHAT_BISON_001'\n | 'CHAT_BISON_32K'\n | 'CHAT_BISON_002'\n | 'CHAT_BISON_32K_002';\n\nexport interface CreateChatCompletionRequest\n extends CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. 
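// --- Illustrative sketch, not part of the released sources ---
// A TextBisonPredictRequest tuned per the PredictParameters guidance above:
// start near temperature 0.2 and raise it only if responses come back too
// generic; 40 and 0.95 are the documented topK/topP defaults.
const bisonRequest = {
  model: 'TEXT_BISON_002',
  instances: [{ prompt: 'Write a two-line product blurb for handmade candles.' }],
  parameters: {
    temperature: 0.2,
    maxOutputTokens: 256,
    topK: 40,
    topP: 0.95,
    stopSequences: ['\n\n'], // case-sensitive; response truncates at first hit
    candidateCount: 1,
  },
};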
*/\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: V1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: V1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: FunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. 
Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: CreateChatCompletionRequestTool[];\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface FunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. 
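// --- Illustrative sketch, not part of the released sources ---
// Function calling via the non-deprecated `tools` path (`functions` is
// @deprecated / @replacedBy tools). `toolChoice` is a string field in these
// interfaces, so the forcing object is serialized as JSON, mirroring the
// field docs above; the model id is a placeholder.
const weatherTool = {
  type: 'function',
  function: {
    name: 'get_weather',
    description: 'Look up current weather for a city.',
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
    },
    strict: true, // strictly follow the schema (structured outputs)
  },
};

const toolRequest = {
  model: 'MODEL_PLACEHOLDER', // stand-in for one of the model enum values
  messages: [{ role: 'USER', contentParts: [{ type: 'text', text: 'Weather in Oslo?' }] }],
  tools: [weatherTool],
  toolChoice: '{"type": "function", "function": {"name": "get_weather"}}',
};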
*/\n function?: FunctionSignature;\n}\n\nexport interface CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface GenerateContentRequest {\n /** ID of the model to use. */\n model?: GoogleproxyV1ModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n contents?: Content[];\n /** The system instruction to the model. */\n systemInstruction?: SystemInstruction;\n /**\n * A list of Tools the model may use to generate the next response.\n * @maxSize 1000\n */\n tools?: GoogleproxyV1Tool[];\n /**\n * Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.\n * @maxSize 100\n */\n safetySettings?: SafetySetting[];\n /** The generation configuration for the response. */\n generationConfig?: GenerationConfig;\n /** Tool configuration for any Tool specified in the request. */\n toolConfig?: ToolConfig;\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: FineTuningSpec;\n}\n\nexport enum GoogleproxyV1Model {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n GEMINI_1_0_PRO = 'GEMINI_1_0_PRO',\n GEMINI_1_0_PRO_VISION = 'GEMINI_1_0_PRO_VISION',\n GEMINI_1_5_PRO = 'GEMINI_1_5_PRO',\n GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH',\n GEMINI_2_0_FLASH = 'GEMINI_2_0_FLASH',\n GEMINI_2_0_FLASH_LITE = 'GEMINI_2_0_FLASH_LITE',\n GEMINI_2_5_PRO = 'GEMINI_2_5_PRO',\n GEMINI_2_5_FLASH = 'GEMINI_2_5_FLASH',\n GEMINI_2_5_FLASH_LITE = 'GEMINI_2_5_FLASH_LITE',\n GEMINI_2_5_FLASH_IMAGE = 'GEMINI_2_5_FLASH_IMAGE',\n GEMINI_2_5_COMPUTER_USE = 'GEMINI_2_5_COMPUTER_USE',\n GEMINI_3_0_PRO = 'GEMINI_3_0_PRO',\n GEMINI_3_0_PRO_IMAGE = 'GEMINI_3_0_PRO_IMAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ModelWithLiterals =\n | GoogleproxyV1Model\n | 'UNKNOWN_MODEL'\n | 'GEMINI_1_0_PRO'\n | 'GEMINI_1_0_PRO_VISION'\n | 'GEMINI_1_5_PRO'\n | 'GEMINI_1_5_FLASH'\n | 'GEMINI_2_0_FLASH'\n | 'GEMINI_2_0_FLASH_LITE'\n | 'GEMINI_2_5_PRO'\n | 'GEMINI_2_5_FLASH'\n | 'GEMINI_2_5_FLASH_LITE'\n | 'GEMINI_2_5_FLASH_IMAGE'\n | 'GEMINI_2_5_COMPUTER_USE'\n | 'GEMINI_3_0_PRO'\n | 'GEMINI_3_0_PRO_IMAGE';\n\nexport interface Content {\n /**\n * The role in a conversation associated with the content.\n * Specifying a role is required even in single turn use cases. Acceptable values include the following:\n * USER: Specifies content that's sent by you. MODEL: Specifies the model's response.\n */\n role?: ContentRoleWithLiterals;\n /**\n * Ordered parts that make up the input. Parts may have different MIME types.\n * For gemini-1.0-pro, only the text field is valid. The token limit is 32k.\n * For gemini-1.0-pro-vision, you may specify either text only, text and up to 16 images, or text and 1 video. 
The token limit is 16k.\n * @maxSize 1000\n */\n parts?: V1ContentPart[];\n}\n\nexport enum ContentRole {\n UNKNOWN_CONTENT_ROLE = 'UNKNOWN_CONTENT_ROLE',\n USER = 'USER',\n MODEL = 'MODEL',\n}\n\n/** @enumType */\nexport type ContentRoleWithLiterals =\n | ContentRole\n | 'UNKNOWN_CONTENT_ROLE'\n | 'USER'\n | 'MODEL';\n\nexport interface V1ContentPart {\n /**\n * Union field data can be only one of the following:\n * The text instructions or chat dialogue to include in the prompt.\n * @maxLength 1000000000\n */\n text?: string | null;\n /** data field not supported for gemini-1.0-pro */\n contentData?: ContentData;\n /** A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. */\n functionCall?: FunctionCall;\n /**\n * The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the\n * function is used as context to the model. This should contain the result of a FunctionCall made based on model prediction.\n */\n functionResponse?: FunctionResponse;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /** Optional. Media resolution level for the input media. */\n mediaResolution?: MediaResolution;\n /** Thought flag indicates that the content part is a thought. */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 1000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ContentData {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * The MIME type of the content data. Supported types are image/jpeg, image/png.\n * @maxLength 100\n */\n mimeType?: string | null;\n}\n\nexport interface FunctionResponse {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string;\n /** Required. The function response in JSON object format. */\n response?: Record<string, any> | null;\n}\n\nexport interface MediaResolution {\n /** Media resolution level */\n level?: MediaResolutionLevelWithLiterals;\n}\n\nexport enum MediaResolutionLevel {\n /** Media resolution has not been set. */\n MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED',\n /** Media resolution set to low (64 tokens). */\n MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW',\n /** Media resolution set to medium (256 tokens). */\n MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM',\n /** Media resolution set to high (zoomed reframing with 256 tokens). 
*/\n MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH',\n}\n\n/** @enumType */\nexport type MediaResolutionLevelWithLiterals =\n | MediaResolutionLevel\n | 'MEDIA_RESOLUTION_UNSPECIFIED'\n | 'MEDIA_RESOLUTION_LOW'\n | 'MEDIA_RESOLUTION_MEDIUM'\n | 'MEDIA_RESOLUTION_HIGH';\n\nexport interface SystemInstruction {\n /**\n * The role field of systemInstruction is ignored and doesn't affect the performance of the model.\n * @maxLength 20\n */\n role?: string | null;\n /**\n * Instructions for the model to steer it toward better performance.\n * The text strings count toward the token limit.\n * @maxSize 10\n */\n parts?: V1ContentPart[];\n}\n\nexport interface GoogleproxyV1Tool {\n /**\n * One or more function declarations\n * More information about the function declarations:\n * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling\n * @maxSize 1000\n */\n functionDeclarations?: FunctionDeclaration[];\n /** Optional. Retrieval tool that is powered by Google search. */\n googleSearchRetrieval?: GoogleSearchRetrieval;\n /** Optional. Enables the model to execute code as part of generation. */\n codeExecution?: CodeExecution;\n /** Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. */\n googleSearch?: GoogleSearch;\n /**\n * Optional. Tool to support the model interacting directly with the computer.\n * If enabled, it automatically populates computer-use specific Function Declarations.\n */\n computerUse?: ComputerUse;\n}\n\nexport enum DynamicRetrievalConfigMode {\n /** Always trigger retrieval. */\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n /** Run retrieval only when system decides it is necessary. */\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\n/** @enumType */\nexport type DynamicRetrievalConfigModeWithLiterals =\n | DynamicRetrievalConfigMode\n | 'MODE_UNSPECIFIED'\n | 'MODE_DYNAMIC';\n\nexport interface DynamicRetrievalConfig {\n /** The mode of the predictor to be used in dynamic retrieval. */\n mode?: DynamicRetrievalConfigModeWithLiterals;\n /** The threshold to be used in dynamic retrieval. If not set, a system default value is used. */\n dynamicThreshold?: string | null;\n}\n\nexport enum Environment {\n /** Defaults to browser. */\n ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED',\n /** Operates in a web browser. */\n ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER',\n}\n\n/** @enumType */\nexport type EnvironmentWithLiterals =\n | Environment\n | 'ENVIRONMENT_UNSPECIFIED'\n | 'ENVIRONMENT_BROWSER';\n\nexport interface FunctionDeclaration {\n /**\n * The name of the function to call. Must start with a letter or an underscore.\n * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description and purpose of the function. The model uses this to decide how and whether to call the function.\n * For the best results, we recommend that you include a description.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * The parameters of this function in a format that's compatible with the OpenAPI schema:\n * https://spec.openapis.org/oas/v3.0.3#schema\n */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GoogleSearchRetrieval {\n /** Specifies the dynamic retrieval configuration for the given source. */\n dynamicRetrievalConfig?: DynamicRetrievalConfig;\n}\n\nexport interface CodeExecution {}\n\nexport interface GoogleSearch {}\n\nexport interface ComputerUse {\n /** Required. The environment being operated. 
*/\n environment?: EnvironmentWithLiterals;\n /**\n * Optional. By default, predefined functions are included in the final model call.\n * Some of them can be explicitly excluded from being automatically included.\n * This can serve two purposes:\n * 1. Using a more restricted / different action space.\n * 2. Improving the definitions / instructions of predefined functions.\n * @maxSize 100\n * @maxLength 1000\n */\n excludedPredefinedFunctions?: string[];\n}\n\nexport interface SafetySetting {\n /** The safety category to configure a threshold for. */\n category?: HarmCategoryWithLiterals;\n /** The threshold for blocking responses that could belong to the specified safety category based on probability. */\n threshold?: ThresholdWithLiterals;\n}\n\nexport enum Threshold {\n UNKNOWN_THRESHOLD = 'UNKNOWN_THRESHOLD',\n BLOCK_NONE = 'BLOCK_NONE',\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MED_AND_ABOVE = 'BLOCK_MED_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n}\n\n/** @enumType */\nexport type ThresholdWithLiterals =\n | Threshold\n | 'UNKNOWN_THRESHOLD'\n | 'BLOCK_NONE'\n | 'BLOCK_LOW_AND_ABOVE'\n | 'BLOCK_MED_AND_ABOVE'\n | 'BLOCK_ONLY_HIGH';\n\nexport interface GenerationConfig {\n /**\n * The temperature is used for sampling during the response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection.\n * Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response,\n * while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic:\n * the highest probability response is always selected.\n * Range: 0.0 - 1.0, Default for gemini-1.0-pro: 0.9, Default for gemini-1.0-pro-vision: 0.4\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. 
A token is approximately four characters.\n * 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for potentially longer responses.\n * Range for gemini-1.0-pro: 1-8192 (default: 8192),\n * Range for gemini-1.0-pro-vision: 1-2048 (default: 2048)\n * Range for gemini-2.5-pro: 1-65536\n * @min 1\n * @max 65536\n */\n maxOutputTokens?: string | null;\n /**\n * Top-K changes how the model selects tokens for output.\n * A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-K of 3 means that the next token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled.\n * Then tokens are further filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default for gemini-1.0-pro-vision: 32, Default for gemini-1.0-pro: none\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output.\n * Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value.\n * For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5,\n * then the model will select either A or B as the next token by using temperature and exclude C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default: 1.0\n * @max 1\n */\n topP?: number | null;\n /**\n * The number of response variations to return. This value must be 1.\n * @min 1\n * @max 1\n */\n candidateCount?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in the response.\n * If a string appears multiple times in the response, then the response truncates where it's first encountered. The strings are case-sensitive.\n * For example, if the following is the returned response when stopSequences isn't specified:\n * public static string reverse(string myString)\n * Then the returned response with stopSequences set to [\"Str\",\"reverse\"] is:\n * public static string\n * Maximum 5 items in the list.\n * @maxSize 5\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * Available for gemini-1.5-pro.\n * The output format of the generated candidate text.\n * Supported MIME types: text/plain: (default) Text output. application/json: JSON response in the candidates.\n * text/x.enum: For classification tasks, output an enum value as defined in the response schema.\n * How to control the output format: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output\n * @maxLength 50\n */\n responseMimeType?: string | null;\n /**\n * Available for gemini-1.5-pro.\n * The schema that generated candidate text must follow. For more information, see Control generated output.\n * You must specify the responseType or responseMimeType field to use this parameter.\n * Link for examples: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema\n */\n responseSchema?: Record<string, any> | null;\n /**\n * Optional. Output schema of the generated response. 
This is an alternative to responseSchema that accepts JSON Schema.\n * If set, responseSchema must be omitted, but responseMimeType is required.\n * While the full JSON Schema may be sent, not all features are supported.\n * more information about supported features and examples can be found here:\n * https://ai.google.dev/api/generate-content#FIELDS.response_json_schema\n */\n responseJsonSchema?: Record<string, any> | null;\n /** Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking */\n thinkingConfig?: GenerationThinkingConfig;\n /**\n * Optional. The requested modalities of the response.\n * Represents the set of modalities that the model can return, and should be expected in the response.\n * This is an exact match to the modalities of the response.\n * A model may have multiple combinations of supported modalities.\n * If the requested modalities do not match any of the supported combinations, an error will be returned.\n * An empty list is equivalent to requesting only TEXT.\n * Currently supported as experimental feature for gemini-2.0-flash only.\n * @maxSize 5\n */\n responseModalities?: ModalityWithLiterals[];\n /**\n * Optional. Configuration for image generation.\n * This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people.\n */\n imageConfig?: ImageConfig;\n /**\n * The media_resolution parameter controls how the Gemini API processes media inputs like images, videos,\n * and PDF documents by determining the maximum number of tokens allocated for media inputs,\n * allowing you to balance response quality against latency and cost.\n */\n mediaResolution?: MediaResolutionLevelWithLiterals;\n}\n\nexport interface GenerationThinkingConfig {\n /** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */\n includeThoughts?: boolean | null;\n /** The number of thoughts tokens that the model should generate. */\n thinkingBudget?: string | null;\n /**\n * Thinking level parameter offering 2 states:\n * Low: Minimizes latency and cost. Best for simple instruction following or chat.\n * High: Maximizes reasoning depth. Default. Dynamic thinking.\n * The model may take significantly longer to reach a first token,\n * but the output will be more thoroughly vetted.\n * Note: You cannot use both thinking_level and the legacy thinking_budget parameter in the same request. Doing so will return a 400 error\n * @maxLength 20\n */\n thinkingLevel?: string | null;\n}\n\nexport interface ImageConfig {\n /** Optional. The image output format for generated images. */\n imageOutputOptions?: ImageOutputOptions;\n /**\n * Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported:\n * \"1:1\" \"2:3\", \"3:2\" \"3:4\", \"4:3\" \"4:5\", \"5:4\" \"9:16\", \"16:9\" \"21:9\"\n * @maxLength 10\n */\n aspectRatio?: string | null;\n /** Optional. Controls whether the model can generate people. */\n personGeneration?: PersonGenerationWithLiterals;\n}\n\nexport interface ImageOutputOptions {\n /**\n * Optional. The image format that the output should be saved as.\n * @maxLength 100\n */\n mimeType?: string | null;\n /** Optional. The compression quality of the output image. */\n compressionQuality?: string | null;\n}\n\nexport enum PersonGeneration {\n /** The default behavior is unspecified. 
The model will decide whether to generate images of people. */\n PERSON_GENERATION_UNSPECIFIED = 'PERSON_GENERATION_UNSPECIFIED',\n /** Allows the model to generate images of people, including adults and children. */\n ALLOW_ALL = 'ALLOW_ALL',\n /** Allows the model to generate images of adults, but not children. */\n ALLOW_ADULT = 'ALLOW_ADULT',\n /** Prevents the model from generating images of people. */\n ALLOW_NONE = 'ALLOW_NONE',\n}\n\n/** @enumType */\nexport type PersonGenerationWithLiterals =\n | PersonGeneration\n | 'PERSON_GENERATION_UNSPECIFIED'\n | 'ALLOW_ALL'\n | 'ALLOW_ADULT'\n | 'ALLOW_NONE';\n\nexport interface ToolConfig {\n /** Function calling config. */\n functionCallingConfig?: FunctionCallingConfig;\n}\n\nexport interface FunctionCallingConfig {\n /** Specifies the mode in which function calling should execute. If unspecified, the default value will be set to AUTO. */\n mode?: ModeWithLiterals;\n /**\n * A set of function names that, when provided, limits the functions the model will call.\n * This should only be set when the Mode is ANY or VALIDATED. Function names should match [FunctionDeclaration.name]. When set, model will predict a function call from only allowed function names.\n * @maxLength 64\n * @maxSize 100\n */\n allowedFunctionNames?: string[];\n}\n\nexport enum Mode {\n UNKNOWN = 'UNKNOWN',\n /** Default model behavior, model decides to predict either a function call or a natural language response. */\n AUTO = 'AUTO',\n /**\n * Model is constrained to always predicting a function call only. If \"allowedFunctionNames\" are set, the predicted function call will be limited to any one of \"allowedFunctionNames\",\n * else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n ANY = 'ANY',\n /** Model will not predict any function call. Model behavior is same as when not passing any function declarations. */\n NONE = 'NONE',\n /**\n * Model decides to predict either a function call or a natural language response, but will validate function calls with constrained decoding. If \"allowedFunctionNames\" are set, the predicted function call will be\n * limited to any one of \"allowedFunctionNames\", else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n VALIDATED = 'VALIDATED',\n}\n\n/** @enumType */\nexport type ModeWithLiterals =\n | Mode\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'NONE'\n | 'VALIDATED';\n\nexport interface FineTuningSpec {\n /**\n * Endpoint ID of the fine-tuning model to use.\n * @maxLength 100\n */\n _id?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
*/\n model?: ModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport enum Model {\n UNKNOWN = 'UNKNOWN',\n /** anthropic.claude-3-sonnet-20240229-v1:0 */\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n /** anthropic.claude-3-haiku-20240307-v1:0 */\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n /** anthropic.claude-3-5-sonnet-20240620-v1:0 */\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n /** anthropic.claude-3-5-sonnet-20241022-v2:0 */\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n /** us.anthropic.claude-3-5-haiku-20241022-v1:0 */\n CLAUDE_3_5_HAIKU_1_0 = 'CLAUDE_3_5_HAIKU_1_0',\n /** us.anthropic.claude-3-7-sonnet-20250219-v1:0 */\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n /** us.anthropic.claude-sonnet-4-5-20250929-v1:0 */\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n /** us.anthropic.claude-haiku-4-5-20251001-v1:0 */\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ModelWithLiterals =\n | Model\n | 'UNKNOWN'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_5_HAIKU_1_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: RoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: ContentBlock[];\n}\n\nexport interface Tool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n}\n\nexport enum ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type ToolChoiceTypeWithLiterals =\n | ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: ToolConfiguration;\n}\n\nexport enum McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type McpServerTypeWithLiterals = McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface ToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: ClaudeModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: V1AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: GoogleproxyV1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: InvokeAnthropicClaudeModelRequestTool[];\n /**\n * How the model should use the provided tools. 
The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: GoogleproxyV1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: GoogleproxyV1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: GoogleproxyV1McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface GoogleproxyV1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport enum ClaudeModel {\n UNKNOWN_CLAUDE_MODEL = 'UNKNOWN_CLAUDE_MODEL',\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_OPUS_1_0 = 'CLAUDE_3_OPUS_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ClaudeModelWithLiterals =\n | ClaudeModel\n | 'UNKNOWN_CLAUDE_MODEL'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_OPUS_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface V1AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GoogleproxyV1ContentBlock[];\n}\n\nexport interface InvokeAnthropicClaudeModelRequestTool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: GoogleproxyV1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface GoogleproxyV1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. 
This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: GoogleproxyV1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum GoogleproxyV1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ToolChoiceTypeWithLiterals =\n | GoogleproxyV1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface GoogleproxyV1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface GoogleproxyV1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: GoogleproxyV1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: V1McpServerToolConfiguration;\n}\n\nexport enum GoogleproxyV1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1McpServerTypeWithLiterals =\n | GoogleproxyV1McpServerType\n | 'UNKNOWN'\n | 'URL';\n\nexport interface V1McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface InvokeAnthropicModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
*/\n model?: AnthropicModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicMessage[];\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: V1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: V1Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: V1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: V1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: V1McpServer[];\n /**\n * Container identifier for reuse across requests.\n * @maxLength 512\n */\n container?: string | null;\n /** An object describing metadata about the request. */\n metadata?: RequestMetadata;\n /** Desired output format. 
*/\n outputFormat?: Record<string, any> | null;\n}\n\nexport enum AnthropicModel {\n UNKNOWN_ANTHROPIC_MODEL = 'UNKNOWN_ANTHROPIC_MODEL',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_1_OPUS_1_0 = 'CLAUDE_4_1_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type AnthropicModelWithLiterals =\n | AnthropicModel\n | 'UNKNOWN_ANTHROPIC_MODEL'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_1_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicMessage {\n /** The role of the message author. */\n role?: MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\n/** Top-level tool wrapper. Exactly one branch is set. */\nexport interface V1Tool extends V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\n/** @oneof */\nexport interface V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool 
(Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\nexport interface CustomTool {\n /**\n * The name of the tool. Must match the regex ^[a-zA-Z0-9_-]{1,64}$.\n * @maxLength 1000\n */\n name?: string;\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: V1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface ComputerUseTool {\n /** Display width in pixels, recommend ≤1280 */\n displayWidthPx?: number;\n /** Display height in pixels, recommend ≤800 */\n displayHeightPx?: number;\n /** Display number for X11 environments */\n displayNumber?: number | null;\n}\n\nexport interface TextEditorTool {\n /** Parameter to control truncation when viewing large files. Available only for text_editor_20250728 and later. */\n maxCharacters?: number | null;\n}\n\nexport interface BashTool {\n /**\n * Name must be \"bash\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebSearchTool {\n /** Optional: Limit the number of searches per request; exceeding -> error \"max_uses_exceeded\". */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only include results from these domains, e.g. 
\"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never include results from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Localize search results */\n userLocation?: WebSearchUserLocation;\n /** Optional: caches the tool definition only (it will not cache the results) */\n cacheControl?: V1CacheControl;\n}\n\nexport interface WebSearchUserLocation {\n /**\n * The type of location (must be \"approximate\")\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The city name\n * @maxLength 500\n */\n city?: string | null;\n /**\n * The region or state\n * @maxLength 500\n */\n region?: string | null;\n /**\n * The country\n * @maxLength 500\n */\n country?: string | null;\n /**\n * The IANA timezone ID, e.g. \"America/Los_Angeles\"\n * @maxLength 500\n */\n timezone?: string | null;\n}\n\nexport interface CodeExecutionTool {\n /**\n * Name must be \"code_execution\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebFetchTool {\n /** Optional: Limit the number of fetches per request */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only fetch from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never fetch from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Enable citations for fetched content */\n citations?: CitationsEnabled;\n /** Optional: Maximum content length in tokens */\n maxContentTokens?: number | null;\n}\n\nexport interface V1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: V1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum V1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n NONE = 'NONE',\n}\n\n/** @enumType */\nexport type V1ToolChoiceTypeWithLiterals =\n | V1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL'\n | 'NONE';\n\nexport interface V1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n * @min 1024\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface V1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: V1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: McpServerToolConfiguration;\n}\n\nexport enum V1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type V1McpServerTypeWithLiterals = V1McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface RequestMetadata {\n /**\n * An external identifier for the user who is associated with the request.\n * This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. Do not include any identifying information such as name, email address, or phone number.\n * Maximum length: 256\n * Examples: \"13803d75-b4b5-4c3e-b2a2-6f21399b021b\"\n * @maxLength 256\n */\n userId?: string | null;\n}\n\nexport interface InvokeLlamaModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: LlamaModelWithLiterals;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport enum LlamaModel {\n UNKNOWN_LLAMA_MODEL = 'UNKNOWN_LLAMA_MODEL',\n /** meta.llama3-8b-instruct-v1:0 */\n LLAMA_3_8B_INSTRUCT_1_0 = 'LLAMA_3_8B_INSTRUCT_1_0',\n /** meta.llama3-70b-instruct-v1:0 */\n LLAMA_3_70B_INSTRUCT_1_0 = 'LLAMA_3_70B_INSTRUCT_1_0',\n /** meta.llama3-1-8b-instruct-v1:0 */\n LLAMA_3_1_8B_INSTRUCT_1_0 = 'LLAMA_3_1_8B_INSTRUCT_1_0',\n /** meta.llama3-1-70b-instruct-v1:0 */\n LLAMA_3_1_70B_INSTRUCT_1_0 = 'LLAMA_3_1_70B_INSTRUCT_1_0',\n /** meta.llama3-2-1b-instruct-v1:0 */\n LLAMA_3_2_1B_INSTRUCT_1_0 = 'LLAMA_3_2_1B_INSTRUCT_1_0',\n /** meta.llama3-2-3b-instruct-v1:0 */\n LLAMA_3_2_3B_INSTRUCT_1_0 = 'LLAMA_3_2_3B_INSTRUCT_1_0',\n}\n\n/** @enumType */\nexport type LlamaModelWithLiterals =\n | LlamaModel\n | 'UNKNOWN_LLAMA_MODEL'\n | 'LLAMA_3_8B_INSTRUCT_1_0'\n | 'LLAMA_3_70B_INSTRUCT_1_0'\n | 'LLAMA_3_1_8B_INSTRUCT_1_0'\n | 'LLAMA_3_1_70B_INSTRUCT_1_0'\n | 'LLAMA_3_2_1B_INSTRUCT_1_0'\n | 'LLAMA_3_2_3B_INSTRUCT_1_0';\n\nexport interface CreateImageRequest {\n /**\n * A text description of the desired image(s). 
The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: V1ImageModelWithLiterals;\n /** The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.\n */\n quality?: ImageQualityWithLiterals;\n /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. */\n size?: ImageSizeWithLiterals;\n /**\n * The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images.\n * Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3.\n */\n style?: ImageStyleWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n}\n\nexport enum ImageQuality {\n UNKNOWN_IMAGE_QUALITY = 'UNKNOWN_IMAGE_QUALITY',\n STANDARD = 'STANDARD',\n HD = 'HD',\n}\n\n/** @enumType */\nexport type ImageQualityWithLiterals =\n | ImageQuality\n | 'UNKNOWN_IMAGE_QUALITY'\n | 'STANDARD'\n | 'HD';\n\nexport enum ImageSize {\n UNKNOWN_IMAGE_SIZE = 'UNKNOWN_IMAGE_SIZE',\n SIZE_256X256 = 'SIZE_256X256',\n SIZE_512X512 = 'SIZE_512X512',\n SIZE_1024X1024 = 'SIZE_1024X1024',\n SIZE_1792X1024 = 'SIZE_1792X1024',\n SIZE_1024X1792 = 'SIZE_1024X1792',\n}\n\n/** @enumType */\nexport type ImageSizeWithLiterals =\n | ImageSize\n | 'UNKNOWN_IMAGE_SIZE'\n | 'SIZE_256X256'\n | 'SIZE_512X512'\n | 'SIZE_1024X1024'\n | 'SIZE_1792X1024'\n | 'SIZE_1024X1792';\n\nexport enum ImageStyle {\n UNKNOWN_IMAGE_STYLE = 'UNKNOWN_IMAGE_STYLE',\n VIVID = 'VIVID',\n NATURAL = 'NATURAL',\n}\n\n/** @enumType */\nexport type ImageStyleWithLiterals =\n | ImageStyle\n | 'UNKNOWN_IMAGE_STYLE'\n | 'VIVID'\n | 'NATURAL';\n\nexport interface V1TextToImageRequest {\n /** The model to use for generating the image. */\n model?: ImageModelWithLiterals;\n /** Height of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n height?: number | null;\n /** Width of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n width?: number | null;\n /**\n * An array of text prompts to use for generation.\n * @minSize 1\n * @maxSize 10\n */\n textPrompts?: TextPrompt[];\n /** How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt. Default: 7 */\n cfgScale?: number | null;\n /**\n * CLIP Guidance is a technique that uses the CLIP neural network to guide the generation of images to be more in-line with your included prompt,\n * which often results in improved coherency.\n */\n clipGuidancePreset?: ClipGuidancePresetWithLiterals;\n /** Which sampler to use for the diffusion process. If this value is omitted we'll automatically select an appropriate sampler for you. */\n sampler?: SamplerWithLiterals;\n /** Number of images to generate. Default: 1 */\n samples?: number | null;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) 
*/\n seed?: string | null;\n /** Number of diffusion steps to run. Default: 30 */\n steps?: number | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: TextToImageRequestStylePresetWithLiterals;\n}\n\nexport interface TextPrompt {\n /**\n * The text to generate the image from.\n * @maxLength 4000\n */\n text?: string | null;\n /** The weight of the text prompt. */\n weight?: number | null;\n}\n\nexport enum ClipGuidancePreset {\n CLIP_GUIDANCE_PRESET_UNSPECIFIED = 'CLIP_GUIDANCE_PRESET_UNSPECIFIED',\n FAST_BLUE = 'FAST_BLUE',\n FAST_GREEN = 'FAST_GREEN',\n NONE = 'NONE',\n SIMPLE = 'SIMPLE',\n SLOW = 'SLOW',\n SLOWER = 'SLOWER',\n SLOWEST = 'SLOWEST',\n}\n\n/** @enumType */\nexport type ClipGuidancePresetWithLiterals =\n | ClipGuidancePreset\n | 'CLIP_GUIDANCE_PRESET_UNSPECIFIED'\n | 'FAST_BLUE'\n | 'FAST_GREEN'\n | 'NONE'\n | 'SIMPLE'\n | 'SLOW'\n | 'SLOWER'\n | 'SLOWEST';\n\nexport enum Sampler {\n SAMPLER_UNSPECIFIED = 'SAMPLER_UNSPECIFIED',\n DDIM = 'DDIM',\n DDPM = 'DDPM',\n K_DPMPP_2M = 'K_DPMPP_2M',\n K_DPMPP_2S_ANCESTRAL = 'K_DPMPP_2S_ANCESTRAL',\n K_DPM_2 = 'K_DPM_2',\n K_DPM_2_ANCESTRAL = 'K_DPM_2_ANCESTRAL',\n K_EULER = 'K_EULER',\n K_EULER_ANCESTRAL = 'K_EULER_ANCESTRAL',\n K_HEUN = 'K_HEUN',\n K_LMS = 'K_LMS',\n}\n\n/** @enumType */\nexport type SamplerWithLiterals =\n | Sampler\n | 'SAMPLER_UNSPECIFIED'\n | 'DDIM'\n | 'DDPM'\n | 'K_DPMPP_2M'\n | 'K_DPMPP_2S_ANCESTRAL'\n | 'K_DPM_2'\n | 'K_DPM_2_ANCESTRAL'\n | 'K_EULER'\n | 'K_EULER_ANCESTRAL'\n | 'K_HEUN'\n | 'K_LMS';\n\nexport enum TextToImageRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type TextToImageRequestStylePresetWithLiterals =\n | TextToImageRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateCoreRequest {\n /** The model to use for generating the image. 
will always be STABLE_IMAGE_CORE */\n model?: ImageCoreModelWithLiterals;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Default: 1:1\n * One of : 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Default: 0\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: GenerateCoreRequestStylePresetWithLiterals;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum GenerateCoreRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type GenerateCoreRequestStylePresetWithLiterals =\n | GenerateCoreRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateStableDiffusionRequest {\n /**\n * The text prompt to generate the image from.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Controls whether this is a text-to-image or image-to-image generation.\n * - TEXT_TO_IMAGE requires only the prompt parameter.\n * - IMAGE_TO_IMAGE requires prompt, image, and strength parameters.\n */\n mode?: GenerationModeWithLiterals;\n /**\n * The image to use as the starting point for the generation.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Controls how much influence the image parameter 
has on the output image.\n * A value of 0 yields an image identical to the input; 1 ignores the input image.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n */\n strength?: number | null;\n /**\n * Default: 1:1\n * One of : 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * This parameter is only valid for TEXT_TO_IMAGE mode.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /** The model to use for generation. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Dictates the content-type of the generated image. */\n outputFormat?: GenerateStableDiffusionRequestOutputFormatWithLiterals;\n /**\n * Keywords of what you do not wish to see in the output image.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n}\n\nexport enum GenerationMode {\n UNKNOWN_GENERATION_MODE = 'UNKNOWN_GENERATION_MODE',\n TEXT_TO_IMAGE = 'TEXT_TO_IMAGE',\n IMAGE_TO_IMAGE = 'IMAGE_TO_IMAGE',\n}\n\n/** @enumType */\nexport type GenerationModeWithLiterals =\n | GenerationMode\n | 'UNKNOWN_GENERATION_MODE'\n | 'TEXT_TO_IMAGE'\n | 'IMAGE_TO_IMAGE';\n\nexport enum GenerateStableDiffusionRequestOutputFormat {\n OUTPUT_FORMAT_UNSPECIFIED = 'OUTPUT_FORMAT_UNSPECIFIED',\n JPEG = 'JPEG',\n PNG = 'PNG',\n}\n\n/** @enumType */\nexport type GenerateStableDiffusionRequestOutputFormatWithLiterals =\n | GenerateStableDiffusionRequestOutputFormat\n | 'OUTPUT_FORMAT_UNSPECIFIED'\n | 'JPEG'\n | 'PNG';\n\n/** Request to generate an image */\nexport interface GenerateAnImageRequest {\n /** The model to use for generating the image. */\n model?: GenerateAnImageModelWithLiterals;\n /**\n * The prompt to use for image generation.\n * Relevant models : ALL\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Optional seed for reproducibility. If not provided, a random seed will be used.\n * Relevant models : ALL\n */\n seed?: number | null;\n /**\n * Aspect ratio of the image between 21:9 and 9:21\n * default: 16:9\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * Width of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n width?: number | null;\n /**\n * Height of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n height?: number | null;\n /**\n * Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.\n * Relevant models : ALL\n * @max 6\n */\n safetyTolerance?: number | null;\n /**\n * Output format for the generated image. 
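// Hedged sketch of the two GenerationMode shapes documented above
// (illustrative values; import path assumed from the package name).
import type { GenerateStableDiffusionRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const textToImage: GenerateStableDiffusionRequest = {
  mode: 'TEXT_TO_IMAGE',
  prompt: 'isometric watercolor village, soft light',
  aspectRatio: '16:9', // only valid in TEXT_TO_IMAGE mode
  outputFormat: 'PNG',
};

const imageToImage: GenerateStableDiffusionRequest = {
  mode: 'IMAGE_TO_IMAGE',
  prompt: 'same scene at golden hour',
  url: 'https://static.wixstatic.com/media/example.png', // placeholder Wix URL
  strength: 0.35, // 0 keeps the input image intact, 1 ignores it
};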
Can be 'jpeg' or 'png'.\n * Relevant models : ALL\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Generate less processed, more natural-looking images\n * Relevant models : FLUX_PRO_1_1_ULTRA\n */\n raw?: boolean | null;\n /**\n * Optional image to remix\n * The URL must be a valid wix mp or wix static URL.\n * Relevant models FLUX_PRO_1_1_ULTRA, FLUX_1_DEV, FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Blend between the prompt and the image prompt\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @max 1\n */\n imagePromptStrength?: number | null;\n /**\n * Optional image to remix\n * Image to use as control input - relevant models FLUX_PRO_1_DEPTH and FLUX_PRO_1_CANNY\n * @maxLength 100000\n */\n controlImageUrl?: string | null;\n /**\n * Whether to perform upsampling on the prompt\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n */\n promptUpsampling?: boolean | null;\n /**\n * Number of steps for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @min 15\n * @max 50\n */\n steps?: number | null;\n /**\n * Guidance strength for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @max 100\n */\n guidance?: number | null;\n /**\n * Image Mask\n * A URL representing a mask for the areas you want to modify in the image.\n * The mask should be the same dimensions as the image and in black and white.\n * Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting.\n * Optional if you provide an alpha mask in the original image.\n * Validation: The endpoint verifies that the dimensions of the mask match the original image.\n * Relevant models FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageMaskUrl?: string | null;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum GenerateAnImageModel {\n GEN_IMAGE_MODEL_UNSPECIFIED = 'GEN_IMAGE_MODEL_UNSPECIFIED',\n FLUX_PRO_1_1_ULTRA = 'FLUX_PRO_1_1_ULTRA',\n FLUX_1_DEV = 'FLUX_1_DEV',\n FLUX_PRO_1_CANNY = 'FLUX_PRO_1_CANNY',\n FLUX_PRO_1_DEPTH = 'FLUX_PRO_1_DEPTH',\n FLUX_PRO_1_FILL = 'FLUX_PRO_1_FILL',\n}\n\n/** @enumType */\nexport type GenerateAnImageModelWithLiterals =\n | GenerateAnImageModel\n | 'GEN_IMAGE_MODEL_UNSPECIFIED'\n | 'FLUX_PRO_1_1_ULTRA'\n | 'FLUX_1_DEV'\n | 'FLUX_PRO_1_CANNY'\n | 'FLUX_PRO_1_DEPTH'\n | 'FLUX_PRO_1_FILL';\n\nexport interface CreatePredictionRequest\n extends CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n /** The model version ID */\n model?: CreatePredictionModelWithLiterals;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreatePredictionRequestInputOneOf {\n /** 
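// Hedged sketch of a Black Forest Labs request: per the "Relevant models"
// notes above, aspectRatio/raw/imagePromptStrength target FLUX_PRO_1_1_ULTRA,
// while FLUX_1_DEV would use width/height/steps/guidance instead.
import type { GenerateAnImageRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const fluxUltra: GenerateAnImageRequest = {
  model: 'FLUX_PRO_1_1_ULTRA',
  prompt: 'macro photo of a dew-covered fern leaf',
  aspectRatio: '16:9',
  raw: true, // less processed, more natural-looking output
  safetyTolerance: 2,
  skipPolling: true, // per the field docs, call GetResult yourself when true
};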
The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n}\n\nexport enum CreatePredictionModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n /** Flux-dev-controlnet */\n FLUX_DEV_CONTROLNET = 'FLUX_DEV_CONTROLNET',\n /** https://replicate.com/reve/edit. Has a `prompt` field, routed through GenerateContent */\n REVE_EDIT = 'REVE_EDIT',\n /** https://replicate.com/lucataco/florence-2-large */\n LUCATACO_FLORENCE_2_LARGE = 'LUCATACO_FLORENCE_2_LARGE',\n /** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\n PERCEPTRON_ISAAC_01 = 'PERCEPTRON_ISAAC_01',\n /** https://replicate.com/prunaai/z-image-turbo */\n PRUNAAI_Z_IMAGE_TURBO = 'PRUNAAI_Z_IMAGE_TURBO',\n}\n\n/** @enumType */\nexport type CreatePredictionModelWithLiterals =\n | CreatePredictionModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID'\n | 'FLUX_DEV_CONTROLNET'\n | 'REVE_EDIT'\n | 'LUCATACO_FLORENCE_2_LARGE'\n | 'PERCEPTRON_ISAAC_01'\n | 'PRUNAAI_Z_IMAGE_TURBO';\n\nexport interface FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * Number of images to generate\n * @min 1\n * @max 4\n */\n numOutputs?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the quality of the output image for jpg and webp (1-100)\n * @min 1\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n}\n\nexport interface FluxDevControlnet {\n /** Set a seed for reproducibility. Random by default. 
*/\n seed?: number | null;\n /**\n * Number of steps\n * @min 1\n * @max 50\n */\n steps?: number | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Optional LoRA model to use.\n * Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link.\n * @maxLength 2000\n */\n loraUrl?: string | null;\n /**\n * Type of control net\n * @maxLength 100\n */\n controlType?: string | null;\n /**\n * Image to use with control net\n * @maxLength 2000\n */\n controlImage?: string | null;\n /**\n * Strength of LoRA model\n * @min -1\n * @max 3\n */\n loraStrength?: number | null;\n /**\n * Format of the output images\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Guidance scale\n * @max 5\n */\n guidanceScale?: number | null;\n /**\n * Quality of the output images, from 0 to 100.\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Things you do not want to see in your image\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Strength of control net.\n * @max 3\n */\n controlStrength?: number | null;\n /**\n * Preprocessor to use with depth control net\n * @maxLength 100\n */\n depthPreprocessor?: string | null;\n /**\n * Preprocessor to use with soft edge control net\n * @maxLength 100\n */\n softEdgePreprocessor?: string | null;\n /**\n * Strength of image to image control.\n * @max 1\n */\n imageToImageStrength?: number | null;\n /** Return the preprocessed image used to control the generation process. */\n returnPreprocessedImage?: boolean | null;\n}\n\nexport interface ReveEdit {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Edit instructions\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Specific version to use. 
Default: \"latest\"\n * @maxLength 10000\n */\n version?: string | null;\n}\n\n/** https://replicate.com/lucataco/florence-2-large/readme */\nexport interface LucatacoFlorence2Large {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /** Which task to perform */\n taskInput?: TaskInputWithLiterals;\n /**\n * Optional input for some task types\n * @maxLength 10000\n */\n textInput?: string | null;\n}\n\nexport enum TaskInput {\n UNRECOGNIZED_TASK_INPUT = 'UNRECOGNIZED_TASK_INPUT',\n OBJECT_DETECTION = 'OBJECT_DETECTION',\n CAPTION = 'CAPTION',\n DETAILED_CAPTION = 'DETAILED_CAPTION',\n MORE_DETAILED_CAPTION = 'MORE_DETAILED_CAPTION',\n CAPTION_TO_PHRASE_GROUNDING = 'CAPTION_TO_PHRASE_GROUNDING',\n REGION_PROPOSAL = 'REGION_PROPOSAL',\n DENSE_REGION_CAPTION = 'DENSE_REGION_CAPTION',\n OCR = 'OCR',\n OCR_WITH_REGION = 'OCR_WITH_REGION',\n}\n\n/** @enumType */\nexport type TaskInputWithLiterals =\n | TaskInput\n | 'UNRECOGNIZED_TASK_INPUT'\n | 'OBJECT_DETECTION'\n | 'CAPTION'\n | 'DETAILED_CAPTION'\n | 'MORE_DETAILED_CAPTION'\n | 'CAPTION_TO_PHRASE_GROUNDING'\n | 'REGION_PROPOSAL'\n | 'DENSE_REGION_CAPTION'\n | 'OCR'\n | 'OCR_WITH_REGION';\n\n/** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\nexport interface PerceptronIsaac01 {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /** Which task to perform */\n response?: ResponseTypeWithLiterals;\n /** Max new tokens */\n maxNewTokens?: string | null;\n}\n\nexport enum ResponseType {\n UNRECOGNIZED_RESPONSE_TYPE = 'UNRECOGNIZED_RESPONSE_TYPE',\n TEXT = 'TEXT',\n BOX = 'BOX',\n POINT = 'POINT',\n POLYGON = 'POLYGON',\n}\n\n/** @enumType */\nexport type ResponseTypeWithLiterals =\n | ResponseType\n | 'UNRECOGNIZED_RESPONSE_TYPE'\n | 'TEXT'\n | 'BOX'\n | 'POINT'\n | 'POLYGON';\n\n/** https://replicate.com/prunaai/z-image-turbo */\nexport interface PrunaaiZImageTurbo {\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Height of the generated image\n * @min 64\n * @max 2048\n */\n width?: number | null;\n /**\n * Width of the generated image\n * @min 64\n * @max 2048\n */\n height?: number | null;\n /**\n * Number of inference steps. This actually results in (num_inference_steps - 1) DiT forwards\n * @min 1\n * @max 50\n */\n numInferenceSteps?: number | null;\n /**\n * Guidance scale. Should be 0 for Turbo models\n * @max 20\n */\n guidanceScale?: number | null;\n /** Random seed. Set for reproducible generation */\n seed?: number | null;\n /**\n * Format of the output images\n * @maxLength 5\n */\n outputFormat?: string | null;\n /**\n * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs\n * @max 100\n */\n outputQuality?: number | null;\n}\n\nexport interface EditImageWithPromptRequest {\n /** The model to use for generating the image. 
*/\n model?: EditImageWithPromptRequestModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * image format jpeg, png, webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * Optional for OUTPAINT model , and required for INPAINT model\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask\n * The image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support\n * If you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * Relevant only for INPAINT model\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * image mask format jpeg, png, webp\n * Relevant only for INPAINT model\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * Relevant only for INPAINT model\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. 
This list of style presets is subject to change.\n */\n stylePreset?: StylePresetWithLiterals;\n /**\n * The direction to outpaint the image\n * Relevant only for OUTPAINT model\n * At least one of the fields must be set\n */\n outpaintDirection?: OutpaintDirection;\n /**\n * Controls the likelihood of creating additional details not heavily conditioned by the init image [0..1]\n * Relevant only for OUTPAINT model\n * @max 1\n */\n creativity?: number | null;\n}\n\nexport enum StylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type StylePresetWithLiterals =\n | StylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface OutpaintDirection {\n /**\n * The number of pixels to outpaint on the left side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n left?: number | null;\n /**\n * The number of pixels to outpaint on the right side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n right?: number | null;\n /**\n * The number of pixels to outpaint on the top of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n up?: number | null;\n /**\n * The number of pixels to outpaint on the bottom of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n down?: number | null;\n}\n\nexport interface TextToImageRequest {\n /**\n * Specifies the format of the output image. Supported formats are: PNG, JPG and WEBP. Default: JPG.\n * @maxLength 4\n */\n outputFormat?: string | null;\n /**\n * Sets the compression quality of the output image. Higher values preserve more quality but increase file size, lower values reduce file size but decrease quality. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /** This parameter is used to enable or disable the NSFW check. */\n checkNsfw?: boolean | null;\n /**\n * A positive prompt is a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n * For example, if the positive prompt is \"dragon drinking coffee\", the model will generate an image of a dragon drinking coffee. The more detailed the prompt, the more accurate the results.\n * The length of the prompt must be between 2 and 3000 characters.\n * @maxLength 1000000\n */\n positivePrompt?: string;\n /**\n * Used to define the height dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n height?: number;\n /**\n * Used to define the width dimension of the generated image. 
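// Hedged outpaint sketch for EditImageWithPromptRequest above:
// outpaintDirection must set at least one side, and prompt is optional for
// outpainting (required for inpainting). The model field is omitted because
// its enum values sit outside this excerpt.
import type { EditImageWithPromptRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const outpaint: EditImageWithPromptRequest = {
  imageUrl: 'https://static.wixstatic.com/media/example.png', // placeholder
  imageFormat: 'png',
  outpaintDirection: { left: 256, right: 256 }, // extend 256px on both sides
  creativity: 0.5,
  outputFormat: 'png',
};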
Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n width?: number;\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /** Model to invoke. */\n model?: TextToImageRequestModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The number of steps is the number of iterations the model will perform to generate the image. Default: 28.\n * @min 1\n * @max 100\n */\n steps?: number | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /**\n * Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results. Default: 7.\n * @max 30\n */\n cfgScale?: number | null;\n /** The number of images to generate from the specified prompt. */\n numberResults?: number | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n /**\n * Used to determine the influence of the seedImage image in the generated output. A lower value results in more influence from the original image, while a higher value allows more creative deviation.\n * @max 1\n */\n strength?: number | null;\n /**\n * An array of LoRA models to be applied during the image generation process.\n * @maxSize 10\n */\n loraModels?: LoraModelSelect[];\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /** Inputs for the image generation process. 
*/\n inputs?: Inputs;\n}\n\nexport enum TextToImageRequestModel {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n /** runware:101@1 */\n FLUX_1_DEV = 'FLUX_1_DEV',\n /** runware:100@1 */\n FLUX_1_SCHNELL = 'FLUX_1_SCHNELL',\n /** bfl:4@1 */\n FLUX_1_KONTEXT_MAX = 'FLUX_1_KONTEXT_MAX',\n /** bfl:3@1 */\n FLUX_1_KONTEXT_PRO = 'FLUX_1_KONTEXT_PRO',\n /** runware:108@20 */\n QWEN_IMAGE_EDIT = 'QWEN_IMAGE_EDIT',\n /** ideogram:4@1 */\n IDEOGRAM_3_0 = 'IDEOGRAM_3_0',\n /** ideogram:4@3 */\n IDEOGRAM_3_0_EDIT = 'IDEOGRAM_3_0_EDIT',\n /** bfl:2@2 */\n FLUX_1_1_PRO_ULTRA = 'FLUX_1_1_PRO_ULTRA',\n /** bfl:1@2 */\n FLUX_1_FILL_PRO = 'FLUX_1_FILL_PRO',\n /** bytedance:5@0 */\n SEEDREAM_4 = 'SEEDREAM_4',\n /** runware:102@1 */\n FLUX_DEV_FILL = 'FLUX_DEV_FILL',\n /** bfl:1@5 */\n FLUX_DEPTH_PRO = 'FLUX_DEPTH_PRO',\n /** bfl:1@4 */\n FLUX_CANNY_PRO = 'FLUX_CANNY_PRO',\n /** Should be used together with model_id field from allowed models list */\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type TextToImageRequestModelWithLiterals =\n | TextToImageRequestModel\n | 'UNKNOWN_MODEL'\n | 'FLUX_1_DEV'\n | 'FLUX_1_SCHNELL'\n | 'FLUX_1_KONTEXT_MAX'\n | 'FLUX_1_KONTEXT_PRO'\n | 'QWEN_IMAGE_EDIT'\n | 'IDEOGRAM_3_0'\n | 'IDEOGRAM_3_0_EDIT'\n | 'FLUX_1_1_PRO_ULTRA'\n | 'FLUX_1_FILL_PRO'\n | 'SEEDREAM_4'\n | 'FLUX_DEV_FILL'\n | 'FLUX_DEPTH_PRO'\n | 'FLUX_CANNY_PRO'\n | 'FROM_MODEL_ID';\n\nexport interface LoraModelSelect {\n /**\n * The unique identifier of the LoRA model, typically in the format \"wix:<id>@<version>\".\n * @minLength 1\n * @maxLength 255\n */\n model?: string | null;\n /**\n * The weight or influence of the LoRA model during the generation process.\n * A higher value indicates a stronger influence of the LoRA model on the output.\n * @min -4\n * @max 4\n */\n weight?: number | null;\n}\n\nexport interface Inputs {\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n}\n\nexport interface InvokeMlPlatformLlamaModelRequest {\n /**\n * The ML platform model id.\n * @minLength 1\n * @maxLength 50\n */\n modelId?: string;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. 
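// Hedged sketch of the Runware FROM_MODEL_ID pattern described above: the
// enum value defers to the modelId string, and loraModels layers adapters in
// the "wix:<id>@<version>" format (identifiers here are hypothetical).
import type { TextToImageRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const runwareRequest: TextToImageRequest = {
  model: 'FROM_MODEL_ID',
  modelId: 'runware:101@1', // any id from the allowed models list
  positivePrompt: 'flat-lay photo of handmade ceramic mugs',
  width: 1024, // must be divisible by 64
  height: 768, // must be divisible by 64
  steps: 28,
  loraModels: [{ model: 'wix:12345@1', weight: 0.8 }], // hypothetical LoRA id
};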
Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport interface InvokeChatCompletionRequest {\n /** Model to invoke */\n model?: PerplexityModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far\n * @maxSize 1000\n */\n messages?: PerplexityMessage[];\n /**\n * Max number of completion tokens.\n * Completion token count + prompt token count must not exceed the size of the context window\n * @max 200000\n */\n maxTokens?: number | null;\n /**\n * The amount of randomness in the response, valued between 0 inclusive and 2 exclusive.\n * Higher values are more random, and lower values are more deterministic.\n */\n temperature?: number | null;\n /**\n * The nucleus sampling threshold, valued between 0 and 1 inclusive.\n * For each subsequent token, the model considers the results of the tokens with top_p probability mass.\n * Perplexity recommends either altering top_k or top_p, but not both.\n */\n topP?: number | null;\n /**\n * Given a list of domains, limit the citations used by the online model to URLs from the specified domains.\n * Currently limited to only 3 domains for whitelisting and blacklisting.\n * For blacklisting add a - to the beginning of the domain string.\n * @maxLength 10000\n * @maxSize 3\n */\n searchDomainFilter?: string[];\n /** Determines whether or not a request to an online model should return images. */\n returnImages?: boolean | null;\n /** Determines whether or not a request to an online model should return related questions. */\n returnRelatedQuestions?: boolean | null;\n /**\n * Returns search results within the specified time interval - does not apply to images.\n * Must be one of \"month\", \"week\", \"day\", \"hour\"\n * @maxLength 10\n */\n searchRecencyFilter?: string | null;\n /**\n * The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive.\n * If set to 0, top-k filtering is disabled. Perplexity recommends either altering top_k or top_p, but not both.\n */\n topK?: number | null;\n /**\n * A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics. Incompatible with `frequency_penalty`.\n */\n presencePenalty?: number | null;\n /**\n * A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing\n * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n * A value of 1.0 means no penalty. 
Incompatible with `presence_penalty`.\n */\n frequencyPenalty?: number | null;\n /**\n * Enable structured outputs with a JSON or Regex schema.\n * https://docs.perplexity.ai/guides/structured-outputs\n */\n responseFormat?: InvokeChatCompletionRequestResponseFormat;\n}\n\nexport interface InvokeChatCompletionRequestResponseFormat\n extends InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** @oneof */\nexport interface InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** mimics https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api */\nexport interface GenerateImageRequest {\n /** ID of the model to use. */\n model?: ImagenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n instances?: Instance[];\n /** The configuration for the generation. */\n parameters?: Parameters;\n}\n\nexport enum ImagenModel {\n UNKNOWN_IMAGEN_MODEL = 'UNKNOWN_IMAGEN_MODEL',\n IMAGEN_3_0_GENERATE_002 = 'IMAGEN_3_0_GENERATE_002',\n IMAGEN_3_0_FAST_GENERATE_001 = 'IMAGEN_3_0_FAST_GENERATE_001',\n IMAGEN_4_0_GENERATE_001 = 'IMAGEN_4_0_GENERATE_001',\n IMAGEN_4_0_FAST_GENERATE_001 = 'IMAGEN_4_0_FAST_GENERATE_001',\n IMAGEN_4_0_ULTRA_GENERATE_001 = 'IMAGEN_4_0_ULTRA_GENERATE_001',\n}\n\n/** @enumType */\nexport type ImagenModelWithLiterals =\n | ImagenModel\n | 'UNKNOWN_IMAGEN_MODEL'\n | 'IMAGEN_3_0_GENERATE_002'\n | 'IMAGEN_3_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_GENERATE_001'\n | 'IMAGEN_4_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_ULTRA_GENERATE_001';\n\nexport interface Instance {\n /**\n * The text prompt for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n}\n\nexport interface Parameters {\n /**\n * The number of images to generate (1-4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /** Optional random seed for image generation */\n seed?: string | null;\n /** Optional parameter to use LLM-based prompt rewriting for higher quality images */\n enhancePrompt?: boolean | null;\n /**\n * Optional text to discourage in the generated images\n * @maxLength 480\n */\n negativePrompt?: string | null;\n /**\n * Optional aspect ratio for the image (1:1, 9:16, 16:9, 3:4, 4:3)\n * @maxLength 5\n */\n aspectRatio?: string | null;\n /** Optional output image format options */\n outputOptions?: OutputOptions;\n /**\n * Optional setting for allowing/disallowing generation of people\n * @maxLength 20\n */\n personGeneration?: string | null;\n /**\n * Optional safety filtering level\n * @maxLength 50\n */\n safetySetting?: string | null;\n /** Optional flag to add invisible watermark */\n addWatermark?: boolean | null;\n}\n\nexport interface OutputOptions {\n /**\n * Image format (image/png or image/jpeg)\n * @maxLength 20\n */\n mimeType?: string | null;\n /**\n * Compression quality for JPEG (0-100)\n * @max 100\n */\n compressionQuality?: number | null;\n}\n\nexport interface GenerateImageMlPlatformRequest\n extends GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n /** The model version ID */\n 
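// Hedged sketch of the Vertex-style Imagen shape above: instances carry
// prompts, parameters carry generation settings (values invented).
import type { GenerateImageRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const imagen: GenerateImageRequest = {
  model: 'IMAGEN_4_0_GENERATE_001',
  instances: [{ prompt: 'paper-cut diorama of a coral reef' }],
  parameters: {
    sampleCount: 2,
    aspectRatio: '4:3',
    outputOptions: { mimeType: 'image/jpeg', compressionQuality: 90 },
  },
};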
model?: GenerateImageMlPlatformModelWithLiterals;\n}\n\n/** @oneof */\nexport interface GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n}\n\nexport enum GenerateImageMlPlatformModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n}\n\n/** @enumType */\nexport type GenerateImageMlPlatformModelWithLiterals =\n | GenerateImageMlPlatformModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID';\n\nexport interface V1FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n /** Time step to start CFG - new field for ml platform */\n timestepToStartCfg?: number | null;\n /** Option to disable the NSFW safety checker */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface CreateImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. 
*/\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, high , Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format png,webp jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence, self-harm\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows to set transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n}\n\nexport interface EditImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, high , Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536 x 1024\n * Portrait: 1024 x 1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format png,webp jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * The image to be edited.\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * The image mask to be edited.\n * @maxLength 10000\n */\n imageMaskUrl?: string | null;\n /**\n * Additional images to be edited.\n * @maxSize 10\n * @maxLength 10000\n */\n imageUrls?: string[] | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence, self-harm\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows to set transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n /**\n * Control how much effort the model will exert to match the style and features, especially facial features, of input images.\n * This parameter is only supported for gpt-image-1. Supports high and low. 
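// Hedged sketch of the transparency rule documented above: when background is
// 'transparent', outputFormat must be png or webp. The model field is left
// unset because OpenAiImageModel's literals are outside this excerpt.
import type { CreateImageOpenAiRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const openAiImage: CreateImageOpenAiRequest = {
  prompt: 'sticker-style robot mascot with clean edges',
  n: 1,
  size: '1024x1024',
  background: 'transparent',
  outputFormat: 'png', // transparency requires png or webp
  moderation: 'auto',
};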
Defaults to low.\n * @maxLength 10\n */\n inputFidelity?: string | null;\n}\n\n/** Mirrors https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo-video-generation */\nexport interface GenerateVideoRequest {\n /** ID of the Video generation model to use. */\n model?: VideoGenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 100\n */\n instances?: GenerateVideoInstance[];\n /** Generation-time settings. */\n parameters?: GenerateVideoParameters;\n}\n\nexport enum VideoGenModel {\n UNKNOWN_VIDEO_GEN_MODEL = 'UNKNOWN_VIDEO_GEN_MODEL',\n VEO_2_0_GENERATE_001 = 'VEO_2_0_GENERATE_001',\n VEO_3_0_GENERATE_001 = 'VEO_3_0_GENERATE_001',\n VEO_3_0_FAST_GENERATE_001 = 'VEO_3_0_FAST_GENERATE_001',\n}\n\n/** @enumType */\nexport type VideoGenModelWithLiterals =\n | VideoGenModel\n | 'UNKNOWN_VIDEO_GEN_MODEL'\n | 'VEO_2_0_GENERATE_001'\n | 'VEO_3_0_GENERATE_001'\n | 'VEO_3_0_FAST_GENERATE_001';\n\nexport interface GenerateVideoInstance {\n /**\n * Mandatory (text-to-video), optional if an input image prompt is provided (image-to-video)\n * Text input for guiding video generation.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Mandatory (image-to-video), optional if a text prompt is provided (text-to-video)\n * Image input for guiding video generation.\n */\n image?: V1ImageInput;\n}\n\nexport interface V1ImageInput {\n /**\n * A publicly available image URL\n * @format WEB_URL\n */\n imageUrl?: string | null;\n /**\n * MIME type of the image (image/jpeg or image/png)\n * @maxLength 20\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateVideoParameters {\n /**\n * Requested video length in seconds (4, 6, or 8. The default is 8)\n * @min 4\n * @max 8\n */\n durationSeconds?: number | null;\n /**\n * A text string that describes anything you want to discourage the model from generating.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /** Use gemini to enhance your prompts (default is True) */\n enhancePrompt?: boolean | null;\n /**\n * A number to request to make generated videos deterministic.\n * Adding a seed number with your request without changing other parameters will cause the model to produce the same videos.\n */\n seed?: string | null;\n /**\n * Number of videos to generate (1–4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /**\n * Aspect ratio: 16:9 (default, landscape) or 9:16 (portrait)\n * @maxLength 50\n */\n aspectRatio?: string | null;\n /**\n * The safety setting that controls whether people or face generation is allowed:\n * \"allow_adult\" (default value): allow generation of adults only\n * \"disallow\": disallows inclusion of people/faces in images\n * @maxLength 50\n */\n personGeneration?: string | null;\n /** Whether to generate audio for the video */\n generateAudio?: boolean | null;\n /**\n * The resolution of the generated video. Supported values: 720p, 1080p. 
Default: 1080p\n * @maxLength 50\n */\n resolution?: string | null;\n}\n\n/** Add to your existing proto file */\nexport interface V1CreateChatCompletionRequest {\n /** Model identifier */\n model?: ChatCompletionModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: GoogleproxyV1ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: V1CreateChatCompletionRequestResponseFormat;\n}\n\nexport interface V1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawRequest {\n /**\n * ML Platform model identifier\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. 
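// Hedged image-to-video sketch for the Veo shapes above: prompt becomes
// optional once an input image is provided (URLs and values are placeholders).
import type { GenerateVideoRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const veo: GenerateVideoRequest = {
  model: 'VEO_3_0_GENERATE_001',
  instances: [{
    prompt: 'slow dolly-in, soft morning light',
    image: { imageUrl: 'https://example.com/frame.jpg', mimeType: 'image/jpeg' },
  }],
  parameters: {
    durationSeconds: 8,
    aspectRatio: '16:9',
    generateAudio: true,
    resolution: '1080p',
  },
};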
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: ResponseFormat;\n}\n\nexport interface ResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface VideoInferenceRequest {\n /** Specifies the format of the output video. Supported formats are: MP4 and WEBM. Default: MP4. */\n outputFormat?: OutputFormatWithLiterals;\n /**\n * Sets the compression quality of the output video. Higher values preserve more quality but increase file size. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /**\n * The text description that guides the video generation process. This prompt defines what you want to see in the video.\n * The length of the prompt must be at least 2 characters.\n * @minLength 2\n * @maxLength 100000\n */\n positivePrompt?: string | null;\n /**\n * Specifies what you want to avoid in the generated video.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * An array of objects that define key frames to guide video generation.\n * @maxSize 100\n */\n frameImages?: FrameImage[];\n /**\n * An array containing reference images used to condition the generation process. Must be URLs pointing to the images. The images must be accessible publicly.\n * @maxSize 10\n * @maxLength 100000\n */\n referenceImages?: string[] | null;\n /**\n * The width of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n width?: number | null;\n /**\n * The height of the generated video in pixels. 
Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n height?: number | null;\n /** The AI model to use for video generation. */\n model?: VideoModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The length of the generated video in seconds.\n * @min 1\n * @max 10\n */\n duration?: number | null;\n /**\n * The frame rate (frames per second) of the generated video. Default: 24.\n * @min 15\n * @max 60\n */\n fps?: number | null;\n /**\n * The number of denoising steps the model performs during video generation.\n * @min 10\n * @max 50\n */\n steps?: number | null;\n /** A seed is a value used to randomize the video generation. */\n seed?: string | null;\n /**\n * Controls how closely the video generation follows your prompt. Recommended range is 6.0-10.0 for most video models.\n * @max 50\n */\n cfgScale?: number | null;\n /**\n * Specifies how many videos to generate for the given parameters. Default: 1.\n * @min 1\n * @max 4\n */\n numberResults?: number | null;\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /**\n * Skip polling flag - if set to false, will poll until video generation is complete\n * If not set or true, returns immediately with task UUID for manual polling\n */\n skipPolling?: boolean | null;\n}\n\nexport enum OutputFormat {\n UNKNOWN_OUTPUT_FORMAT = 'UNKNOWN_OUTPUT_FORMAT',\n /** MPEG-4 video format, widely compatible and recommended for most use cases. */\n MP4 = 'MP4',\n /** WebM video format, optimized for web delivery and smaller file sizes. */\n WEBM = 'WEBM',\n}\n\n/** @enumType */\nexport type OutputFormatWithLiterals =\n | OutputFormat\n | 'UNKNOWN_OUTPUT_FORMAT'\n | 'MP4'\n | 'WEBM';\n\nexport interface FrameImage {\n /**\n * Specifies the input image that will be used to constrain the video content at the specified frame position.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 100000\n */\n inputImage?: string;\n /**\n * Specifies the position of this frame constraint within the video timeline.\n * Can be \"first\", \"last\", or a numeric frame number.\n * @maxLength 20\n */\n frame?: string | null;\n}\n\nexport enum VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SEEDANCE_1_0_PRO = 'SEEDANCE_1_0_PRO',\n SEEDANCE_1_0_LITE = 'SEEDANCE_1_0_LITE',\n SEEDANCE_1_0_PRO_FAST = 'SEEDANCE_1_0_PRO_FAST',\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type VideoModelWithLiterals =\n | VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SEEDANCE_1_0_PRO'\n | 'SEEDANCE_1_0_LITE'\n | 'SEEDANCE_1_0_PRO_FAST'\n | 'FROM_MODEL_ID';\n\nexport interface V1OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: V1ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. 
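// Hedged sketch of the video-inference shape above: frameImages pins key
// frames ('first', 'last', or a frame number) and dimensions must be
// multiples of 8 (values invented).
import type { VideoInferenceRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const video: VideoInferenceRequest = {
  model: 'SEEDANCE_1_0_LITE',
  positivePrompt: 'time-lapse of clouds rolling over a mountain ridge',
  width: 1280,
  height: 720,
  duration: 5,
  fps: 24,
  frameImages: [{ inputImage: 'https://example.com/start.jpg', frame: 'first' }],
  skipPolling: false, // false polls inline until the video is ready
};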
Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: V1ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: V1ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: V1ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: V1ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: V1ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. 
*/\n store?: boolean | null;\n}\n\nexport interface V1ResponsesInputItem extends V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. 
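// Hedged sketch of the input-item @oneof above: each item sets exactly one
// variant. The content type string and call id are assumptions, not values
// confirmed by this file.
import type { V1ResponsesInputItem } from '@wix/auto_sdk_ai-gateway_prompts';

const input: V1ResponsesInputItem[] = [
  {
    message: {
      role: 'USER',
      content: [{ type: 'input_text', text: 'Summarize my latest order.' }],
    },
  },
  {
    functionToolCallOutput: {
      type: 'function_call_output',
      callId: 'call_123', // hypothetical id linking back to the original call
      output: '{"status":"shipped"}',
    },
  },
];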
*/\n role?: ResponsesInputMessageResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: V1ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesInputMessageResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesInputMessageResponsesMessageRoleWithLiterals =\n | ResponsesInputMessageResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface V1ResponsesInputMessageContent\n extends V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n}\n\nexport interface ResponsesInputMessageContentImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface ResponsesInputMessageContentFileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface V1ResponsesTextFormat\n extends V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n}\n\nexport interface ResponsesTextFormatJsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. 
Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the\n */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface V1ResponsesTool extends V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\nexport interface V1ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: ResponsesWebSearchUserLocation;\n}\n\nexport interface ResponsesWebSearchUserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface V1ResponsesFunction {\n /**\n * The type of the function tool. 
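A hedged sketch of the structured-output configuration defined by `V1ResponsesTextFormat` and `ResponsesTextFormatJsonSchema` above; the schema name and fields are illustrative, and the import path is an assumption:

```ts
import type { V1ResponsesTextFormat } from '@wix/auto_sdk_ai-gateway_prompts';

// Constrain the model to emit JSON matching a schema.
const text: V1ResponsesTextFormat = {
  jsonSchema: {
    name: 'weather_report', // hypothetical schema name
    type: 'json_schema',
    strict: true, // only a subset of JSON Schema is supported when strict is true
    schema: {
      type: 'object',
      properties: { city: { type: 'string' }, tempC: { type: 'number' } },
      required: ['city', 'tempC'],
      additionalProperties: false,
    },
  },
};
```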
Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. OpenAI structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: V1ResponsesCodeInterpreterContainer;\n}\n\nexport interface V1ResponsesCodeInterpreterContainer\n extends V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface V1ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\nexport interface OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model.
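A minimal sketch of the tool list shaped by `V1ResponsesTool` and the code-interpreter container types above; the function name and parameter schema are hypothetical, and the import path is an assumption:

```ts
import type { V1ResponsesTool } from '@wix/auto_sdk_ai-gateway_prompts';

// A function tool plus a hosted code interpreter with an auto container.
const tools: V1ResponsesTool[] = [
  {
    function: {
      type: 'function',
      name: 'get_weather', // hypothetical function name
      description: 'Returns the current weather for a city.',
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
      },
      strict: true,
    },
  },
  {
    codeInterpreter: {
      type: 'code_interpreter',
      container: { autoContainer: { type: 'auto' } },
    },
  },
];
```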
Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. */\n store?: boolean | null;\n}\n\nexport interface ResponsesInputItem extends ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model.
*/\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. */\n role?: ResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesMessageRoleWithLiterals =\n | ResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface ResponsesInputMessageContent\n extends ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n}\n\nexport interface ImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface FileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface ResponsesTextFormat extends ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n}\n\nexport interface JsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. 
*/\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the\n */\n strict?: boolean | null;\n}\n\nexport interface ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface ResponsesTool extends ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\nexport interface ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: UserLocation;\n}\n\nexport interface UserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface ResponsesFunction {\n /**\n * The type of the function tool. 
Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. OpenAI structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: ResponsesCodeInterpreterContainer;\n}\n\nexport interface ResponsesCodeInterpreterContainer\n extends ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\n/** More info and default values at https://platform.openai.com/docs/api-reference/videos/create */\nexport interface CreateVideoRequest {\n /**\n * Text prompt that describes the video to generate.\n * @maxLength 10000\n */\n prompt?: string;\n /** The video generation model to use. */\n model?: V1VideoModelWithLiterals;\n /**\n * Size of the generated video (width x height in pixels). Examples: \"720x1280\", \"1280x720\".\n * @maxLength 50\n */\n size?: string | null;\n /**\n * Clip duration in seconds.
Default is 4 seconds if not specified.\n * @min 1\n * @max 180\n */\n seconds?: number | null;\n /**\n * Optional publicly accessible URL to an image reference that guides generation.\n * @maxLength 5000\n * @format WEB_URL\n */\n inputReferenceUrl?: string | null;\n}\n\nexport enum V1VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SORA_2 = 'SORA_2',\n SORA_2_PRO = 'SORA_2_PRO',\n}\n\n/** @enumType */\nexport type V1VideoModelWithLiterals =\n | V1VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SORA_2'\n | 'SORA_2_PRO';\n\nexport interface UserRequestInfo {\n /**\n * Interaction id\n * @maxLength 100\n */\n interactionId?: string | null;\n /**\n * Additional tags; use a comma-separated format for multiple tags.\n * @maxLength 1000\n */\n additionalTags?: string | null;\n /**\n * GenAI feature name, required by FinOps for evaluation\n * @maxLength 1000\n */\n featureName?: string | null;\n /**\n * AppDefId to which the cost will be attributed instead of the one that signs the request.\n * Will not work unless your application is explicitly allowed to override cost attribution.\n * Please reach out to #ai-tools-support if you think you need this field.\n * @format GUID\n */\n costAttributionOverrideId?: string | null;\n}\n\nexport interface FallbackProperties {\n /**\n * Flag to indicate whether to opt out of the request forwarding as a fallback.\n * Currently, only the fallback from OpenAI to Azure is supported for certain OpenAI models.\n * If set to true, the request will not be redirected to Azure in the event of a server failure by OpenAI.\n */\n optOut?: boolean | null;\n /** FallbackPromptConfig object that describes an optional second Prompt that can be invoked in case the main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n}\n\nexport interface AsyncGenerationConfig {\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n /** SPI generation configuration. */\n spiGenerationConfig?: SpiGenerationConfig;\n}\n\nexport interface SpiGenerationConfig {\n /**\n * SPI client app_id.\n * @maxLength 100\n */\n appId?: string | null;\n /**\n * SPI client component_id.\n * @maxLength 100\n */\n componentId?: string | null;\n}\n\nexport interface DynamicRequestConfig {\n /**\n * List of GatewayToolDefinitions, used to overwrite tools in the prompt.\n * @maxSize 100\n */\n gatewayToolDefinitions?: GatewayToolDefinition[];\n /**\n * List of GatewayMessageDefinitions, which will be converted to model-specific format and appended to the messages saved in the prompt.\n * @maxSize 100\n */\n gatewayMessageDefinitions?: GatewayMessageDefinition[];\n}\n\nexport interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\n/** @oneof */\nexport interface GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\nexport interface GatewayToolDefinitionCustomTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the tool does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the tool accepts, described as a JSON Schema object.
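A hedged sketch of the `CreateVideoRequest` and `UserRequestInfo` shapes defined above; all values are illustrative, and the import path is an assumption:

```ts
import type {
  CreateVideoRequest,
  UserRequestInfo,
} from '@wix/auto_sdk_ai-gateway_prompts';

// Limits (prompt length, 1-180 seconds) come from the doc comments above.
const video: CreateVideoRequest = {
  prompt: 'A slow aerial shot over a foggy pine forest at dawn.',
  model: 'SORA_2',
  size: '1280x720',
  seconds: 4, // documented default when unspecified
};

const userRequestInfo: UserRequestInfo = {
  interactionId: 'interaction-001', // hypothetical interaction ID
  additionalTags: 'demo,video',
  featureName: 'site-video-generator', // hypothetical feature name for FinOps
};
```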
*/\n parameters?: Record<string, any> | null;\n}\n\nexport interface BuiltInTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional parameters specific to the built-in tool. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GatewayMessageDefinition {\n /** The role of the message author. */\n role?: GatewayMessageDefinitionRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport enum GatewayMessageDefinitionRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n TOOL = 'TOOL',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type GatewayMessageDefinitionRoleWithLiterals =\n | GatewayMessageDefinitionRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents the model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\n/** @oneof */\nexport interface GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents the model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\nexport interface ToolResultContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Whether the tool result is an error. */\n error?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport interface GenerateContentByPromptObjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue the generate content request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateTextByPromptObjectRequest {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties.
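A minimal sketch of a `DynamicRequestConfig` that overwrites the prompt's tools and appends one tool-result message, using only the shapes defined above; the tool name, parameter schema, and IDs are hypothetical, and the import path is an assumption:

```ts
import type { DynamicRequestConfig } from '@wix/auto_sdk_ai-gateway_prompts';

const dynamicRequestConfig: DynamicRequestConfig = {
  gatewayToolDefinitions: [
    {
      customTool: {
        name: 'lookup_order', // hypothetical tool name
        description: 'Looks up an order by its ID.',
        parameters: {
          type: 'object',
          properties: { orderId: { type: 'string' } },
          required: ['orderId'],
        },
      },
    },
  ],
  gatewayMessageDefinitions: [
    {
      role: 'TOOL',
      content: [{ toolResult: { toolUseId: 'toolu_01', error: false } }],
    },
  ],
};
```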
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n /**\n * Extracted text content from the chunk.\n * @maxLength 100\n */\n content?: string | null;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\n/** @oneof */\nexport interface GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n}\n\nexport interface ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one element if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk.
*/\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 100\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial JSON and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ToolCall[];\n}\n\nexport interface ChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkDelta;\n /**\n * The reason the model stopped generating tokens. This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface V1ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one element if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChatCompletionChunkChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkChoiceChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 1000\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial JSON and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n}\n\nexport interface ChatCompletionChunkChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkChoiceChunkDelta;\n /**\n * The reason the model stopped generating tokens.
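A small sketch of consuming `GeneratedTextChunk` values: the convenience `content` field carries the extracted text regardless of which provider-specific variant is populated, so plain text can be accumulated without inspecting the oneof. The import path is an assumption:

```ts
import type { GeneratedTextChunk } from '@wix/auto_sdk_ai-gateway_prompts';

// Concatenate the extracted text across streamed chunks; provider-specific
// payloads remain available on the oneof variants for detailed handling.
function collectStreamedText(chunks: GeneratedTextChunk[]): string {
  let text = '';
  for (const chunk of chunks) {
    if (chunk.content) {
      text += chunk.content;
    }
  }
  return text;
}
```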
This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface GoogleproxyV1AnthropicStreamChunk\n extends GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1ContentBlockDelta\n extends GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface V1AnthropicStreamChunkMessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: GoogleproxyV1Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface AnthropicStreamChunk extends AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n}\n\nexport interface ContentBlockDelta extends ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface MessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface V1AnthropicStreamChunk\n extends V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * Index of the content block this chunk refers to (when relevant).\n * For example, text and tool-input deltas apply to the block at this index.\n */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n}\n\nexport interface V1ContentBlockDelta extends V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\n/** @oneof */\nexport interface V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\nexport interface AnthropicStreamChunkMessageDelta {\n /**\n * Why generation concluded for this assistant message, when 
applicable:\n * \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" | \"pause_turn\" | \"refusal\".\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * The specific custom stop sequence that was produced, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Cumulative token usage at this point in the stream. */\n usage?: V1Usage;\n /** Cost of the request so far, in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateAudioRequest\n extends GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n\nexport interface CreateSpeechRequest {\n /** One of the available TTS models: https://platform.openai.com/docs/models#tts */\n model?: SpeechModelWithLiterals;\n /**\n * The text to generate audio for. The maximum length is 4096 characters.\n * @maxLength 4096\n */\n input?: string;\n /**\n * The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.\n * @maxLength 100\n */\n voice?: string;\n /**\n * The format to audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.\n * @maxLength 100\n */\n responseFormat?: string | null;\n /**\n * The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n * @min 0.25\n * @max 4\n */\n speed?: number | null;\n}\n\nexport enum SpeechModel {\n UNKNOWN_SPEECH_MODEL = 'UNKNOWN_SPEECH_MODEL',\n TTS_1 = 'TTS_1',\n TTS_1_HD = 'TTS_1_HD',\n}\n\n/** @enumType */\nexport type SpeechModelWithLiterals =\n | SpeechModel\n | 'UNKNOWN_SPEECH_MODEL'\n | 'TTS_1'\n | 'TTS_1_HD';\n\nexport interface TextToSpeechRequest {\n /**\n * Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.\n * @maxLength 100\n */\n voiceId?: string;\n /**\n * The output format of the generated audio. List of supported values: mp3_22050_32, mp3_44100_32, mp3_44100_64, mp3_44100_96, mp3_44100_128, mp3_44100_192, pcm_16000, pcm_22050, pcm_24000, pcm_44100, ulaw_8000\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * When enable_logging is set to false full privacy mode will be used for the request.\n * This will mean history features are unavailable for this request, including request stitching.\n * Full privacy mode may only be used by enterprise customers.\n */\n enableLogging?: boolean;\n /**\n * The text that will get converted into speech.\n * @maxLength 10000000\n */\n text?: string;\n /** Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property. */\n modelId?: ElevenLabsTextToSpeechModelWithLiterals;\n /**\n * Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. 
For other models, an error will be returned if language code is provided.\n * @maxLength 100\n */\n languageCode?: string | null;\n /** Voice settings overriding stored settings for the given voice. They are applied only on the given request. */\n voiceSettings?: VoiceSettings;\n /**\n * A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request\n * @maxSize 10\n */\n pronunciationDictionaryLocators?: PronunciationDictionaryLocator[];\n /** If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. */\n seed?: string | null;\n /**\n * The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n previousText?: string | null;\n /**\n * The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n nextText?: string | null;\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n previousRequestIds?: string[];\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n nextRequestIds?: string[];\n /**\n * This parameter controls text normalization with three modes: ‘auto’, ‘on’, and ‘off’. When set to ‘auto’, the system will automatically decide whether to apply text normalization (e.g., spelling out numbers).\n * With ‘on’, text normalization will always be applied, while with ‘off’, it will be skipped. Cannot be turned on for ‘eleven_turbo_v2_5’ model.\n * Defaults to ‘auto’.\n * @maxLength 100\n */\n applyTextNormalization?: string | null;\n /** When set to true, response chunks will include precise character-level timing information for audio-text synchronization. */\n withTimings?: boolean;\n}\n\nexport enum ElevenLabsTextToSpeechModel {\n UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL = 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL',\n ELEVEN_MULTILINGUAL_V2 = 'ELEVEN_MULTILINGUAL_V2',\n ELEVEN_FLASH_V2_5 = 'ELEVEN_FLASH_V2_5',\n ELEVEN_FLASH_V2 = 'ELEVEN_FLASH_V2',\n}\n\n/** @enumType */\nexport type ElevenLabsTextToSpeechModelWithLiterals =\n | ElevenLabsTextToSpeechModel\n | 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL'\n | 'ELEVEN_MULTILINGUAL_V2'\n | 'ELEVEN_FLASH_V2_5'\n | 'ELEVEN_FLASH_V2';\n\nexport interface VoiceSettings {\n /** Defines the stability for voice settings.
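A hedged sketch of a `TextToSpeechRequest` built from the fields above; the voice ID and text are hypothetical, the output format comes from the documented value list, and the import path is an assumption:

```ts
import type { TextToSpeechRequest } from '@wix/auto_sdk_ai-gateway_prompts';

const tts: TextToSpeechRequest = {
  voiceId: '21m00Tcm4TlvDq8ikWAM', // hypothetical ElevenLabs voice ID
  modelId: 'ELEVEN_MULTILINGUAL_V2',
  text: 'Your order has shipped and will arrive on Thursday.',
  outputFormat: 'mp3_44100_128', // one of the documented supported values
  applyTextNormalization: 'auto', // documented default
  withTimings: true, // request character-level timing info in chunks
};
```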
*/\n stability?: number;\n /** Defines the similarity boost for voice settings. */\n similarityBoost?: number;\n /** Defines the style for voice settings. This parameter is available on V2+ models. */\n style?: number | null;\n /** Defines the use speaker boost for voice settings. This parameter is available on V2+ models. */\n useSpeakerBoost?: boolean;\n}\n\nexport interface PronunciationDictionaryLocator {\n /**\n * pronunciation_dictionary_id\n * @maxLength 100\n */\n pronunciationDictionaryId?: string;\n /**\n * version_id\n * @maxLength 100\n */\n versionId?: string;\n}\n\nexport interface GeneratedAudioChunk\n extends GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\n/** @oneof */\nexport interface GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\nexport interface SpeechChunk {\n /** Partial audio file bytes. */\n content?: Uint8Array;\n}\n\nexport interface TextToSpeechChunk {\n /** Base64 encoded audio chunk */\n audioBase64?: Uint8Array;\n /** Alignment information for the generated audio given the input text sequence. */\n alignment?: AlignmentInfoInChunk;\n /** Alignment information for the generated audio given the input normalized text sequence. */\n normalizedAlignment?: AlignmentInfoInChunk;\n}\n\nexport interface AlignmentInfoInChunk {\n /**\n * Array of start times (in seconds) for each character\n * @maxSize 1000000\n */\n characterStartTimesSeconds?: number[];\n /**\n * Array of end times (in seconds) for each character\n * @maxSize 1000000\n */\n characterEndTimesSeconds?: number[];\n /**\n * Array of individual characters from the input or normalized text\n * @maxSize 1000000\n * @maxLength 1\n */\n characters?: string[];\n}\n\nexport interface DomainEvent extends DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. */\n _id?: string;\n /**\n * Fully Qualified Domain Name of an entity. This is a unique identifier assigned to the API main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order.
Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\n/** @oneof */\nexport interface DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n}\n\nexport interface EntityCreatedEvent {\n entity?: string;\n}\n\nexport interface RestoreInfo {\n deletedDate?: Date | null;\n}\n\nexport interface EntityUpdatedEvent {\n /**\n * Since platformized APIs only expose PATCH and not PUT we can't assume that the fields sent from the client are the actual diff.\n * This means that to generate a list of changed fields (as opposed to sent fields) one needs to traverse both objects.\n * We don't want to impose this on all developers and so we leave this traversal to the notification recipients which need it.\n */\n currentEntity?: string;\n}\n\nexport interface EntityDeletedEvent {\n /** Entity that was deleted. */\n deletedEntity?: string | null;\n}\n\nexport interface ActionEvent {\n body?: string;\n}\n\nexport interface MessageEnvelope {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n /** Stringified payload. */\n data?: string;\n}\n\nexport interface IdentificationData extends IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n /** @readonly */\n identityType?: WebhookIdentityTypeWithLiterals;\n}\n\n/** @oneof */\nexport interface IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n}\n\nexport enum WebhookIdentityType {\n UNKNOWN = 'UNKNOWN',\n ANONYMOUS_VISITOR = 'ANONYMOUS_VISITOR',\n MEMBER = 'MEMBER',\n WIX_USER = 'WIX_USER',\n APP = 'APP',\n}\n\n/** @enumType */\nexport type WebhookIdentityTypeWithLiterals =\n | WebhookIdentityType\n | 'UNKNOWN'\n | 'ANONYMOUS_VISITOR'\n | 'MEMBER'\n | 'WIX_USER'\n | 'APP';\n\nexport interface BaseEventMetadata {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n}\n\nexport interface EventMetadata extends BaseEventMetadata {\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. */\n _id?: string;\n /**\n * Fully Qualified Domain Name of an entity.
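Since `IdentificationData` is also a `@oneof`, at most one identity ID is populated per envelope. A small sketch of resolving the caller type, assuming the import path; the helper name is illustrative:

```ts
import type { MessageEnvelope } from '@wix/auto_sdk_ai-gateway_prompts';

// Resolve which identity variant is populated on an incoming envelope.
function describeCaller(envelope: MessageEnvelope): string {
  const identity = envelope.identity;
  if (!identity) return 'no identity';
  if (identity.memberId) return `member:${identity.memberId}`;
  if (identity.anonymousVisitorId) return `visitor:${identity.anonymousVisitorId}`;
  if (identity.wixUserId) return `wix-user:${identity.wixUserId}`;
  if (identity.appId) return `app:${identity.appId}`;
  return identity.identityType ?? 'UNKNOWN';
}
```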
This is a unique identifier assigned to the API main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order. Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\nexport interface PromptProxyCompletedEnvelope {\n data: GenerationCompletedResultEvent;\n metadata: EventMetadata;\n}\n\n/** @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @webhook\n * @eventType wix.api_infra.v1.prompt_proxy_completed\n * @slug completed\n * @documentationMaturity preview\n */\nexport declare function onPromptProxyCompleted(\n handler: (event: PromptProxyCompletedEnvelope) => void | Promise<void>\n): void;\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n * @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObject\n */\nexport async function generateContentByPromptObject(\n options?: GenerateContentByPromptObjectOptions\n): Promise<\n NonNullablePaths<\n GenerateContentByPromptObjectResponse,\n | `response.openAiChatCompletionResponse.model`\n | `response.openAiChatCompletionResponse.choices`\n | `response.openAiChatCompletionResponse.choices.${number}.message.role`\n | `response.googleTextBisonResponse.predictions`\n | `response.googleChatBisonResponse.predictions`\n | `response.azureChatCompletionResponse.model`\n | `response.azureChatCompletionResponse.choices`\n | `response.azureChatCompletionResponse.choices.${number}.message.role`\n | `response.googleGeminiGenerateContentResponse.candidates`\n | `response.googleGeminiGenerateContentResponse.candidates.${number}.finishReason`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails.${number}.modality`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.candidatesTokensDetails`\n | `response.anthropicClaudeResponse.responseId`\n | `response.anthropicClaudeResponse.model`\n | `response.anthropicClaudeResponse.responseType`\n | 
`response.anthropicClaudeResponse.role`\n | `response.anthropicClaudeResponse.content`\n | `response.anthropicClaudeResponse.usage.inputTokens`\n | `response.anthropicClaudeResponse.usage.outputTokens`\n | `response.anthropicClaudeResponse.contentBlocks`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.googleAnthropicClaudeResponse.responseId`\n | `response.googleAnthropicClaudeResponse.model`\n | `response.googleAnthropicClaudeResponse.responseType`\n | `response.googleAnthropicClaudeResponse.role`\n | `response.googleAnthropicClaudeResponse.content`\n | `response.googleAnthropicClaudeResponse.usage.inputTokens`\n | `response.googleAnthropicClaudeResponse.usage.outputTokens`\n | `response.googleAnthropicClaudeResponse.contentBlocks`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.responseId`\n | `response.invokeAnthropicModelResponse.model`\n | `response.invokeAnthropicModelResponse.type`\n | `response.invokeAnthropicModelResponse.role`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral1hInputTokens`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral5mInputTokens`\n | `response.invokeAnthropicModelResponse.usage.inputTokens`\n | `response.invokeAnthropicModelResponse.usage.outputTokens`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webSearchRequests`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webFetchRequests`\n | `response.invokeAnthropicModelResponse.container.expiresAt`\n | `response.invokeAnthropicModelResponse.container._id`\n | `response.invokeAnthropicModelResponse.content`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.text`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.cacheControl.type`\n | `response.invokeAnthropicModelResponse.content.${number}.image.mediaType`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.signature`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.thinking`\n | `response.invokeAnthropicModelResponse.content.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.content.${number}.document.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentSuccess.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentError.type`\n | 
`response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.type`\n | `response.perplexityChatCompletionResponse.model`\n | `response.perplexityChatCompletionResponse.citations`\n | `response.perplexityChatCompletionResponse.choices`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.content`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.role`\n | `response.perplexityChatCompletionResponse.images`\n | `response.perplexityChatCompletionResponse.relatedQuestions`\n | `response.openAiCreateImageResponse.data`\n | `response.openAiCreateImageResponse.model`\n | `response.stabilityAiTextToImageResponse.data`\n | `response.stabilityAiTextToImageResponse.model`\n | `response.stabilityAiGenerateCoreResponse.data`\n | `response.stabilityAiGenerateCoreResponse.model`\n | `response.stabilityAiStableDiffusionResponse.data`\n | `response.stabilityAiStableDiffusionResponse.model`\n | `response.replicateCreatePredictionResponse.output`\n | `response.replicateCreatePredictionResponse.textOutput`\n | `response.stabilityAiEditImageWithPromptResponse.data`\n | `response.stabilityAiEditImageWithPromptResponse.model`\n | `response.runwareTextToImageResponse.data`\n | `response.runwareTextToImageResponse.data.${number}.taskUuid`\n | `response.runwareTextToImageResponse.data.${number}.imageUuid`\n | `response.runwareTextToImageResponse.data.${number}.nsfwContent`\n | `response.googleGenerateImageResponse.predictions`\n | `response.googleGenerateVideoResponse.videos`\n | `response.mlPlatformGenerateImageResponse.output`\n | `response.openAiCreateOpenAiImageResponse.data`\n | `response.openAiCreateOpenAiImageResponse.model`\n | `response.openAiEditOpenAiImageResponse.data`\n | `response.openAiEditOpenAiImageResponse.model`\n | `response.googleCreateChatCompletionResponse.model`\n | `response.googleCreateChatCompletionResponse.choices`\n | `response.googleCreateChatCompletionResponse.choices.${number}.message.role`\n | `response.mlPlatformOpenAiRawResponse.modelId`\n | `response.mlPlatformOpenAiRawResponse.choices`\n | `response.mlPlatformOpenAiRawResponse.choices.${number}.message.role`\n | `response.runwareVideoInferenceResponse.data`\n | `response.runwareVideoInferenceResponse.data.${number}.taskType`\n | `response.runwareVideoInferenceResponse.data.${number}.taskUuid`\n | `response.openAiResponsesResponse.model`\n | `response.openAiResponsesResponse.output`\n | `response.azureOpenAiResponsesResponse.model`\n | `response.azureOpenAiResponsesResponse.output`\n | `response.generatedContent.texts`\n | `response.generatedContent.images`\n | `response.generatedContent.images.${number}.url`\n | `response.generatedContent.videos`\n | `response.generatedContent.thinkingTexts`\n | `response.generatedContent.tools`\n | `response.generatedContent.tools.${number}.name`\n | `materializedPrompt.openAiChatCompletionRequest.model`\n | `materializedPrompt.openAiChatCompletionRequest.messages`\n | `materializedPrompt.openAiChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.openAiChatCompletionRequest.functions`\n | `materializedPrompt.openAiChatCompletionRequest.stop`\n | `materializedPrompt.openAiChatCompletionRequest.tools`\n | `materializedPrompt.openAiChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleTextBisonRequest.instances`\n | `materializedPrompt.googleTextBisonRequest.parameters.stopSequences`\n | `materializedPrompt.googleTextBisonRequest.model`\n | `materializedPrompt.googleChatBisonRequest.instances`\n | 
`materializedPrompt.googleChatBisonRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.messages`\n | `materializedPrompt.azureChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.azureChatCompletionRequest.functions`\n | `materializedPrompt.azureChatCompletionRequest.stop`\n | `materializedPrompt.azureChatCompletionRequest.tools`\n | `materializedPrompt.azureChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleGeminiGenerateContentRequest.model`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents.${number}.role`\n | `materializedPrompt.googleGeminiGenerateContentRequest.systemInstruction.parts`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.googleSearchRetrieval.dynamicRetrievalConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.computerUse.environment`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.category`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.threshold`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.stopSequences`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.responseModalities`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.imageConfig.personGeneration`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.mediaResolution`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.allowedFunctionNames`\n | `materializedPrompt.anthropicClaudeRequest.model`\n | `materializedPrompt.anthropicClaudeRequest.messages`\n | `materializedPrompt.anthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.anthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.anthropicClaudeRequest.stopSequences`\n | `materializedPrompt.anthropicClaudeRequest.tools`\n | `materializedPrompt.anthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.anthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.anthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.model`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.googleAnthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.googleAnthropicClaudeRequest.stopSequences`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.name`\n | 
`materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.invokeAnthropicModelRequest.model`\n | `materializedPrompt.invokeAnthropicModelRequest.messages`\n | `materializedPrompt.invokeAnthropicModelRequest.messages.${number}.role`\n | `materializedPrompt.invokeAnthropicModelRequest.systemPrompt`\n | `materializedPrompt.invokeAnthropicModelRequest.stopSequences`\n | `materializedPrompt.invokeAnthropicModelRequest.tools`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.name`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.cacheControl.type`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayWidthPx`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayHeightPx`\n | `materializedPrompt.invokeAnthropicModelRequest.toolChoice.type`\n | `materializedPrompt.invokeAnthropicModelRequest.thinking.budgetTokens`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.name`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.type`\n | `materializedPrompt.llamaModelRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.quality`\n | `materializedPrompt.openAiCreateImageRequest.size`\n | `materializedPrompt.openAiCreateImageRequest.style`\n | `materializedPrompt.stabilityAiTextToImageRequest.model`\n | `materializedPrompt.stabilityAiTextToImageRequest.textPrompts`\n | `materializedPrompt.stabilityAiTextToImageRequest.clipGuidancePreset`\n | `materializedPrompt.stabilityAiTextToImageRequest.sampler`\n | `materializedPrompt.stabilityAiTextToImageRequest.stylePreset`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.model`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.stylePreset`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.mode`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.model`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.outputFormat`\n | `materializedPrompt.blackForestLabsGenerateImageRequest.model`\n | `materializedPrompt.replicateCreatePredictionRequest.lucatacoFlorence2Large.taskInput`\n | `materializedPrompt.replicateCreatePredictionRequest.perceptronIsaac01.response`\n | `materializedPrompt.replicateCreatePredictionRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.stylePreset`\n | `materializedPrompt.runwareTextToImageRequest.positivePrompt`\n | `materializedPrompt.runwareTextToImageRequest.height`\n | `materializedPrompt.runwareTextToImageRequest.width`\n | `materializedPrompt.runwareTextToImageRequest.referenceImages`\n | `materializedPrompt.runwareTextToImageRequest.model`\n | `materializedPrompt.runwareTextToImageRequest.loraModels`\n | `materializedPrompt.runwareTextToImageRequest.inputs.referenceImages`\n | `materializedPrompt.mlPlatformLlamaModelRequest.modelId`\n | `materializedPrompt.perplexityChatCompletionRequest.model`\n | `materializedPrompt.perplexityChatCompletionRequest.messages`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.content`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.perplexityChatCompletionRequest.searchDomainFilter`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.jsonSchema`\n | 
`materializedPrompt.perplexityChatCompletionRequest.responseFormat.regex`\n | `materializedPrompt.googleGenerateImageRequest.model`\n | `materializedPrompt.googleGenerateImageRequest.instances`\n | `materializedPrompt.mlPlatformGenerateImageRequest.model`\n | `materializedPrompt.openAiCreateOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.imageUrls`\n | `materializedPrompt.googleGenerateVideoRequest.model`\n | `materializedPrompt.googleGenerateVideoRequest.instances`\n | `materializedPrompt.googleCreateChatCompletionRequest.model`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.modelId`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages.${number}.role`\n | `materializedPrompt.runwareVideoInferenceRequest.outputFormat`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages.${number}.inputImage`\n | `materializedPrompt.runwareVideoInferenceRequest.referenceImages`\n | `materializedPrompt.runwareVideoInferenceRequest.model`\n | `materializedPrompt.openAiResponsesRequest.model`\n | `materializedPrompt.openAiResponsesRequest.include`\n | `materializedPrompt.openAiResponsesRequest.input`\n | `materializedPrompt.openAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.openAiResponsesRequest.tools`\n | `materializedPrompt.azureOpenAiResponsesRequest.model`\n | `materializedPrompt.azureOpenAiResponsesRequest.include`\n | `materializedPrompt.azureOpenAiResponsesRequest.input`\n | `materializedPrompt.azureOpenAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.azureOpenAiResponsesRequest.tools`\n | `materializedPrompt.openAiCreateVideoRequest.prompt`\n | `materializedPrompt.openAiCreateVideoRequest.model`\n | `materializedPrompt.templatedParameterNames`\n | `materializedPrompt.templatedDynamicPropertiesNames`\n | `predictionId`,\n 8\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n asyncGenerationConfig: options?.asyncGenerationConfig,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateContentByPromptObject(payload);\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n asyncGenerationConfig: '$[0].asyncGenerationConfig',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n 
}\n}\n\nexport interface GenerateContentByPromptObjectOptions {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n * @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GENERATE_TEXT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateTextByPromptObjectStreamed\n */\nexport async function generateTextByPromptObjectStreamed(\n options?: GenerateTextByPromptObjectStreamedOptions\n): Promise<\n NonNullablePaths<\n GeneratedTextChunk,\n | `azureChatCompletionChunk.choices`\n | `azureChatCompletionChunk.choices.${number}.delta.role`\n | `azureChatCompletionChunk.model`\n | `openaiChatCompletionChunk.choices`\n | `openaiChatCompletionChunk.choices.${number}.delta.role`\n | `openaiChatCompletionChunk.model`\n | `googleAnthropicStreamChunk.toolUse.cacheControl.type`\n | `googleAnthropicStreamChunk.contentBlockDelta.text`\n | `googleAnthropicStreamChunk.contentBlockDelta.partialJson`\n | `googleAnthropicStreamChunk.contentBlockDelta.thinking`\n | `googleAnthropicStreamChunk.contentBlockDelta.signature`\n | `googleAnthropicStreamChunk.messageDelta.usage.inputTokens`\n | `googleAnthropicStreamChunk.messageDelta.usage.outputTokens`\n | `googleAnthropicStreamChunk.redactedThinking.data`\n | `googleAnthropicStreamChunk.responseId`\n | `googleAnthropicStreamChunk.model`\n | `googleGeminiStreamChunk.candidates`\n | `googleGeminiStreamChunk.candidates.${number}.finishReason`\n | `googleGeminiStreamChunk.usageMetadata.promptTokensDetails`\n | `googleGeminiStreamChunk.usageMetadata.promptTokensDetails.${number}.modality`\n | `googleGeminiStreamChunk.usageMetadata.candidatesTokensDetails`\n | `amazonAnthropicStreamChunk.toolUse.cacheControl.type`\n | `amazonAnthropicStreamChunk.contentBlockDelta.text`\n | `amazonAnthropicStreamChunk.contentBlockDelta.partialJson`\n | `amazonAnthropicStreamChunk.contentBlockDelta.thinking`\n | `amazonAnthropicStreamChunk.contentBlockDelta.signature`\n | `amazonAnthropicStreamChunk.messageDelta.usage.inputTokens`\n | `amazonAnthropicStreamChunk.messageDelta.usage.outputTokens`\n | `amazonAnthropicStreamChunk.redactedThinking.data`\n | `amazonAnthropicStreamChunk.responseId`\n | `amazonAnthropicStreamChunk.model`\n | `anthropicStreamChunk.toolUse.cacheControl.type`\n | `anthropicStreamChunk.webSearchToolResult.contentResults.items`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.type`\n | 
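// Editor's note: an illustrative call of generateContentByPromptObject, not part of
// the package. The Prompt payload, model id, and {{siteName}} templating syntax are
// hypothetical; when invoked through a Wix SDK client, the host injects the HTTP
// client as a hidden second argument, so application code passes only the options
// object typed above.
const generated = await generateContentByPromptObject({
  prompt: {
    openAiChatCompletionRequest: {
      model: 'gpt-4o', // hypothetical model id
      messages: [{ role: 'user', content: 'Write a tagline for {{siteName}}' }],
    },
  } as Prompt,
  params: { siteName: 'my-site' }, // substituted into templated parameters
});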
`anthropicStreamChunk.webFetchToolResult.contentSuccess.content.type`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.textContent.text`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.image.mediaType`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.thinking.signature`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.thinking.thinking`\n | `anthropicStreamChunk.webFetchToolResult.contentSuccess.content.source.content.${number}.redactedThinking.data`\n | `anthropicStreamChunk.webFetchToolResult.contentError.type`\n | `anthropicStreamChunk.webFetchToolResult.type`\n | `anthropicStreamChunk.contentBlockDelta.text`\n | `anthropicStreamChunk.contentBlockDelta.partialJson`\n | `anthropicStreamChunk.contentBlockDelta.thinking`\n | `anthropicStreamChunk.contentBlockDelta.signature`\n | `anthropicStreamChunk.messageDelta.usage.cacheCreation.ephemeral1hInputTokens`\n | `anthropicStreamChunk.messageDelta.usage.cacheCreation.ephemeral5mInputTokens`\n | `anthropicStreamChunk.messageDelta.usage.inputTokens`\n | `anthropicStreamChunk.messageDelta.usage.outputTokens`\n | `anthropicStreamChunk.messageDelta.usage.serverToolUse.webSearchRequests`\n | `anthropicStreamChunk.messageDelta.usage.serverToolUse.webFetchRequests`\n | `anthropicStreamChunk.responseId`\n | `anthropicStreamChunk.model`\n | `predictionId`,\n 10\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n asyncResultTopic: options?.asyncResultTopic,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateTextByPromptObjectStreamed(\n payload\n );\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n asyncResultTopic: '$[0].asyncResultTopic',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateTextByPromptObjectStreamedOptions {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. 
*/\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\n/** @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GATEWAY_GENERATE_CONTENT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObjectAsync\n */\nexport async function generateContentByPromptObjectAsync(\n options?: GenerateContentByPromptObjectAsyncOptions\n): Promise<\n NonNullablePaths<\n GenerateContentByPromptObjectResponse,\n | `response.openAiChatCompletionResponse.model`\n | `response.openAiChatCompletionResponse.choices`\n | `response.openAiChatCompletionResponse.choices.${number}.message.role`\n | `response.googleTextBisonResponse.predictions`\n | `response.googleChatBisonResponse.predictions`\n | `response.azureChatCompletionResponse.model`\n | `response.azureChatCompletionResponse.choices`\n | `response.azureChatCompletionResponse.choices.${number}.message.role`\n | `response.googleGeminiGenerateContentResponse.candidates`\n | `response.googleGeminiGenerateContentResponse.candidates.${number}.finishReason`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.promptTokensDetails.${number}.modality`\n | `response.googleGeminiGenerateContentResponse.usageMetadata.candidatesTokensDetails`\n | `response.anthropicClaudeResponse.responseId`\n | `response.anthropicClaudeResponse.model`\n | `response.anthropicClaudeResponse.responseType`\n | `response.anthropicClaudeResponse.role`\n | `response.anthropicClaudeResponse.content`\n | `response.anthropicClaudeResponse.usage.inputTokens`\n | `response.anthropicClaudeResponse.usage.outputTokens`\n | `response.anthropicClaudeResponse.contentBlocks`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.anthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.googleAnthropicClaudeResponse.responseId`\n | `response.googleAnthropicClaudeResponse.model`\n | `response.googleAnthropicClaudeResponse.responseType`\n | `response.googleAnthropicClaudeResponse.role`\n | `response.googleAnthropicClaudeResponse.content`\n | `response.googleAnthropicClaudeResponse.usage.inputTokens`\n | `response.googleAnthropicClaudeResponse.usage.outputTokens`\n | `response.googleAnthropicClaudeResponse.contentBlocks`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.text`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.textContent.cacheControl.type`\n | 
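// Editor's note: a sketch for consuming GeneratedTextChunk values from
// generateTextByPromptObjectStreamed, not part of the package. Only three of the
// chunk variants are handled; the nested field shapes are inferred from the path
// unions above and the providers' documented chunk formats, so treat them as
// assumptions.
function chunkText(chunk: GeneratedTextChunk): string {
  return (
    chunk.openaiChatCompletionChunk?.choices?.[0]?.delta?.content ??
    chunk.anthropicStreamChunk?.contentBlockDelta?.text ??
    chunk.googleAnthropicStreamChunk?.contentBlockDelta?.text ??
    '' // other variants (Gemini, Azure, Bedrock) omitted for brevity
  );
}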
`response.googleAnthropicClaudeResponse.contentBlocks.${number}.imageUrl.mediaType`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.signature`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.thinking.thinking`\n | `response.googleAnthropicClaudeResponse.contentBlocks.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.responseId`\n | `response.invokeAnthropicModelResponse.model`\n | `response.invokeAnthropicModelResponse.type`\n | `response.invokeAnthropicModelResponse.role`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral1hInputTokens`\n | `response.invokeAnthropicModelResponse.usage.cacheCreation.ephemeral5mInputTokens`\n | `response.invokeAnthropicModelResponse.usage.inputTokens`\n | `response.invokeAnthropicModelResponse.usage.outputTokens`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webSearchRequests`\n | `response.invokeAnthropicModelResponse.usage.serverToolUse.webFetchRequests`\n | `response.invokeAnthropicModelResponse.container.expiresAt`\n | `response.invokeAnthropicModelResponse.container._id`\n | `response.invokeAnthropicModelResponse.content`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.text`\n | `response.invokeAnthropicModelResponse.content.${number}.textContent.cacheControl.type`\n | `response.invokeAnthropicModelResponse.content.${number}.image.mediaType`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.signature`\n | `response.invokeAnthropicModelResponse.content.${number}.thinking.thinking`\n | `response.invokeAnthropicModelResponse.content.${number}.redactedThinking.data`\n | `response.invokeAnthropicModelResponse.content.${number}.document.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentSuccess.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.contentError.type`\n | `response.invokeAnthropicModelResponse.content.${number}.webFetchToolResult.type`\n | `response.perplexityChatCompletionResponse.model`\n | `response.perplexityChatCompletionResponse.citations`\n | `response.perplexityChatCompletionResponse.choices`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.content`\n | `response.perplexityChatCompletionResponse.choices.${number}.message.role`\n | `response.perplexityChatCompletionResponse.images`\n | `response.perplexityChatCompletionResponse.relatedQuestions`\n | `response.openAiCreateImageResponse.data`\n | `response.openAiCreateImageResponse.model`\n | `response.stabilityAiTextToImageResponse.data`\n | `response.stabilityAiTextToImageResponse.model`\n | `response.stabilityAiGenerateCoreResponse.data`\n | `response.stabilityAiGenerateCoreResponse.model`\n | `response.stabilityAiStableDiffusionResponse.data`\n | `response.stabilityAiStableDiffusionResponse.model`\n | `response.replicateCreatePredictionResponse.output`\n | `response.replicateCreatePredictionResponse.textOutput`\n | `response.stabilityAiEditImageWithPromptResponse.data`\n | `response.stabilityAiEditImageWithPromptResponse.model`\n | `response.runwareTextToImageResponse.data`\n | `response.runwareTextToImageResponse.data.${number}.taskUuid`\n | `response.runwareTextToImageResponse.data.${number}.imageUuid`\n | `response.runwareTextToImageResponse.data.${number}.nsfwContent`\n | `response.googleGenerateImageResponse.predictions`\n | `response.googleGenerateVideoResponse.videos`\n | `response.mlPlatformGenerateImageResponse.output`\n | 
`response.openAiCreateOpenAiImageResponse.data`\n | `response.openAiCreateOpenAiImageResponse.model`\n | `response.openAiEditOpenAiImageResponse.data`\n | `response.openAiEditOpenAiImageResponse.model`\n | `response.googleCreateChatCompletionResponse.model`\n | `response.googleCreateChatCompletionResponse.choices`\n | `response.googleCreateChatCompletionResponse.choices.${number}.message.role`\n | `response.mlPlatformOpenAiRawResponse.modelId`\n | `response.mlPlatformOpenAiRawResponse.choices`\n | `response.mlPlatformOpenAiRawResponse.choices.${number}.message.role`\n | `response.runwareVideoInferenceResponse.data`\n | `response.runwareVideoInferenceResponse.data.${number}.taskType`\n | `response.runwareVideoInferenceResponse.data.${number}.taskUuid`\n | `response.openAiResponsesResponse.model`\n | `response.openAiResponsesResponse.output`\n | `response.azureOpenAiResponsesResponse.model`\n | `response.azureOpenAiResponsesResponse.output`\n | `response.generatedContent.texts`\n | `response.generatedContent.images`\n | `response.generatedContent.images.${number}.url`\n | `response.generatedContent.videos`\n | `response.generatedContent.thinkingTexts`\n | `response.generatedContent.tools`\n | `response.generatedContent.tools.${number}.name`\n | `materializedPrompt.openAiChatCompletionRequest.model`\n | `materializedPrompt.openAiChatCompletionRequest.messages`\n | `materializedPrompt.openAiChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.openAiChatCompletionRequest.functions`\n | `materializedPrompt.openAiChatCompletionRequest.stop`\n | `materializedPrompt.openAiChatCompletionRequest.tools`\n | `materializedPrompt.openAiChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleTextBisonRequest.instances`\n | `materializedPrompt.googleTextBisonRequest.parameters.stopSequences`\n | `materializedPrompt.googleTextBisonRequest.model`\n | `materializedPrompt.googleChatBisonRequest.instances`\n | `materializedPrompt.googleChatBisonRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.model`\n | `materializedPrompt.azureChatCompletionRequest.messages`\n | `materializedPrompt.azureChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.azureChatCompletionRequest.functions`\n | `materializedPrompt.azureChatCompletionRequest.stop`\n | `materializedPrompt.azureChatCompletionRequest.tools`\n | `materializedPrompt.azureChatCompletionRequest.tools.${number}.type`\n | `materializedPrompt.googleGeminiGenerateContentRequest.model`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents`\n | `materializedPrompt.googleGeminiGenerateContentRequest.contents.${number}.role`\n | `materializedPrompt.googleGeminiGenerateContentRequest.systemInstruction.parts`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.googleSearchRetrieval.dynamicRetrievalConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.tools.${number}.computerUse.environment`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.category`\n | `materializedPrompt.googleGeminiGenerateContentRequest.safetySettings.${number}.threshold`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.stopSequences`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.responseModalities`\n | 
`materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.imageConfig.personGeneration`\n | `materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.mediaResolution`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.mode`\n | `materializedPrompt.googleGeminiGenerateContentRequest.toolConfig.functionCallingConfig.allowedFunctionNames`\n | `materializedPrompt.anthropicClaudeRequest.model`\n | `materializedPrompt.anthropicClaudeRequest.messages`\n | `materializedPrompt.anthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.anthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.anthropicClaudeRequest.stopSequences`\n | `materializedPrompt.anthropicClaudeRequest.tools`\n | `materializedPrompt.anthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.anthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.anthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.anthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.model`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages`\n | `materializedPrompt.googleAnthropicClaudeRequest.messages.${number}.role`\n | `materializedPrompt.googleAnthropicClaudeRequest.systemPrompt`\n | `materializedPrompt.googleAnthropicClaudeRequest.stopSequences`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools`\n | `materializedPrompt.googleAnthropicClaudeRequest.tools.${number}.cacheControl.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.toolChoice.type`\n | `materializedPrompt.googleAnthropicClaudeRequest.thinking.budgetTokens`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.name`\n | `materializedPrompt.googleAnthropicClaudeRequest.mcpServers.${number}.type`\n | `materializedPrompt.invokeAnthropicModelRequest.model`\n | `materializedPrompt.invokeAnthropicModelRequest.messages`\n | `materializedPrompt.invokeAnthropicModelRequest.messages.${number}.role`\n | `materializedPrompt.invokeAnthropicModelRequest.systemPrompt`\n | `materializedPrompt.invokeAnthropicModelRequest.stopSequences`\n | `materializedPrompt.invokeAnthropicModelRequest.tools`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.name`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.custom.cacheControl.type`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayWidthPx`\n | `materializedPrompt.invokeAnthropicModelRequest.tools.${number}.computerUse.displayHeightPx`\n | `materializedPrompt.invokeAnthropicModelRequest.toolChoice.type`\n | `materializedPrompt.invokeAnthropicModelRequest.thinking.budgetTokens`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.name`\n | `materializedPrompt.invokeAnthropicModelRequest.mcpServers.${number}.type`\n | `materializedPrompt.llamaModelRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.model`\n | `materializedPrompt.openAiCreateImageRequest.quality`\n | `materializedPrompt.openAiCreateImageRequest.size`\n | `materializedPrompt.openAiCreateImageRequest.style`\n | `materializedPrompt.stabilityAiTextToImageRequest.model`\n | 
`materializedPrompt.stabilityAiTextToImageRequest.textPrompts`\n | `materializedPrompt.stabilityAiTextToImageRequest.clipGuidancePreset`\n | `materializedPrompt.stabilityAiTextToImageRequest.sampler`\n | `materializedPrompt.stabilityAiTextToImageRequest.stylePreset`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.model`\n | `materializedPrompt.stabilityAiGenerateCoreRequest.stylePreset`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.mode`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.model`\n | `materializedPrompt.stabilityAiStableDiffusionRequest.outputFormat`\n | `materializedPrompt.blackForestLabsGenerateImageRequest.model`\n | `materializedPrompt.replicateCreatePredictionRequest.lucatacoFlorence2Large.taskInput`\n | `materializedPrompt.replicateCreatePredictionRequest.perceptronIsaac01.response`\n | `materializedPrompt.replicateCreatePredictionRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.model`\n | `materializedPrompt.stabilityAiEditWithPromptRequest.stylePreset`\n | `materializedPrompt.runwareTextToImageRequest.positivePrompt`\n | `materializedPrompt.runwareTextToImageRequest.height`\n | `materializedPrompt.runwareTextToImageRequest.width`\n | `materializedPrompt.runwareTextToImageRequest.referenceImages`\n | `materializedPrompt.runwareTextToImageRequest.model`\n | `materializedPrompt.runwareTextToImageRequest.loraModels`\n | `materializedPrompt.runwareTextToImageRequest.inputs.referenceImages`\n | `materializedPrompt.mlPlatformLlamaModelRequest.modelId`\n | `materializedPrompt.perplexityChatCompletionRequest.model`\n | `materializedPrompt.perplexityChatCompletionRequest.messages`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.content`\n | `materializedPrompt.perplexityChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.perplexityChatCompletionRequest.searchDomainFilter`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.jsonSchema`\n | `materializedPrompt.perplexityChatCompletionRequest.responseFormat.regex`\n | `materializedPrompt.googleGenerateImageRequest.model`\n | `materializedPrompt.googleGenerateImageRequest.instances`\n | `materializedPrompt.mlPlatformGenerateImageRequest.model`\n | `materializedPrompt.openAiCreateOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.model`\n | `materializedPrompt.openAiEditOpenAiImageRequest.imageUrls`\n | `materializedPrompt.googleGenerateVideoRequest.model`\n | `materializedPrompt.googleGenerateVideoRequest.instances`\n | `materializedPrompt.googleCreateChatCompletionRequest.model`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages`\n | `materializedPrompt.googleCreateChatCompletionRequest.messages.${number}.role`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.modelId`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages`\n | `materializedPrompt.mlPlatformOpenAiRawRequest.messages.${number}.role`\n | `materializedPrompt.runwareVideoInferenceRequest.outputFormat`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages`\n | `materializedPrompt.runwareVideoInferenceRequest.frameImages.${number}.inputImage`\n | `materializedPrompt.runwareVideoInferenceRequest.referenceImages`\n | `materializedPrompt.runwareVideoInferenceRequest.model`\n | `materializedPrompt.openAiResponsesRequest.model`\n | `materializedPrompt.openAiResponsesRequest.include`\n | `materializedPrompt.openAiResponsesRequest.input`\n | 
`materializedPrompt.openAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.openAiResponsesRequest.tools`\n | `materializedPrompt.azureOpenAiResponsesRequest.model`\n | `materializedPrompt.azureOpenAiResponsesRequest.include`\n | `materializedPrompt.azureOpenAiResponsesRequest.input`\n | `materializedPrompt.azureOpenAiResponsesRequest.input.${number}.message.role`\n | `materializedPrompt.azureOpenAiResponsesRequest.tools`\n | `materializedPrompt.openAiCreateVideoRequest.prompt`\n | `materializedPrompt.openAiCreateVideoRequest.model`\n | `materializedPrompt.templatedParameterNames`\n | `materializedPrompt.templatedDynamicPropertiesNames`\n | `predictionId`,\n 8\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n prompt: options?.prompt,\n params: options?.params,\n userRequestInfo: options?.userRequestInfo,\n fallbackProperties: options?.fallbackProperties,\n dynamicProperties: options?.dynamicProperties,\n asyncGenerationConfig: options?.asyncGenerationConfig,\n dynamicRequestConfig: options?.dynamicRequestConfig,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateContentByPromptObjectAsync(\n payload\n );\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n prompt: '$[0].prompt',\n params: '$[0].params',\n userRequestInfo: '$[0].userRequestInfo',\n fallbackProperties: '$[0].fallbackProperties',\n dynamicProperties: '$[0].dynamicProperties',\n asyncGenerationConfig: '$[0].asyncGenerationConfig',\n dynamicRequestConfig: '$[0].dynamicRequestConfig',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateContentByPromptObjectAsyncOptions {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\n/** @internal\n * @documentationMaturity preview\n * @permissionId API_INFRA.WIX_AI_EXTERNAL_GENERATE_TEXT\n * @fqn wix.api_infra.v1.WixAiExternalGateway.GenerateAudioStreamed\n */\nexport async function generateAudioStreamed(\n options?: GenerateAudioStreamedOptions\n): Promise<\n NonNullablePaths<\n GeneratedAudioChunk,\n | `openAiSpeechChunk.content`\n | `elevenlabsSpeechChunk.audioBase64`\n | `elevenlabsSpeechChunk.alignment.characterStartTimesSeconds`\n | `elevenlabsSpeechChunk.alignment.characterEndTimesSeconds`\n | `elevenlabsSpeechChunk.alignment.characters`,\n 4\n >\n> {\n // @ts-ignore\n const { httpClient, sideEffects } = arguments[1] as {\n httpClient: HttpClient;\n sideEffects?: any;\n };\n\n const payload = renameKeysFromSDKRequestToRESTRequest({\n openAiCreateSpeechRequest: options?.openAiCreateSpeechRequest,\n elevenlabsTextToSpeechRequest: options?.elevenlabsTextToSpeechRequest,\n userRequestInfo: options?.userRequestInfo,\n });\n\n const reqOpts =\n ambassadorWixApiInfraV1PromptProxy.generateAudioStreamed(payload);\n\n sideEffects?.onSiteCall?.();\n try {\n const result = await httpClient.request(reqOpts);\n sideEffects?.onSuccess?.(result);\n\n return renameKeysFromRESTResponseToSDKResponse(result.data)!;\n } catch (err: any) {\n const transformedError = sdkTransformError(\n err,\n {\n spreadPathsToArguments: {},\n explicitPathsToArguments: {\n openAiCreateSpeechRequest: '$[0].openAiCreateSpeechRequest',\n elevenlabsTextToSpeechRequest: '$[0].elevenlabsTextToSpeechRequest',\n userRequestInfo: '$[0].userRequestInfo',\n },\n singleArgumentUnchanged: false,\n },\n ['options']\n );\n sideEffects?.onError?.(err);\n\n throw transformedError;\n }\n}\n\nexport interface GenerateAudioStreamedOptions\n extends GenerateAudioStreamedOptionsAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. 
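// Editor's note: a sketch of the async flow, not part of the package.
// generateContentByPromptObjectAsync queues the generation, and the final result is
// delivered to the onPromptProxyCompleted webhook declared earlier, correlated by
// predictionId.
onPromptProxyCompleted(async (event) => {
  const { predictionId, errorMessage, generationResult } = event.data;
  if (errorMessage) {
    console.error(`prediction ${predictionId} failed: ${errorMessage}`);
    return;
  }
  console.log(`prediction ${predictionId} completed`, generationResult);
});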
*/\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioStreamedOptionsAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n","import { transformSDKFloatToRESTFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTFloatToSDKFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTBytesToSDKBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformPaths } from '@wix/sdk-runtime/transformations/transform-paths';\nimport { resolveUrl } from '@wix/sdk-runtime/rest-modules';\nimport { ResolveUrlOpts } from '@wix/sdk-runtime/rest-modules';\nimport { RequestOptionsFactory } from '@wix/sdk-types';\n\nfunction resolveWixApiInfraV1WixAiExternalGatewayUrl(\n opts: Omit<ResolveUrlOpts, 'domainToMappings'>\n) {\n const domainToMappings = {\n 'www.wixapis.com': [\n {\n srcPath: '/ai-external-gateway-poc',\n destPath: '',\n },\n ],\n };\n\n return resolveUrl(Object.assign(opts, { domainToMappings }));\n}\n\nconst PACKAGE_NAME = '@wix/auto_sdk_ai-gateway_prompts';\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n 
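// Editor's note: a sketch of how these HTTP-layer factories are consumed, not part
// of the package. Each exported function returns a RequestOptionsFactory that, given
// a host, yields the request metadata (method, resolved URL, serialized payload)
// that the SDK's httpClient executes. The host value below is hypothetical.
const factory = generateContentByPromptObject({ prompt: {} });
const requestMeta = factory({ host: 'www.wixapis.com' });
// requestMeta.url resolves to the /v1/generate-content-by-prompt-object endpoint.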
{\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 
'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n 
path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObject;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptObjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObjectStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 
'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateTextByPromptObjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 
'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptObjectStreamed;\n}\n\nexport function generateContentByPromptObjectAsync(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObjectAsync({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 
'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn:\n 'wix.api_infra.v1.WixAiExternalGateway.GenerateContentByPromptObjectAsync',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object-async',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 
'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 
'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObjectAsync;\n}\n\nexport function generateAudioStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateAudioStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.api_infra.v1.prompt_proxy',\n method: 'POST' as any,\n methodFqn: 'wix.api_infra.v1.WixAiExternalGateway.GenerateAudioStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixApiInfraV1WixAiExternalGatewayUrl({\n protoPath: '/v1/generate-audio-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n { path: 'openAiSpeechChunk.content' },\n { path: 'elevenlabsSpeechChunk.audioBase64' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'elevenlabsSpeechChunk.alignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.alignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return 
__generateAudioStreamed;\n}\n"],"mappings":";AAAA,SAAS,kBAAkB,yBAAyB;AACpD;AAAA,EACE;AAAA,EACA;AAAA,OACK;;;ACJP,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,sBAAsB;AAC/B,SAAS,kBAAkB;AAI3B,SAAS,4CACP,MACA;AACA,QAAM,mBAAmB;AAAA,IACvB,mBAAmB;AAAA,MACjB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AAEA,SAAO,WAAW,OAAO,OAAO,MAAM,EAAE,iBAAiB,CAAC,CAAC;AAC7D;AAEA,IAAM,eAAe;AAMd,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAA
A,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,
WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAEO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wD
AAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAEO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,4CAA4C;AAAA,QAC/C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,4BAA4B;AAAA,YACpC,EAAE,MAAM,oCAAoC;AAAA,UAC9C;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;;;ADlfO,IAAK,gDAAL,kBAAKC,mDAAL;AACL,EAAAA,+CAAA,aAAU;AACV,EAAAA,+CAAA,UAAO;AACP,EAAAA,+CAAA,eAAY;AACZ,EAAAA,+CAAA,YAAS;AACT,EAAAA,+CAAA,cAAW;AACX,EAAAA,+CAAA,UAAO;AAKP,EAAAA,+CAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAqFL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,WAAQ;AACR,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,yBAAsB;AACtB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,2BAAwB;AACxB,EA
AAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,2BAAwB;AAxCd,SAAAA;AAAA,GAAA;AAuWL,IAAK,mCAAL,kBAAKC,sCAAL;AACL,EAAAA,kCAAA,aAAU;AACV,EAAAA,kCAAA,UAAO;AACP,EAAAA,kCAAA,eAAY;AACZ,EAAAA,kCAAA,YAAS;AACT,EAAAA,kCAAA,cAAW;AACX,EAAAA,kCAAA,UAAO;AAKP,EAAAA,kCAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAqFL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,mBAAgB;AAChB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AAEpB,EAAAA,SAAA,4BAAyB;AACzB,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,wBAAqB;AAbX,SAAAA;AAAA,GAAA;AAiHL,IAAK,WAAL,kBAAKC,cAAL;AAEL,EAAAA,UAAA,0BAAuB;AAEvB,EAAAA,UAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAoBL,IAAK,UAAL,kBAAKC,aAAL;AAEL,EAAAA,SAAA,yBAAsB;AAEtB,EAAAA,SAAA,gBAAa;AAEb,EAAAA,SAAA,oBAAiB;AAEjB,EAAAA,SAAA,+BAA4B;AARlB,SAAAA;AAAA,GAAA;AAuEL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,iBAAc;AAEd,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,gBAAa;AAKb,EAAAA,cAAA,YAAS;AAET,EAAAA,cAAA,gBAAa;AAEb,EAAAA,cAAA,WAAQ;AAER,EAAAA,cAAA,cAAW;AAEX,EAAAA,cAAA,eAAY;AAEZ,EAAAA,cAAA,wBAAqB;AAErB,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,6BAA0B;AAE1B,EAAAA,cAAA,kBAAe;AAEf,EAAAA,cAAA,0BAAuB;AAEvB,EAAAA,cAAA,yBAAsB;AAhCZ,SAAAA;AAAA,GAAA;AA4EL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,sBAAmB;AACnB,EAAAA,cAAA,qCAAkC;AAClC,EAAAA,cAAA,+BAA4B;AAC5B,EAAAA,cAAA,8BAA2B;AAC3B,EAAAA,cAAA,qCAAkC;AALxB,SAAAA;AAAA,GAAA;AAiBL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,gBAAa;AACb,EAAAA,iBAAA,SAAM;AACN,EAAAA,iBAAA,YAAS;AACT,EAAAA,iBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAgOL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,sBAAmB;AAEnB,EAAAA,UAAA,UAAO;AAEP,EAAAA,UAAA,WAAQ;AAER,EAAAA,UAAA,WAAQ;AAPE,SAAAA;AAAA,GAAA;AAoEL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,UAAO;AACP,EAAAA,MAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAsFL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoBL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,aAAU;AAEV,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AAEZ,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA6JL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,aAAU;AACV,EAAAA,mBAAA,UAAO;AACP,EAAAA,mBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AA2FL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAuBL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,aAAU;AAEV,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA0JL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAWL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAqKL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoLL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,aAAU;AAEV,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AAEZ,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AAikBL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AAJF,SAAAA;AAAA
,GAAA;AAeL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,8BAA2B;AAC3B,EAAAA,iBAAA,WAAQ;AACR,EAAAA,iBAAA,eAAY;AACZ,EAAAA,iBAAA,qBAAkB;AAClB,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,yBAAsB;AANZ,SAAAA;AAAA,GAAA;AA4FL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oCAAiC;AACjC,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AA0CL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,uCAAoC;AAEpC,EAAAA,YAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAwBL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,4CAAyC;AACzC,EAAAA,gBAAA,uBAAoB;AAFV,SAAAA;AAAA,GAAA;AAuBL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,wCAAqC;AAErC,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,qBAAkB;AAElB,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,iBAAc;AAEd,EAAAA,2BAAA,uBAAoB;AAEpB,EAAAA,2BAAA,kBAAe;AAbL,SAAAA;AAAA,GAAA;AA0HL,IAAK,kCAAL,kBAAKC,qCAAL;AACL,EAAAA,iCAAA,mDAAgD;AAChD,EAAAA,iCAAA,aAAU;AACV,EAAAA,iCAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAqKL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,kCAA+B;AAC/B,EAAAA,kBAAA,kBAAe;AACf,EAAAA,kBAAA,iBAAc;AAHJ,SAAAA;AAAA,GAAA;AAoGL,IAAK,qCAAL,kBAAKC,wCAAL;AACL,EAAAA,oCAAA,aAAU;AACV,EAAAA,oCAAA,UAAO;AACP,EAAAA,oCAAA,eAAY;AACZ,EAAAA,oCAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA0CL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,mCAAgC;AAKhC,EAAAA,qBAAA,yCAAsC;AAKtC,EAAAA,qBAAA,6CAA0C;AAXhC,SAAAA;AAAA,GAAA;AAuGL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAiML,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,gCAA6B;AAC7B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,iCAA8B;AAC9B,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,uBAAoB;AAXV,SAAAA;AAAA,GAAA;AAscL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,gCAA6B;AAC7B,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,qCAAkC;AAJxB,SAAAA;AAAA,GAAA;AA80BL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA2DL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA2NL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,0BAAuB;AAdb,SAAAA;AAAA,GAAA;AAmDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,WAAQ;AAHE,SAAAA;AAAA,GAAA;AAgFL,IAAK,uBAAL,kBAAKC,0BAAL;AAEL,EAAAA,sBAAA,kCAA+B;AAE/B,EAAAA,sBAAA,0BAAuB;AAEvB,EAAAA,sBAAA,6BAA0B;AAE1B,EAAAA,sBAAA,2BAAwB;AARd,SAAAA;AAAA,GAAA;AAsDL,IAAK,6BAAL,kBAAKC,gCAAL;AAEL,EAAAA,4BAAA,sBAAmB;AAEnB,EAAAA,4BAAA,kBAAe;AAJL,SAAAA;AAAA,GAAA;AAoBL,IAAK,cAAL,kBAAKC,iBAAL;AAEL,EAAAA,aAAA,6BAA0B;AAE1B,EAAAA,aAAA,yBAAsB;AAJZ,SAAAA;AAAA,GAAA;AAgEL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,uBAAoB;AACpB,EAAAA,WAAA,gBAAa;AACb,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,qBAAkB;AALR,SAAAA;AAAA,GAAA;AAyKL,IAAK,mBAAL,kBAAKC,sBAAL;AAEL,EAAAA,kBAAA,mCAAgC;AAEhC,EAAAA,kBAAA,eAAY;AAEZ,EAAAA,kBAAA,iBAAc;AAEd,EAAAA,kBAAA,gBAAa;AARH,SAAAA;AAAA,GAAA;AAoCL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AAEV,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,SAAM;AAEN,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,eAAY;AAfF,SAAAA;
AAAA,GAAA;AAmIL,IAAK,QAAL,kBAAKC,WAAL;AACL,EAAAA,OAAA,aAAU;AAEV,EAAAA,OAAA,yBAAsB;AAEtB,EAAAA,OAAA,wBAAqB;AAErB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAEvB,EAAAA,OAAA,2BAAwB;AACxB,EAAAA,OAAA,yBAAsB;AACtB,EAAAA,OAAA,uBAAoB;AAEpB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAnBb,SAAAA;AAAA,GAAA;AA2FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,aAAU;AACV,EAAAA,gBAAA,UAAO;AACP,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,aAAU;AACV,EAAAA,eAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAmHL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,wBAAqB;AACrB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,0BAAuB;AAXb,SAAAA;AAAA,GAAA;AAyFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,aAAU;AACV,EAAAA,6BAAA,UAAO;AACP,EAAAA,6BAAA,SAAM;AACN,EAAAA,6BAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,6BAAL,kBAAKC,gCAAL;AACL,EAAAA,4BAAA,aAAU;AACV,EAAAA,4BAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAqGL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,wBAAqB;AACrB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,0BAAuB;AAVb,SAAAA;AAAA,GAAA;AAgSL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,SAAM;AACN,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkDL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAwDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AAEtB,EAAAA,YAAA,6BAA0B;AAE1B,EAAAA,YAAA,8BAA2B;AAE3B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,gCAA6B;AAE7B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,+BAA4B;AAblB,SAAAA;AAAA,GAAA;AAwDL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AACxB,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,QAAK;AAHK,SAAAA;AAAA,GAAA;AAaL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,wBAAqB;AACrB,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AANP,SAAAA;AAAA,GAAA;AAmBL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,WAAQ;AACR,EAAAA,YAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AA0DL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,sCAAmC;AACnC,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AAuBL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,yBAAsB;AACtB,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,0BAAuB;AACvB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,YAAS;AACT,EAAAA,SAAA,WAAQ;AAXE,SAAAA;AAAA,GAAA;AA6BL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,8BAA2B;AAC3B,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,WAAQ;AACR,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,gBAAa;AACb,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,uBAAoB;AACpB,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAwFL,IAAK,iCAAL,kBAAKC,oCAAL;AACL,EAAAA,gCAAA,8BAA2B;AAC3B,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,WAAQ;AACR,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,gBAAa;AACb,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,cAAW;
AACX,EAAAA,gCAAA,uBAAoB;AACpB,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AA0FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,mBAAgB;AAChB,EAAAA,gBAAA,oBAAiB;AAHP,SAAAA;AAAA,GAAA;AAaL,IAAK,6CAAL,kBAAKC,gDAAL;AACL,EAAAA,4CAAA,+BAA4B;AAC5B,EAAAA,4CAAA,UAAO;AACP,EAAAA,4CAAA,SAAM;AAHI,SAAAA;AAAA,GAAA;AAyHL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,iCAA8B;AAC9B,EAAAA,sBAAA,wBAAqB;AACrB,EAAAA,sBAAA,gBAAa;AACb,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,qBAAkB;AANR,SAAAA;AAAA,GAAA;AA0DL,IAAK,wBAAL,kBAAKC,2BAAL;AAEL,EAAAA,uBAAA,qCAAkC;AAElC,EAAAA,uBAAA,gBAAa;AAEb,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,eAAY;AAEZ,EAAAA,uBAAA,+BAA4B;AAE5B,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,2BAAwB;AAdd,SAAAA;AAAA,GAAA;AAmOL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,6BAA0B;AAC1B,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,aAAU;AACV,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,2BAAwB;AACxB,EAAAA,WAAA,iCAA8B;AAC9B,EAAAA,WAAA,qBAAkB;AAClB,EAAAA,WAAA,0BAAuB;AACvB,EAAAA,WAAA,SAAM;AACN,EAAAA,WAAA,qBAAkB;AAVR,SAAAA;AAAA,GAAA;AA6CL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,gCAA6B;AAC7B,EAAAA,cAAA,UAAO;AACP,EAAAA,cAAA,SAAM;AACN,EAAAA,cAAA,WAAQ;AACR,EAAAA,cAAA,aAAU;AALA,SAAAA;AAAA,GAAA;AA4JL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,8BAA2B;AAC3B,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,gBAAa;AACb,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAmKL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,kBAAe;AAEf,EAAAA,yBAAA,uBAAoB;AAEpB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,mBAAgB;AA7BN,SAAAA;AAAA,GAAA;AAoOL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,mCAAgC;AANtB,SAAAA;AAAA,GAAA;AA2FL,IAAK,+BAAL,kBAAKC,kCAAL;AAEL,EAAAA,8BAAA,qCAAkC;AAElC,EAAAA,8BAAA,gBAAa;AAJH,SAAAA;AAAA,GAAA;AAyOL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,6BAA0B;AAC1B,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,+BAA4B;AAJlB,SAAAA;AAAA,GAAA;AA+TL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,SAAM;AAEN,EAAAA,cAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AA8BL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,2BAAwB;AACxB,EAAAA,YAAA,mBAAgB;AALN,SAAAA;AAAA,GAAA;AAgJL,IAAK,4CAAL,kBAAKC,+CAAL;AACL,EAAAA,2CAAA,sBAAmB;AACnB,EAAAA,2CAAA,UAAO;AACP,EAAAA,2CAAA,YAAS;AACT,EAAAA,2CAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAyaL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,YAAS;AACT,EAAAA,sBAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAqUL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,yBAAsB;AACtB,EAAAA,cAAA,YAAS;AACT,EAAAA,cAAA,gBAAa;AAHH,SAAAA;AAAA,GAAA;AAyIL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AACZ,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AANF,SAAAA;AAAA,GAAA;AA+lBL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,cAAW;AAHD,S
AAAA;AAAA,GAAA;AAsFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,8CAA2C;AAC3C,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,uBAAoB;AACpB,EAAAA,6BAAA,qBAAkB;AAJR,SAAAA;AAAA,GAAA;AA+NL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,aAAU;AACV,EAAAA,qBAAA,uBAAoB;AACpB,EAAAA,qBAAA,YAAS;AACT,EAAAA,qBAAA,cAAW;AACX,EAAAA,qBAAA,SAAM;AALI,SAAAA;AAAA,GAAA;AAsFZ,eAAsBC,+BACpB,SA0QA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,uBAAuB,SAAS;AAAA,IAChC,sBAAsB,SAAS;AAAA,EACjC,CAAC;AAED,QAAM,UAC+B,8BAA8B,OAAO;AAE1E,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,uBAAuB;AAAA,UACvB,sBAAsB;AAAA,QACxB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AA4BA,eAAsBC,oCACpB,SA8DA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,sBAAsB,SAAS;AAAA,IAC/B,kBAAkB,SAAS;AAAA,EAC7B,CAAC;AAED,QAAM,UAC+B;AAAA,IACjC;AAAA,EACF;AAEF,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,sBAAsB;AAAA,UACtB,kBAAkB;AAAA,QACpB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AA4BA,eAAsBC,oCACpB,SA0QA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,QAAQ,SAAS;AAAA,IACjB,QAAQ,SAAS;AAAA,IACjB,iBAAiB,SAAS;AAAA,IAC1B,oBAAoB,SAAS;AAAA,IAC7B,mBAAmB,SAAS;AAAA,IAC5B,uBAAuB,SAAS;AAAA,IAChC,sBAAsB,SAAS;AAAA,EACjC,CAAC;AAED,QAAM,UAC+B;AAAA,IACjC;AAAA,EACF;AAEF,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,QAAQ;AAAA,UACR,QAAQ;AAAA,UACR,iBAAiB;AAAA,UACjB,oBAAoB;AAAA,UACpB,mBAAmB;AAAA,UACnB,uBAAuB;AAAA,UACvB,sBAAsB;AAAA,QACxB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;AAwBA,eAAsBC,uBACpB,SAWA;AAEA,QAAM,EAAE,YAAY,YAAY,IAAI,UAAU,CAAC;AAK/C,QAAM,UAAU,sCAAsC;AAAA,IACpD,2BAA2B,SAAS;AAAA,IACpC,+BAA+B,SAAS;AAAA,IACxC,iBAAiB,SAAS;AAAA,EAC5B,CAAC;AAED,QAAM,UAC+B,sBAAsB,OAAO;AAElE,eAAa,aAAa;AAC1B,MAAI;AACF,UAAM,SAAS,MAAM,WAAW,QAAQ,OAAO;AAC/C,iBAAa,YAAY,MAAM;AAE/B,WAAO,wCAAwC,OAAO,IAAI;AAAA,EAC5D,SAAS,KAAU;AACjB,UAAM,mBAAmB;AAAA,MACvB;AAAA,MACA;AAAA,QACE,wBAAwB,CAAC;AAAA,QACzB,0BAA0B;AAAA,UACxB,2BAA2B;AAAA,UAC3B,+BAA+B;AAAA,UAC/B,iBAAiB;AAAA,QACnB;AAAA,QACA,yBAAyB;AAAA,MAC3B;AAAA,MACA,CAAC,SAAS;AAAA,IACZ;AACA,iBAAa,UAAU,GAAG;AAE1B,UAAM;AAAA,EACR;AACF;","names":["payload","OpenaiproxyV1ChatCompletionMessageMessageRole","OpenaiproxyV1Model","ChatCompletionMessageMessageRole","V1Model","Language","Outcome","FinishReason","HarmCategory","HarmProbability","Modality","ResponseTypeType","Role","Type","MediaType","GoogleproxyV1ResponseTypeType","V1MessageRoleRole","V1CacheControlType","V1ImageMediaTypeMediaType","V1ResponseTypeType","MessageRoleRole","CacheControl
Type","ImageMediaTypeMediaType","PerplexityMessageMessageRole","PerplexityModel","V1ImageModel","ImageModel","ImageCoreModel","ImageStableDiffusionModel","EditImageWithPromptRequestModel","OpenAiImageModel","V1ChatCompletionMessageMessageRole","ChatCompletionModel","MessageRole","V1ResponsesModel","ResponsesModel","TextBisonModel","ChatBisonModel","GoogleproxyV1Model","ContentRole","MediaResolutionLevel","DynamicRetrievalConfigMode","Environment","Threshold","PersonGeneration","Mode","Model","ToolChoiceType","McpServerType","ClaudeModel","GoogleproxyV1ToolChoiceType","GoogleproxyV1McpServerType","AnthropicModel","V1ToolChoiceType","V1McpServerType","LlamaModel","ImageQuality","ImageSize","ImageStyle","ClipGuidancePreset","Sampler","TextToImageRequestStylePreset","GenerateCoreRequestStylePreset","GenerationMode","GenerateStableDiffusionRequestOutputFormat","GenerateAnImageModel","CreatePredictionModel","TaskInput","ResponseType","StylePreset","TextToImageRequestModel","ImagenModel","GenerateImageMlPlatformModel","VideoGenModel","OutputFormat","VideoModel","ResponsesInputMessageResponsesMessageRole","ResponsesMessageRole","V1VideoModel","GatewayMessageDefinitionRole","SpeechModel","ElevenLabsTextToSpeechModel","WebhookIdentityType","generateContentByPromptObject","generateTextByPromptObjectStreamed","generateContentByPromptObjectAsync","generateAudioStreamed"]}
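The embedded sources above define four gateway calls — generateContentByPromptObject, generateTextByPromptObjectStreamed, generateContentByPromptObjectAsync, and generateAudioStreamed — each of which serializes float-valued prompt fields (temperature, topP, cfgScale, and the like) with transformSDKFloatToRESTFloat before POSTing to the resolved gateway URL, and converts floats and byte blobs back on the response. A minimal TypeScript sketch of how one of these calls might be invoked follows; the createClient/ApiKeyStrategy wiring assumes the standard @wix/sdk pattern for auto-generated modules, and the credentials, the module alias, and the payload values are illustrative placeholders, not confirmed by the sources above.

// Hypothetical usage sketch, not an official example. The module and
// function names come from the embedded sources above; the client wiring
// assumes the standard @wix/sdk pattern for auto-generated modules, and
// <API_KEY> / <SITE_ID> are placeholders.
import { createClient, ApiKeyStrategy } from '@wix/sdk';
import * as prompts from '@wix/auto_sdk_ai-gateway_prompts';

const client = createClient({
  modules: { prompts },
  auth: ApiKeyStrategy({ apiKey: '<API_KEY>', siteId: '<SITE_ID>' }),
});

async function main() {
  // temperature and topP are among the float paths that
  // transformSDKFloatToRESTFloat serializes before the request is sent.
  const result = await client.prompts.generateContentByPromptObject({
    prompt: {
      openAiChatCompletionRequest: { temperature: 0.7, topP: 1 },
    },
  });
  console.log(result);
}

main().catch(console.error);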