@wix/auto_sdk_ai-gateway_generators 1.0.56 → 1.0.58

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1 +1 @@
- {"version":3,"sources":["../../src/ds-wix-ai-gateway-v1-prompt-generators.http.ts","../../src/ds-wix-ai-gateway-v1-prompt-generators.types.ts","../../src/ds-wix-ai-gateway-v1-prompt-generators.meta.ts"],"sourcesContent":["import { toURLSearchParams } from '@wix/sdk-runtime/rest-modules';\nimport { transformSDKFloatToRESTFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTFloatToSDKFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformSDKBytesToRESTBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformRESTBytesToSDKBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformRESTDurationToSDKDuration } from '@wix/sdk-runtime/transformations/duration';\nimport { transformPaths } from '@wix/sdk-runtime/transformations/transform-paths';\nimport { resolveUrl } from '@wix/sdk-runtime/rest-modules';\nimport { ResolveUrlOpts } from '@wix/sdk-runtime/rest-modules';\nimport { RequestOptionsFactory } from '@wix/sdk-types';\n\nfunction resolveWixDsWixAiGatewayV1WixAiGatewayUrl(\n opts: Omit<ResolveUrlOpts, 'domainToMappings'>\n) {\n const domainToMappings = {\n 'bo._base_domain_': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'wixbo.ai': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'wix-bo.com': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'api._api_base_domain_': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n ],\n 'manage._base_domain_': [\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'www.wixapis.com': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n ],\n };\n\n return resolveUrl(Object.assign(opts, { domainToMappings }));\n}\n\nconst PACKAGE_NAME = '@wix/auto_sdk_ai-gateway_generators';\n\n/**\n * Generate text according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPrompt(), and will be removed on 2026-03-31.\n */\nexport function generateTextByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n 
path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n 
},\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPrompt;\n}\n\n/**\n * Generate text according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptStreamed({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-streamed/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 
'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptStreamed;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPromptObject(), and will be removed on 2026-03-31.\n */\nexport function generateTextByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n 
path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n 
path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 
'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptObject;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptObjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObjectStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 
'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptObjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n 
return metadata;\n }\n\n return __generateTextByPromptObjectStreamed;\n}\n\n/** Generate an embedding using the provided request. */\nexport function generateEmbedding(payload: object): RequestOptionsFactory<any> {\n function __generateEmbedding({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateEmbedding',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-embedding',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'openAiEmbeddingsResponse.data.floatEmbedding.embedding',\n isRepeated: true,\n },\n {\n path: 'azureEmbeddingsResponse.data.floatEmbedding.embedding',\n isRepeated: true,\n },\n {\n path: 'googleEmbeddingsResponse.predictions.embeddings.embedding',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateEmbedding;\n}\n\n/**\n * Generate text according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByProject(), and will be removed on 2026-03-31.\n */\nexport function generateTextByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 
'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 
'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByProject;\n}\n\n/**\n * Generate text according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByProjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByProjectStreamed({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByProjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-project-streamed/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByProjectStreamed;\n}\n\n/** Generate moderation output from specified moderation model provider. 
*/\nexport function generateModeration(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateModeration({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateModeration',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-moderation',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __generateModeration;\n}\n\n/**\n * Generate image according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate image request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByProject(), and will be removed on 2026-03-31.\n */\nexport function generateImageByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n 
path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByProject;\n}\n\n/**\n * Generate image according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate image 
request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPrompt(), and will be removed on 2026-03-31.\n */\nexport function generateImageByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByPrompt;\n}\n\n/**\n * Generate image according to Prompt object configuration.\n * Prompt object is used for all generate image request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPromptObject(), and will be removed on 2026-03-31.\n */\nexport function generateImageByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { 
path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 
'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByPromptObject;\n}\n\n/**\n * Generate different content such as text, image, and video according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n 
transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPrompt;\n}\n\n/**\n * Generate different content such as text, image, and video according to Project id, that was previously published in the service. 
Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 
'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 
'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByProject;\n}\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 
'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 
'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 
'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObject;\n}\n\n/** Transcribe input audio using the specified model. */\nexport function generateTranscription(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTranscription({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [{ path: 'openAiTranscriptionRequest.temperature' }],\n },\n {\n transformFn: transformSDKBytesToRESTBytes,\n paths: [{ path: 'openAiTranscriptionRequest.fileContent.fileBytes' }],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTranscription',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-transcription',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTDurationToSDKDuration,\n paths: [\n { path: 'openAiTranscriptionResponse.duration' },\n { path: 'openAiTranscriptionResponse.words.start' },\n { path: 'openAiTranscriptionResponse.words.end' },\n { path: 'openAiTranscriptionResponse.segments.start' },\n { path: 'openAiTranscriptionResponse.segments.end' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'openAiTranscriptionResponse.segments.temperature' },\n { path: 'openAiTranscriptionResponse.segments.avgLogprob' },\n { path: 'openAiTranscriptionResponse.segments.compressionRatio' },\n { path: 'openAiTranscriptionResponse.segments.noSpeechProb' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTranscription;\n}\n\n/** Generate audio from text using the specified model. 
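\n * A hypothetical usage sketch, not a confirmed contract (the payload key mirrors the float transform path above; the host value is an assumption drawn from the package's domain mappings):\n * @example\n * // Build the request-options factory, then resolve it against a host.\n * const factory = generateAudio({ openAiCreateSpeechRequest: { speed: 1.0 } });\n * const requestOptions = factory({ host: 'www.wixapis.com' }); // POST /v1/generate-audio\n 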
*/\nexport function generateAudio(payload: object): RequestOptionsFactory<any> {\n function __generateAudio({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateAudio',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-audio',\n data: serializedData,\n host,\n }),\n data: serializedData,\n };\n\n return metadata;\n }\n\n return __generateAudio;\n}\n\n/** Generate audio from text using the specified model. The response is streamed back in chunks. */\nexport function generateAudioStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateAudioStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateAudioStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-audio-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n { path: 'openAiSpeechChunk.content' },\n { path: 'elevenlabsSpeechChunk.audioBase64' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'elevenlabsSpeechChunk.alignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.alignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateAudioStreamed;\n}\n\n/**\n * Publish the Prompt object to the service's storage. 
Enables requests to the GenerateTextByPrompt rpc using the published Prompt's id.\n * Once published, a different Prompt configuration cannot be published with the same id, so the published Prompt is considered immutable.\n */\nexport function publishPrompt(payload: object): RequestOptionsFactory<any> {\n function __publishPrompt({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 
'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PublishPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/prompt/{prompt.id}',\n data: serializedData,\n host,\n }),\n data: serializedData,\n };\n\n return metadata;\n }\n\n return __publishPrompt;\n}\n\n/**\n * Retrieve the information about Prompt from service's storage.\n * If provided, the Prompt object's templated parameters will be expanded using values from the provided params.\n * An error will occur if the Prompt object's templated parameters are insufficient.\n */\nexport function getPrompt(payload: object): RequestOptionsFactory<any> {\n function __getPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/prompt/{promptId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload, true),\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 
'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'prompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n {\n path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n {\n path: 'prompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n {\n path: 'prompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __getPrompt;\n}\n\n/**\n * Publish the Project object to the service's storage. 
Enables requests to the GenerateTextByProject rpc using the published Project's id.\n * A different Project configuration can be published with the same id, overwriting the previous Project configuration.\n */\nexport function publishProject(payload: object): RequestOptionsFactory<any> {\n function __publishProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PublishProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/project/{project.id}',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __publishProject;\n}\n\n/** Retrieve the information about Project from service's storage. */\nexport function getProject(payload: object): RequestOptionsFactory<any> {\n function __getProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/project/{projectId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getProject;\n}\n\n/** Retrieve status by entity id and type. */\nexport function getStatus(payload: object): RequestOptionsFactory<any> {\n function __getStatus({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetStatus',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/status/{entityId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getStatus;\n}\n\n/** Gets info about the application's overall and per-user budget and current usage. 
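\n * A hypothetical usage sketch (the empty payload and the host value are assumptions):\n * @example\n * const requestOptions = getApplicationUsage({})({ host: 'www.wixapis.com' });\n * // Issues GET /v1/application-usage with the payload encoded by toURLSearchParams.\n 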
*/\nexport function getApplicationUsage(\n payload: object\n): RequestOptionsFactory<any> {\n function __getApplicationUsage({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetApplicationUsage',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/application-usage',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getApplicationUsage;\n}\n\n/** Image Editing API's */\nexport function editImage(payload: object): RequestOptionsFactory<any> {\n function __editImage({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'photoroomImageEditingRequest.background.guidance.scale' },\n { path: 'photoroomImageEditingRequest.margin.general' },\n { path: 'photoroomImageEditingRequest.margin.bottom' },\n { path: 'photoroomImageEditingRequest.margin.left' },\n { path: 'photoroomImageEditingRequest.margin.right' },\n { path: 'photoroomImageEditingRequest.margin.top' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.EditImage',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/edit-image',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'photoroomRemoveBackgroundResponse.xUncertaintyScore' },\n { path: 'photoroomImageEditingResponse.xUncertaintyScore' },\n { path: 'replicateEditImageResponse.metrics.predictTime' },\n { path: 'replicateEditImageResponse.metrics.totalTime' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __editImage;\n}\n\n/** Poll image generation result by id. */\nexport function pollImageGenerationResult(\n payload: object\n): RequestOptionsFactory<any> {\n function __pollImageGenerationResult({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PollImageGenerationResult',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/poll-image-generation-result',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __pollImageGenerationResult;\n}\n","import type { GoogleProtoDuration } from '@wix/metro-runtime/ambassador';\n\nexport interface Prompt extends PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. 
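\n * Note: Prompt extends PromptModelRequestOneOf (a @oneof group), so at most one of these model request variants should be set on a single Prompt.\n 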
*/\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** Invoke Amazon Converse API request. */\n amazonConverseRequest?: InvokeConverseRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation request. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit request. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n /**\n * Prompt id.\n * @format GUID\n */\n id?: string | null;\n /**\n * Names of template parameters that will be checked and substituted during GenerateText requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedParameterNames?: string[];\n /** FallbackPromptConfig object that describes an optional second Prompt that can be invoked in case the main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n /**\n * Names of dynamic properties that will be checked and substituted during requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedDynamicPropertiesNames?: string[];\n}\n\n/** @oneof */\nexport interface PromptModelRequestOneOf {\n /** OpenAI chat completion request. 
*/\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** Invoke Amazon Converse API request. */\n amazonConverseRequest?: InvokeConverseRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation request. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit request. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n}\n\nexport interface FallbackPromptConfig {\n /**\n * Id of the fallback Prompt. 
This Prompt will be used for text generation in case the invocation of original Prompt fails.\n * @format GUID\n */\n fallbackPromptId?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequest\n extends OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: OpenaiproxyV1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: CreateChatCompletionRequestFunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
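\n * For example, n = 3 asks the model to return three independent completion choices for the same input messages.\n 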
*/\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: V1CreateChatCompletionRequestTool[];\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: V1FineTuningSpec;\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: OpenaiproxyV1CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * o1 models only\n * @maxLength 100\n */\n reasoningEffort?: string | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses.\n * Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface CreateChatCompletionRequestFunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). 
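\n *\n * A hypothetical signature that opts into strict schema-following (illustrative values\n * only, not part of the generated SDK):\n * @example\n * const getWeather: CreateChatCompletionRequestFunctionSignature = {\n *   name: 'get_weather',\n *   description: 'Returns the current weather for a given city.',\n *   parameters: {\n *     type: 'object',\n *     properties: { city: { type: 'string' } },\n *     required: ['city'],\n *   },\n *   strict: true,\n * };\n 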
*/\n strict?: boolean | null;\n}\n\nexport enum OpenaiproxyV1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_0301 = 'GPT_3_5_TURBO_0301',\n GPT_4 = 'GPT_4',\n GPT_4_0314 = 'GPT_4_0314',\n GPT_4_32K = 'GPT_4_32K',\n GPT_4_32K_0314 = 'GPT_4_32K_0314',\n GPT_3_5_TURBO_0613 = 'GPT_3_5_TURBO_0613',\n GPT_3_5_TURBO_16K = 'GPT_3_5_TURBO_16K',\n GPT_3_5_TURBO_16K_0613 = 'GPT_3_5_TURBO_16K_0613',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_4_32K_0613 = 'GPT_4_32K_0613',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_1106_PREVIEW = 'GPT_4_1106_PREVIEW',\n GPT_4_VISION_PREVIEW = 'GPT_4_VISION_PREVIEW',\n GPT_4_TURBO_PREVIEW = 'GPT_4_TURBO_PREVIEW',\n GPT_4_0125_PREVIEW = 'GPT_4_0125_PREVIEW',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4_TURBO_2024_04_09 = 'GPT_4_TURBO_2024_04_09',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4O_2024_08_06 = 'GPT_4O_2024_08_06',\n O1_PREVIEW = 'O1_PREVIEW',\n O1_PREVIEW_2024_09_12 = 'O1_PREVIEW_2024_09_12',\n O1_MINI = 'O1_MINI',\n O1_MINI_2024_09_12 = 'O1_MINI_2024_09_12',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O1_2024_12_17 = 'O1_2024_12_17',\n O3_MINI_2025_01_31 = 'O3_MINI_2025_01_31',\n GPT_4_OLD = 'GPT_4_OLD',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n O3_2025_04_16 = 'O3_2025_04_16',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_EXP = 'GPT_EXP',\n GPT_EXP_2 = 'GPT_EXP_2',\n GPT_5_2025_08_07 = 'GPT_5_2025_08_07',\n GPT_5_MINI_2025_08_07 = 'GPT_5_MINI_2025_08_07',\n GPT_5_NANO_2025_08_07 = 'GPT_5_NANO_2025_08_07',\n GPT_5_2_2025_12_11_COMPLETION = 'GPT_5_2_2025_12_11_COMPLETION',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ModelWithLiterals =\n | OpenaiproxyV1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_0301'\n | 'GPT_4'\n | 'GPT_4_0314'\n | 'GPT_4_32K'\n | 'GPT_4_32K_0314'\n | 'GPT_3_5_TURBO_0613'\n | 'GPT_3_5_TURBO_16K'\n | 'GPT_3_5_TURBO_16K_0613'\n | 'GPT_4_0613'\n | 'GPT_4_32K_0613'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_1106_PREVIEW'\n | 'GPT_4_VISION_PREVIEW'\n | 'GPT_4_TURBO_PREVIEW'\n | 'GPT_4_0125_PREVIEW'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4_TURBO_2024_04_09'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4O_2024_08_06'\n | 'O1_PREVIEW'\n | 'O1_PREVIEW_2024_09_12'\n | 'O1_MINI'\n | 'O1_MINI_2024_09_12'\n | 'GPT_4O_2024_11_20'\n | 'O1_2024_12_17'\n | 'O3_MINI_2025_01_31'\n | 'GPT_4_OLD'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'O3_2025_04_16'\n | 'O4_MINI_2025_04_16'\n | 'GPT_EXP'\n | 'GPT_EXP_2'\n | 'GPT_5_2025_08_07'\n | 'GPT_5_MINI_2025_08_07'\n | 'GPT_5_NANO_2025_08_07'\n | 'GPT_5_2_2025_12_11_COMPLETION';\n\nexport interface OpenaiproxyV1ChatCompletionMessage {\n /** The role of the message author. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: ChatCompletionMessageFunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type, each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-vision-preview model.\n * @maxSize 5\n */\n contentParts?: OpenaiproxyV1ChatCompletionMessageContentPart[];\n}\n\nexport interface ChatCompletionMessageFunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * More info and cost calculation: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum OpenaiproxyV1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals =\n | OpenaiproxyV1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ChatCompletionMessageToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: ChatCompletionMessageFunctionWithArgs;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageContentPart\n extends OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface V1CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. */\n function?: CreateChatCompletionRequestFunctionSignature;\n}\n\nexport interface V1FineTuningSpec {\n /**\n * Organization field in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:<my-org>:custom_suffix:id\n * @maxLength 100\n */\n org?: string | null;\n /**\n * Suffix field in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:<custom_suffix>:id\n * @maxLength 100\n */\n suffix?: string | null;\n /**\n * Id field in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:custom_suffix:<id>\n * @maxLength 100\n */\n id?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface TextBisonPredictRequest {\n /**\n * TextInstance objects containing input prompts.\n * @maxSize 100\n */\n instances?: TextInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: TextBisonModelWithLiterals;\n}\n\nexport interface TextInstance {\n /**\n * Text input to generate model response. Prompts can include preamble, questions, suggestions, instructions, or examples.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface PredictParameters {\n /**\n * The temperature is used for sampling during response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that\n * require a less open-ended or creative response, while higher temperatures can lead to more diverse or creative results.\n * A temperature of 0 means that the highest probability tokens are always selected. In this case, responses for a\n * given prompt are mostly deterministic, but a small amount of variation is still possible.\n * For most use cases, try starting with a temperature of 0.2. If the model returns a response that's too generic,\n * too short, or the model gives a fallback response, try increasing the temperature.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for longer responses.\n * @min 1\n * @max 2048\n */\n maxOutputTokens?: number | null;\n /**\n * Top-K changes how the model selects tokens for output. 
A top-K of 1 means the next selected token is the most probable\n * among all tokens in the model's vocabulary (also called greedy decoding), while a top-K of 3 means that the next\n * token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled. Then tokens are further\n * filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-K is 40.\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output. Tokens are selected from the most (see top-K) to least\n * probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have a\n * probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next\n * token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-P is 0.95.\n * @max 1\n */\n topP?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in\n * the response. If a string appears multiple times in the response, then the response truncates where it's first\n * encountered. The strings are case-sensitive.\n * @maxSize 100\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * The number of response variations to return.\n * @min 1\n * @max 8\n */\n candidateCount?: number | null;\n}\n\nexport enum TextBisonModel {\n UNKNOWN_TEXT_BISON_MODEL = 'UNKNOWN_TEXT_BISON_MODEL',\n TEXT_BISON = 'TEXT_BISON',\n TEXT_BISON_001 = 'TEXT_BISON_001',\n TEXT_BISON_32K = 'TEXT_BISON_32K',\n TEXT_BISON_002 = 'TEXT_BISON_002',\n TEXT_BISON_32K_002 = 'TEXT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type TextBisonModelWithLiterals =\n | TextBisonModel\n | 'UNKNOWN_TEXT_BISON_MODEL'\n | 'TEXT_BISON'\n | 'TEXT_BISON_001'\n | 'TEXT_BISON_32K'\n | 'TEXT_BISON_002'\n | 'TEXT_BISON_32K_002';\n\nexport interface ChatBisonPredictRequest {\n /**\n * ChatInstance objects containing inputs.\n * @maxSize 100\n */\n instances?: ChatInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: ChatBisonModelWithLiterals;\n}\n\nexport interface ChatInstance {\n /**\n * Optional. Context shapes how the model responds throughout the conversation. For example, you can use context\n * to specify words the model can or cannot use, topics to focus on or avoid, or the response format or style.\n * @maxLength 100000\n */\n context?: string | null;\n /**\n * Optional. Examples for the model to learn how to respond to the conversation.\n * @maxSize 1000\n */\n examples?: Example[];\n /**\n * Required. Conversation history provided to the model in a structured alternate-author form. Messages appear in\n * chronological order: oldest first, newest last. When the history of messages causes the input to exceed the\n * maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.\n * @maxSize 1000\n */\n messages?: ChatMessage[];\n}\n\nexport interface Example {\n /** An example of an input Message from the user. */\n input?: ChatMessage;\n /** An example of what the model should output given the input. 
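\n *\n * A few-shot pair might look like the following sketch (values are illustrative\n * assumptions, not part of the generated SDK):\n * @example\n * const greetingExample: Example = {\n *   input: { author: 'user', content: 'Hello!' },\n *   output: { author: 'model', content: 'Hi there! How can I help you today?' },\n * };\n 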
*/\n output?: ChatMessage;\n}\n\nexport interface ChatMessage {\n /**\n * Author tag for the turn.\n * @maxLength 100000\n */\n author?: string | null;\n /**\n * Text content of the chat message.\n * @maxLength 100000\n */\n content?: string;\n}\n\nexport enum ChatBisonModel {\n UNKNOWN_CHAT_BISON_MODEL = 'UNKNOWN_CHAT_BISON_MODEL',\n CHAT_BISON = 'CHAT_BISON',\n CHAT_BISON_001 = 'CHAT_BISON_001',\n CHAT_BISON_32K = 'CHAT_BISON_32K',\n CHAT_BISON_002 = 'CHAT_BISON_002',\n CHAT_BISON_32K_002 = 'CHAT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type ChatBisonModelWithLiterals =\n | ChatBisonModel\n | 'UNKNOWN_CHAT_BISON_MODEL'\n | 'CHAT_BISON'\n | 'CHAT_BISON_001'\n | 'CHAT_BISON_32K'\n | 'CHAT_BISON_002'\n | 'CHAT_BISON_32K_002';\n\nexport interface CreateChatCompletionRequest\n extends CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: V1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: V1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: FunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
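\n *\n * For instance, a request asking for two alternative completions could set this field as\n * in the sketch below (model and message are illustrative assumptions):\n * @example\n * const request: CreateChatCompletionRequest = {\n *   model: 'GPT_4O_2024_11_20',\n *   messages: [{ role: 'USER', content: 'Suggest a tagline for a bakery.' }],\n *   n: 2,\n * };\n 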
*/\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: CreateChatCompletionRequestTool[];\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface FunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport enum V1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n /** New models for Migration */\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_5_2_2025_12_11_COMPLETION = 'GPT_5_2_2025_12_11_COMPLETION',\n}\n\n/** @enumType */\nexport type V1ModelWithLiterals =\n | V1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_0613'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4O_2024_11_20'\n | 'O4_MINI_2025_04_16'\n | 'GPT_5_2_2025_12_11_COMPLETION';\n\nexport interface V1ChatCompletionMessage {\n /** The role of the message author. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: FunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type, each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-vision-preview model.\n * @maxSize 5\n */\n contentParts?: ChatCompletionMessageContentPart[];\n}\n\nexport interface FunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * More info and cost calculation: https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ChatCompletionMessageMessageRoleWithLiterals =\n | ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: FunctionWithArgs;\n}\n\nexport interface ChatCompletionMessageContentPart\n extends ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. */\n function?: FunctionSignature;\n}\n\nexport interface CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface GenerateContentRequest {\n /** ID of the model to use. */\n model?: GoogleproxyV1ModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n contents?: Content[];\n /** The system instruction to the model. */\n systemInstruction?: SystemInstruction;\n /**\n * A list of Tools the model may use to generate the next response.\n * @maxSize 1000\n */\n tools?: GoogleproxyV1Tool[];\n /**\n * Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.\n * @maxSize 100\n */\n safetySettings?: SafetySetting[];\n /** The generation configuration for the response. */\n generationConfig?: GenerationConfig;\n /** Tool configuration for any Tool specified in the request. */\n toolConfig?: V1ToolConfig;\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: FineTuningSpec;\n}\n\nexport enum GoogleproxyV1Model {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n GEMINI_1_0_PRO = 'GEMINI_1_0_PRO',\n GEMINI_1_0_PRO_VISION = 'GEMINI_1_0_PRO_VISION',\n GEMINI_1_5_PRO = 'GEMINI_1_5_PRO',\n GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH',\n GEMINI_2_0_FLASH = 'GEMINI_2_0_FLASH',\n GEMINI_2_0_FLASH_LITE = 'GEMINI_2_0_FLASH_LITE',\n GEMINI_2_5_PRO = 'GEMINI_2_5_PRO',\n GEMINI_2_5_FLASH = 'GEMINI_2_5_FLASH',\n GEMINI_2_5_FLASH_LITE = 'GEMINI_2_5_FLASH_LITE',\n GEMINI_2_5_FLASH_IMAGE = 'GEMINI_2_5_FLASH_IMAGE',\n GEMINI_2_5_COMPUTER_USE = 'GEMINI_2_5_COMPUTER_USE',\n GEMINI_3_0_PRO = 'GEMINI_3_0_PRO',\n GEMINI_3_0_PRO_IMAGE = 'GEMINI_3_0_PRO_IMAGE',\n GEMINI_3_0_FLASH = 'GEMINI_3_0_FLASH',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ModelWithLiterals =\n | GoogleproxyV1Model\n | 'UNKNOWN_MODEL'\n | 'GEMINI_1_0_PRO'\n | 'GEMINI_1_0_PRO_VISION'\n | 'GEMINI_1_5_PRO'\n | 'GEMINI_1_5_FLASH'\n | 'GEMINI_2_0_FLASH'\n | 'GEMINI_2_0_FLASH_LITE'\n | 'GEMINI_2_5_PRO'\n | 'GEMINI_2_5_FLASH'\n | 'GEMINI_2_5_FLASH_LITE'\n | 'GEMINI_2_5_FLASH_IMAGE'\n | 'GEMINI_2_5_COMPUTER_USE'\n | 'GEMINI_3_0_PRO'\n | 'GEMINI_3_0_PRO_IMAGE'\n | 'GEMINI_3_0_FLASH';\n\nexport interface Content {\n /**\n * The role in a conversation associated with the content.\n * Specifying a role is required even in single turn use cases. Acceptable values include the following:\n * USER: Specifies content that's sent by you. MODEL: Specifies the model's response.\n */\n role?: ContentRoleWithLiterals;\n /**\n * Ordered parts that make up the input. Parts may have different MIME types.\n * For gemini-1.0-pro, only the text field is valid. 
The token limit is 32k.\n * For gemini-1.0-pro-vision, you may specify either text only, text and up to 16 images, or text and 1 video. The token limit is 16k.\n * @maxSize 1000\n */\n parts?: V1ContentPart[];\n}\n\nexport enum ContentRole {\n UNKNOWN_CONTENT_ROLE = 'UNKNOWN_CONTENT_ROLE',\n USER = 'USER',\n MODEL = 'MODEL',\n}\n\n/** @enumType */\nexport type ContentRoleWithLiterals =\n | ContentRole\n | 'UNKNOWN_CONTENT_ROLE'\n | 'USER'\n | 'MODEL';\n\nexport interface V1ContentPart {\n /**\n * Union field data can be only one of the following:\n * The text instructions or chat dialogue to include in the prompt.\n * @maxLength 1000000000\n */\n text?: string | null;\n /** data field not supported for gemini-1.0-pro */\n contentData?: ContentData;\n /** A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. */\n functionCall?: FunctionCall;\n /**\n * The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the\n * function is used as context to the model. This should contain the result of a FunctionCall made based on model prediction.\n */\n functionResponse?: FunctionResponse;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution tool, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /** Optional. Media resolution level for the input media. */\n mediaResolution?: MediaResolution;\n /** Thought flag indicates that the content part is a thought. */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ContentData {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * The MIME type of the content data. Supported types are image/jpeg, image/png.\n * @maxLength 100\n */\n mimeType?: string | null;\n}\n\nexport interface FunctionCall {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional. The function parameters and values in JSON object format. */\n args?: Record<string, any> | null;\n}\n\nexport interface FunctionResponse {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string;\n /** Required. The function response in JSON object format. */\n response?: Record<string, any> | null;\n}\n\nexport interface ExecutableCode {\n /** Required. Programming language of the code. */\n language?: LanguageWithLiterals;\n /**\n * Required. The code to be executed.\n * @maxLength 100000\n */\n code?: string;\n}\n\nexport enum Language {\n /** Unspecified language. This value should not be used. 
*/\n LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED',\n /** Python >= 3.10, with numpy and simpy available. */\n PYTHON = 'PYTHON',\n}\n\n/** @enumType */\nexport type LanguageWithLiterals = Language | 'LANGUAGE_UNSPECIFIED' | 'PYTHON';\n\nexport interface V1CodeExecutionResult {\n /** Required. Outcome of the code execution. */\n outcome?: OutcomeWithLiterals;\n /**\n * Optional. Contains stdout when code execution is successful, stderr or other description otherwise.\n * @maxLength 100000\n */\n output?: string | null;\n}\n\nexport enum Outcome {\n /** Unspecified status. This value should not be used. */\n OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED',\n /** Code execution completed successfully. */\n OUTCOME_OK = 'OUTCOME_OK',\n /** Code execution finished but with a failure. stderr should contain the reason. */\n OUTCOME_FAILED = 'OUTCOME_FAILED',\n /** Code execution ran for too long, and was cancelled. There may or may not be a partial output present. */\n OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED',\n}\n\n/** @enumType */\nexport type OutcomeWithLiterals =\n | Outcome\n | 'OUTCOME_UNSPECIFIED'\n | 'OUTCOME_OK'\n | 'OUTCOME_FAILED'\n | 'OUTCOME_DEADLINE_EXCEEDED';\n\n/**\n * Raw media bytes.\n * Text should not be sent as raw bytes, use the 'text' field.\n */\nexport interface Blob {\n /**\n * The IANA standard MIME type of the source data.\n * Examples: - image/png - image/jpeg\n * If an unsupported MIME type is provided, an error will be returned.\n * For a complete list of supported types, see https://ai.google.dev/gemini-api/docs/file-prompting-strategies#supported_file_formats.\n * @maxLength 100\n */\n mimeType?: string;\n /**\n * Represents raw bytes for media formats. Will be fetched from the passed URL in request, and uploaded to WixMP URL in response.\n * @format WEB_URL\n */\n data?: string;\n}\n\nexport interface MediaResolution {\n /** Media resolution level */\n level?: MediaResolutionLevelWithLiterals;\n}\n\nexport enum MediaResolutionLevel {\n /** Media resolution has not been set. */\n MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED',\n /** Media resolution set to low (64 tokens). */\n MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW',\n /** Media resolution set to medium (256 tokens). */\n MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM',\n /** Media resolution set to high (zoomed reframing with 256 tokens). */\n MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH',\n}\n\n/** @enumType */\nexport type MediaResolutionLevelWithLiterals =\n | MediaResolutionLevel\n | 'MEDIA_RESOLUTION_UNSPECIFIED'\n | 'MEDIA_RESOLUTION_LOW'\n | 'MEDIA_RESOLUTION_MEDIUM'\n | 'MEDIA_RESOLUTION_HIGH';\n\nexport interface SystemInstruction {\n /**\n * The role field of systemInstruction is ignored and doesn't affect the performance of the model.\n * @maxLength 20\n */\n role?: string | null;\n /**\n * Instructions for the model to steer it toward better performance.\n * The text strings count toward the token limit.\n * @maxSize 10\n */\n parts?: V1ContentPart[];\n}\n\nexport interface GoogleproxyV1Tool {\n /**\n * One or more function declarations\n * More information about the function declarations :\n * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling\n * @maxSize 1000\n */\n functionDeclarations?: FunctionDeclaration[];\n /** Optional. Retrieval tool that is powered by Google search. */\n googleSearchRetrieval?: GoogleSearchRetrieval;\n /** Optional. Enables the model to execute code as part of generation. 
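\n *\n * Enabling it only requires the empty marker object, as in this illustrative sketch:\n * @example\n * const tools: GoogleproxyV1Tool[] = [{ codeExecution: {} }];\n 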
*/\n codeExecution?: CodeExecution;\n /** Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. */\n googleSearch?: GoogleSearch;\n /**\n * Optional. Tool to support the model interacting directly with the computer.\n * If enabled, it automatically populates computer-use specific Function Declarations.\n */\n computerUse?: ComputerUse;\n}\n\nexport enum DynamicRetrievalConfigMode {\n /** Always trigger retrieval. */\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n /** Run retrieval only when system decides it is necessary. */\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\n/** @enumType */\nexport type DynamicRetrievalConfigModeWithLiterals =\n | DynamicRetrievalConfigMode\n | 'MODE_UNSPECIFIED'\n | 'MODE_DYNAMIC';\n\nexport interface DynamicRetrievalConfig {\n /** The mode of the predictor to be used in dynamic retrieval. */\n mode?: DynamicRetrievalConfigModeWithLiterals;\n /** The threshold to be used in dynamic retrieval. If not set, a system default value is used. */\n dynamicThreshold?: string | null;\n}\n\nexport enum Environment {\n /** Defaults to browser. */\n ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED',\n /** Operates in a web browser. */\n ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER',\n}\n\n/** @enumType */\nexport type EnvironmentWithLiterals =\n | Environment\n | 'ENVIRONMENT_UNSPECIFIED'\n | 'ENVIRONMENT_BROWSER';\n\nexport interface FunctionDeclaration {\n /**\n * The name of the function to call. Must start with a letter or an underscore.\n * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description and purpose of the function. The model uses this to decide how and whether to call the function.\n * For the best results, we recommend that you include a description.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * The parameters of this function in a format that's compatible with the OpenAPI schema object:\n * https://spec.openapis.org/oas/v3.0.3#schema\n */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GoogleSearchRetrieval {\n /** Specifies the dynamic retrieval configuration for the given source. */\n dynamicRetrievalConfig?: DynamicRetrievalConfig;\n}\n\nexport interface CodeExecution {}\n\nexport interface GoogleSearch {}\n\nexport interface ComputerUse {\n /** Required. The environment being operated. */\n environment?: EnvironmentWithLiterals;\n /**\n * Optional. By default, predefined functions are included in the final model call.\n * Some of them can be explicitly excluded from being automatically included.\n * This can serve two purposes:\n * 1. Using a more restricted / different action space.\n * 2. Improving the definitions / instructions of predefined functions.\n * @maxSize 100\n * @maxLength 1000\n */\n excludedPredefinedFunctions?: string[];\n}\n\nexport interface SafetySetting {\n /** The safety category to configure a threshold for. */\n category?: HarmCategoryWithLiterals;\n /** The threshold for blocking responses that could belong to the specified safety category based on probability. 
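\n *\n * For example, blocking medium-probability-and-above hate speech might look like the\n * following sketch (illustrative only):\n * @example\n * const setting: SafetySetting = {\n *   category: 'HARM_CATEGORY_HATE_SPEECH',\n *   threshold: 'BLOCK_MED_AND_ABOVE',\n * };\n 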
*/\n threshold?: ThresholdWithLiterals;\n}\n\nexport enum HarmCategory {\n UNKNOWN_CATEGORY = 'UNKNOWN_CATEGORY',\n HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',\n}\n\n/** @enumType */\nexport type HarmCategoryWithLiterals =\n | HarmCategory\n | 'UNKNOWN_CATEGORY'\n | 'HARM_CATEGORY_SEXUALLY_EXPLICIT'\n | 'HARM_CATEGORY_HATE_SPEECH'\n | 'HARM_CATEGORY_HARASSMENT'\n | 'HARM_CATEGORY_DANGEROUS_CONTENT';\n\nexport enum Threshold {\n UNKNOWN_THRESHOLD = 'UNKNOWN_THRESHOLD',\n BLOCK_NONE = 'BLOCK_NONE',\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MED_AND_ABOVE = 'BLOCK_MED_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n}\n\n/** @enumType */\nexport type ThresholdWithLiterals =\n | Threshold\n | 'UNKNOWN_THRESHOLD'\n | 'BLOCK_NONE'\n | 'BLOCK_LOW_AND_ABOVE'\n | 'BLOCK_MED_AND_ABOVE'\n | 'BLOCK_ONLY_HIGH';\n\nexport interface GenerationConfig {\n /**\n * The temperature is used for sampling during the response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection.\n * Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response,\n * while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic:\n * the highest probability response is always selected.\n * Range: 0.0 - 1.0, Default for gemini-1.0-pro: 0.9, Default for gemini-1.0-pro-vision: 0.4\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters.\n * 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for potentially longer responses.\n * Range for gemini-1.0-pro: 1-8192 (default: 8192),\n * Range for gemini-1.0-pro-vision: 1-2048 (default: 2048)\n * Range for gemini-2.5-pro: 1-65536\n * @min 1\n * @max 65536\n */\n maxOutputTokens?: string | null;\n /**\n * Top-K changes how the model selects tokens for output.\n * A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-K of 3 means that the next token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled.\n * Then tokens are further filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default for gemini-1.0-pro-vision: 32, Default for gemini-1.0-pro: none\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output.\n * Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value.\n * For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5,\n * then the model will select either A or B as the next token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default: 1.0\n * @max 1\n */\n topP?: number | null;\n /**\n * The number of response 
variations to return. This value must be 1.\n * @min 1\n * @max 1\n */\n candidateCount?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in the response.\n * If a string appears multiple times in the response, then the response truncates where it's first encountered. The strings are case-sensitive.\n * For example, if the following is the returned response when stopSequences isn't specified:\n * public static string reverse(string myString)\n * Then the returned response with stopSequences set to [\"Str\",\"reverse\"] is:\n * public static string\n * Maximum 5 items in the list.\n * @maxSize 5\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * Available for gemini-1.5-pro.\n * The output format of the generated candidate text.\n * Supported MIME types: text/plain: (default) Text output. application/json: JSON response in the candidates.\n * text/x.enum: For classification tasks, output an enum value as defined in the response schema.\n * How to control the output format: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output\n * @maxLength 50\n */\n responseMimeType?: string | null;\n /**\n * Available for gemini-1.5-pro.\n * The schema that generated candidate text must follow. For more information, see Control generated output.\n * You must specify the responseType or responseMimeType field to use this parameter.\n * Link for examples: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema\n */\n responseSchema?: Record<string, any> | null;\n /**\n * Optional. Output schema of the generated response. This is an alternative to responseSchema that accepts JSON Schema.\n * If set, responseSchema must be omitted, but responseMimeType is required.\n * While the full JSON Schema may be sent, not all features are supported.\n * More information about supported features and examples can be found here:\n * https://ai.google.dev/api/generate-content#FIELDS.response_json_schema\n */\n responseJsonSchema?: Record<string, any> | null;\n /** Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking. */\n thinkingConfig?: GenerationThinkingConfig;\n /**\n * Optional. The requested modalities of the response.\n * Represents the set of modalities that the model can return, and should be expected in the response.\n * This is an exact match to the modalities of the response.\n * A model may have multiple combinations of supported modalities.\n * If the requested modalities do not match any of the supported combinations, an error will be returned.\n * An empty list is equivalent to requesting only TEXT.\n * Currently supported as an experimental feature for gemini-2.0-flash only.\n * @maxSize 5\n */\n responseModalities?: ModalityWithLiterals[];\n /**\n * Optional. 
Configuration for image generation.\n * This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people.\n */\n imageConfig?: ImageConfig;\n /**\n * The media_resolution parameter controls how the Gemini API processes media inputs like images, videos,\n * and PDF documents by determining the maximum number of tokens allocated for media inputs,\n * allowing you to balance response quality against latency and cost.\n */\n mediaResolution?: MediaResolutionLevelWithLiterals;\n}\n\nexport interface GenerationThinkingConfig {\n /** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */\n includeThoughts?: boolean | null;\n /** The number of thoughts tokens that the model should generate. */\n thinkingBudget?: string | null;\n /**\n * Thinking level parameter offering 2 states:\n * Low: Minimizes latency and cost. Best for simple instruction following or chat.\n * High: Maximizes reasoning depth. Default. Dynamic thinking.\n * The model may take significantly longer to reach a first token,\n * but the output will be more thoroughly vetted.\n * Note: You cannot use both thinking_level and the legacy thinking_budget parameter in the same request. Doing so will return a 400 error\n * @maxLength 20\n */\n thinkingLevel?: string | null;\n}\n\nexport enum Modality {\n UNKNOWN_MODALITY = 'UNKNOWN_MODALITY',\n /** Indicates the model should return text. */\n TEXT = 'TEXT',\n /** Indicates the model should return images. */\n IMAGE = 'IMAGE',\n /** Indicates the model should return audio. */\n AUDIO = 'AUDIO',\n}\n\n/** @enumType */\nexport type ModalityWithLiterals =\n | Modality\n | 'UNKNOWN_MODALITY'\n | 'TEXT'\n | 'IMAGE'\n | 'AUDIO';\n\nexport interface ImageConfig {\n /** Optional. The image output format for generated images. */\n imageOutputOptions?: ImageOutputOptions;\n /**\n * Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported:\n * \"1:1\" \"2:3\", \"3:2\" \"3:4\", \"4:3\" \"4:5\", \"5:4\" \"9:16\", \"16:9\" \"21:9\"\n * @maxLength 10\n */\n aspectRatio?: string | null;\n /** Optional. Controls whether the model can generate people. */\n personGeneration?: PersonGenerationWithLiterals;\n}\n\nexport interface ImageOutputOptions {\n /**\n * Optional. The image format that the output should be saved as.\n * @maxLength 100\n */\n mimeType?: string | null;\n /** Optional. The compression quality of the output image. */\n compressionQuality?: string | null;\n}\n\nexport enum PersonGeneration {\n /** The default behavior is unspecified. The model will decide whether to generate images of people. */\n PERSON_GENERATION_UNSPECIFIED = 'PERSON_GENERATION_UNSPECIFIED',\n /** Allows the model to generate images of people, including adults and children. */\n ALLOW_ALL = 'ALLOW_ALL',\n /** Allows the model to generate images of adults, but not children. */\n ALLOW_ADULT = 'ALLOW_ADULT',\n /** Prevents the model from generating images of people. */\n ALLOW_NONE = 'ALLOW_NONE',\n}\n\n/** @enumType */\nexport type PersonGenerationWithLiterals =\n | PersonGeneration\n | 'PERSON_GENERATION_UNSPECIFIED'\n | 'ALLOW_ALL'\n | 'ALLOW_ADULT'\n | 'ALLOW_NONE';\n\nexport interface V1ToolConfig {\n /** Function calling config. */\n functionCallingConfig?: FunctionCallingConfig;\n}\n\nexport interface FunctionCallingConfig {\n /** Specifies the mode in which function calling should execute. 
If unspecified, the default value will be set to AUTO. */\n mode?: ModeWithLiterals;\n /**\n * A set of function names that, when provided, limits the functions the model will call.\n * This should only be set when the Mode is ANY or VALIDATED. Function names should match [FunctionDeclaration.name]. When set, model will predict a function call from only allowed function names.\n * @maxLength 64\n * @maxSize 100\n */\n allowedFunctionNames?: string[];\n}\n\nexport enum Mode {\n UNKNOWN = 'UNKNOWN',\n /** Default model behavior, model decides to predict either a function call or a natural language response. */\n AUTO = 'AUTO',\n /**\n * Model is constrained to always predicting a function call only. If \"allowedFunctionNames\" are set, the predicted function call will be limited to any one of \"allowedFunctionNames\",\n * else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n ANY = 'ANY',\n /** Model will not predict any function call. Model behavior is same as when not passing any function declarations. */\n NONE = 'NONE',\n /**\n * Model decides to predict either a function call or a natural language response, but will validate function calls with constrained decoding. If \"allowedFunctionNames\" are set, the predicted function call will be\n * limited to any one of \"allowedFunctionNames\", else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n VALIDATED = 'VALIDATED',\n}\n\n/** @enumType */\nexport type ModeWithLiterals =\n | Mode\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'NONE'\n | 'VALIDATED';\n\nexport interface FineTuningSpec {\n /**\n * Endpoint ID of the fine-tuning model to use.\n * @maxLength 100\n */\n id?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
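\n *\n * A minimal invocation sketch (values are illustrative assumptions; the plain `text`\n * block is deprecated in favor of `textContent`):\n * @example\n * const request: InvokeAnthropicClaudeModelRequest = {\n *   model: 'CLAUDE_3_5_SONNET_2_0',\n *   maxTokens: 1024,\n *   messages: [{ role: 'USER', content: [{ text: 'Summarize this paragraph.' }] }],\n * };\n 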
*/\n model?: ModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
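\n * @example\n * // Editorial sketch; a minimal JSON-schema-style payload for one hypothetical 'city' parameter.\n * const schema: InputSchema = {\n * type: 'object',\n * properties: { city: { type: 'string', description: 'City name' } },\n * required: ['city'],\n * };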
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: TypeWithLiterals;\n}\n\nexport enum Type {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type TypeWithLiterals = Type | 'UNKNOWN' | 'EPHEMERAL';\n\nexport enum Model {\n UNKNOWN = 'UNKNOWN',\n /** anthropic.claude-3-sonnet-20240229-v1:0 */\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n /** anthropic.claude-3-haiku-20240307-v1:0 */\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n /** anthropic.claude-3-5-sonnet-20240620-v1:0 */\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n /** anthropic.claude-3-5-sonnet-20241022-v2:0 */\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n /** us.anthropic.claude-3-5-haiku-20241022-v1:0 */\n CLAUDE_3_5_HAIKU_1_0 = 'CLAUDE_3_5_HAIKU_1_0',\n /** us.anthropic.claude-3-7-sonnet-20250219-v1:0 */\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n /** us.anthropic.claude-sonnet-4-5-20250929-v1:0 */\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n /** us.anthropic.claude-haiku-4-5-20251001-v1:0 */\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ModelWithLiterals =\n | Model\n | 'UNKNOWN'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_5_HAIKU_1_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: RoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: ContentBlock[];\n}\n\nexport enum Role {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type RoleWithLiterals = Role | 'UNKNOWN' | 'USER' | 'ASSISTANT';\n\nexport interface ContentBlock extends ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\n/** @oneof */\nexport interface ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. 
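\n * @example\n * // Editorial sketch; the tool name, id, and input are hypothetical.\n * const block: ContentBlock = {\n * toolUse: { id: 'toolu_01', name: 'get_weather', input: { city: 'Paris' } },\n * };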
*/\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\nexport interface Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: MediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport enum MediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type MediaTypeWithLiterals =\n | MediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface SimpleContentBlock extends SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\n/** @oneof */\nexport interface SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\nexport interface Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface Tool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n}\n\nexport enum ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type ToolChoiceTypeWithLiterals =\n | ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: ToolConfiguration;\n}\n\nexport enum McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type McpServerTypeWithLiterals = McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface ToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
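\n * @example\n * // Editorial sketch; shows the V1 request shape with extended thinking enabled (values illustrative).\n * const request: V1InvokeAnthropicClaudeModelRequest = {\n * model: 'CLAUDE_3_7_SONNET_1_0',\n * maxTokens: 4096,\n * thinking: { enabled: true, budgetTokens: 1024 },\n * messages: [{ role: 'USER', content: [{ textContent: { text: 'Walk me through your reasoning.' } }] }],\n * };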
*/\n model?: ClaudeModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: V1AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: GoogleproxyV1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: InvokeAnthropicClaudeModelRequestTool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: GoogleproxyV1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: GoogleproxyV1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: GoogleproxyV1McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface GoogleproxyV1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface GoogleproxyV1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type. */\n type?: V1CacheControlTypeWithLiterals;\n}\n\nexport enum V1CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type V1CacheControlTypeWithLiterals =\n | V1CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\nexport enum ClaudeModel {\n UNKNOWN_CLAUDE_MODEL = 'UNKNOWN_CLAUDE_MODEL',\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_OPUS_1_0 = 'CLAUDE_3_OPUS_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ClaudeModelWithLiterals =\n | ClaudeModel\n | 'UNKNOWN_CLAUDE_MODEL'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_OPUS_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface V1AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum V1MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type V1MessageRoleRoleWithLiterals =\n | V1MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface GoogleproxyV1ContentBlock\n extends GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. 
*/\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: V1ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport enum V1ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type V1ImageMediaTypeMediaTypeWithLiterals =\n | V1ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface GoogleproxyV1ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: V1SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface V1SimpleContentBlock extends V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\n/** @oneof */\nexport interface V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. 
*/\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\nexport interface GoogleproxyV1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface GoogleproxyV1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface InvokeAnthropicClaudeModelRequestTool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: GoogleproxyV1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface GoogleproxyV1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: GoogleproxyV1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum GoogleproxyV1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ToolChoiceTypeWithLiterals =\n | GoogleproxyV1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface GoogleproxyV1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface GoogleproxyV1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: GoogleproxyV1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: V1McpServerToolConfiguration;\n}\n\nexport enum GoogleproxyV1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1McpServerTypeWithLiterals =\n | GoogleproxyV1McpServerType\n | 'UNKNOWN'\n | 'URL';\n\nexport interface V1McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface InvokeAnthropicModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: AnthropicModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicMessage[];\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: V1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: V1Tool[];\n /**\n * How the model should use the provided tools. 
The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: V1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: V1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: V1McpServer[];\n /**\n * Container identifier for reuse across requests.\n * @maxLength 512\n */\n container?: string | null;\n /** An object describing metadata about the request. */\n metadata?: RequestMetadata;\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport enum AnthropicModel {\n UNKNOWN_ANTHROPIC_MODEL = 'UNKNOWN_ANTHROPIC_MODEL',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_1_OPUS_1_0 = 'CLAUDE_4_1_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type AnthropicModelWithLiterals =\n | AnthropicModel\n | 'UNKNOWN_ANTHROPIC_MODEL'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_1_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicMessage {\n /** The role of the message author. */\n role?: MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type MessageRoleRoleWithLiterals =\n | MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\n/** Content object used in both request and response */\nexport interface V1ContentBlock extends V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. 
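\n * @example\n * // Editorial sketch of the request/response pairing; ids and names are hypothetical.\n * const use: V1ContentBlock = {\n * mcpToolUse: { id: 'mcp_01', name: 'lookup', serverName: 'docs-server', input: { query: 'ttl' } },\n * };\n * const result: V1ContentBlock = {\n * mcpToolResult: { toolUseId: 'mcp_01', content: [{ text: { text: 'Found 2 entries.' } }] },\n * };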
*/\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\n/** @oneof */\nexport interface V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\nexport interface V1Text {\n /**\n * Text content.\n * @maxLength 1000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n /**\n * Structured citations for this text block.\n * Populated by the model when citations are enabled.\n * @maxSize 256\n */\n citations?: Citation[];\n}\n\nexport interface V1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: CacheControlTypeWithLiterals;\n /**\n * The time-to-live for the cache control breakpoint. 
This may be one of the following values:\n * 5m: 5 minutes (default)\n * 1h: 1 hour\n * @maxLength 50\n */\n ttl?: string | null;\n}\n\nexport enum CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type CacheControlTypeWithLiterals =\n | CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\n/** Unified wrapper for all citation kinds (attach to Text.citations). */\nexport interface Citation extends CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\n/** @oneof */\nexport interface CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\nexport interface CharLocationCitation {\n /**\n * Should be \"char_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start character index (inclusive) within the document text. */\n startCharIndex?: number | null;\n /** 0-based end character index (exclusive) within the document text. */\n endCharIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface PageLocationCitation {\n /**\n * Should be \"page_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 1-based start page number (inclusive). */\n startPageNumber?: number | null;\n /** 1-based end page number (exclusive). */\n endPageNumber?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface ContentBlockLocationCitation {\n /**\n * Should be \"content_block_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start content-block index (inclusive) within the custom document. */\n startBlockIndex?: number | null;\n /** 0-based end content-block index (exclusive) within the custom document. 
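\n * @example\n * // Editorial sketch: startBlockIndex 0 with endBlockIndex 2 cites blocks 0 and 1 (the end index is excluded).\n * const citation: ContentBlockLocationCitation = { type: 'content_block_location', documentIndex: 0, startBlockIndex: 0, endBlockIndex: 2 };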
*/\n endBlockIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface WebSearchResultLocationCitation {\n /**\n * Should be \"web_search_result_location\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the cited source\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the cited source\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * A reference that must be passed back for multi-turn conversations.\n * @maxLength 1000000\n */\n encryptedIndex?: string | null;\n /**\n * Up to 150 characters of the cited content\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface SearchResultLocationCitation {\n /**\n * Should be \"search_result_location\".\n * @maxLength 500\n */\n type?: string;\n /** Index of the search_result within the current turn (0-based). */\n searchResultIndex?: number | null;\n /** 0-based start block index within that search_result's content. */\n startBlockIndex?: number | null;\n /** 0-based end block index within that search_result's content. */\n endBlockIndex?: number | null;\n /**\n * Source string\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Optional title (same as search_result.title).\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional quoted snippet\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface V1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport enum ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type ImageMediaTypeMediaTypeWithLiterals =\n | ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface V1ToolUse {\n /**\n * Tool use id\n * @maxLength 512\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Optional: enable tool use caching */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1ToolResult {\n /**\n * Tool use id\n * @maxLength 512\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: ToolResultContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface ToolResultContentBlock\n extends ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\n/** @oneof */\nexport interface ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. 
Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\nexport interface DocumentContent {\n /**\n * Should be \"document\"\n * @maxLength 500\n */\n type?: string;\n /** Citable payload or reference. */\n source?: DocumentSource;\n /**\n * Optional: Document title\n * Can be passed to the model but not used towards cited content.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional: Any document metadata as text or stringified json.\n * Can be passed to the model but not used towards cited content.\n * @maxLength 1000000\n */\n context?: string | null;\n /** Enable citations for this doc */\n citations?: CitationsEnabled;\n /** Optional: Cache the document content */\n cacheControl?: V1CacheControl;\n}\n\nexport interface DocumentSource {\n /**\n * One of: \"text\" | \"base64\" | \"content\" | \"file\" | \"url\".\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Required types \"text\"/\"base64\" (e.g., \"text/plain\", \"application/pdf\").\n * @maxLength 500\n */\n mediaType?: string | null;\n /**\n * For type \"text\": raw text. For \"base64\": bytes as base64.\n * @maxLength 10000000\n */\n data?: string | null;\n /**\n * For type \"file\": Files API id (e.g., \"file_01...\")\n * @maxLength 5000\n */\n fileId?: string | null;\n /**\n * For type \"url\": absolute URL to the document\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * For type \"content\": custom content block; Only text blocks are citable\n * @maxSize 500\n */\n content?: V1ContentBlock[];\n}\n\nexport interface CitationsEnabled {\n /** Whether to enable citations */\n enabled?: boolean | null;\n}\n\nexport interface ToolResultSearchResult {\n /**\n * Should be \"search_result\".\n * @maxLength 500\n */\n type?: string;\n /**\n * Where this result came from (URL or source label).\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Human-readable title for the result.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Inline text snippets that summarize/support the result.\n * @maxSize 1000\n */\n content?: V1Text[];\n /**\n * Enable/disable citations for this result's content.\n * Matches Anthropic \"citations\" on search_result blocks.\n */\n citations?: CitationsEnabled;\n}\n\nexport interface V1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface V1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\n/**\n * Assistant requests a Model Context Protocol (MCP) tool call.\n * Pair with ToolResult using the same `id`.\n */\nexport interface McpToolUse {\n /**\n * Unique id for this tool call; must match McpToolResult.tool_use_id.\n * @maxLength 512\n */\n id?: string | null;\n /**\n * Tool name as exposed by the MCP server.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Which MCP server to call (must match a server in the request).\n * @maxLength 1000\n */\n serverName?: string | null;\n /** JSON arguments for the tool (object per the tool's schema). */\n input?: Record<string, any> | null;\n}\n\n/**\n * Server-tool invocation announced by the ASSISTANT for Anthropic-run tools\n * (e.g., \"web_search\", \"code_execution\").\n */\nexport interface ServerToolUse {\n /**\n * Should be \"server_tool_use\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n id?: string | null;\n /**\n * The tool name. Available options: \"web_search\" | \"web_fetch\" | \"code_execution\" | \"bash_code_execution\" | \"text_editor_code_execution\"\n * @maxLength 500\n */\n name?: string | null;\n /**\n * Tool-specific parameters object:\n * web_search → { \"query\": \"<string>\" }\n * web_fetch → { \"url\": \"<string>\" }\n * code_execution→ { \"code\": \"<python source>\" }\n */\n input?: Record<string, any> | null;\n}\n\n/** Server tool result (web search). Either results[] OR error. */\nexport interface WebSearchToolResult extends WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n /**\n * Should be \"web_search_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n}\n\n/** Success payload: the JSON `content` ARRAY of result items. 
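\n * @example\n * // Editorial sketch; the URL, title, and date are illustrative.\n * const list: WebSearchResultList = {\n * items: [{ type: 'web_search_result', url: 'https://example.com/post', title: 'Example post', pageAge: 'April 30, 2025' }],\n * };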
*/\nexport interface WebSearchResultList {\n /**\n * Result items\n * @maxSize 1000\n */\n items?: WebSearchResult[];\n}\n\n/**\n * One search result item.\n * Per the docs, search results include: url, title, page_age, encrypted_content.\n * Each item also carries the literal type field.\n */\nexport interface WebSearchResult {\n /**\n * Should be \"web_search_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the source page.\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the source page.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * When the site was last updated (e.g., \"April 30, 2025\").\n * @maxLength 100\n */\n pageAge?: string | null;\n /**\n * Encrypted content that must be passed back in multi-turn conversations for citations.\n * @maxLength 1000000\n */\n encryptedContent?: string | null;\n}\n\n/**\n * Error payload.\n * Possible error codes: too_many_requests | invalid_input | max_uses_exceeded | query_too_long | unavailable\n */\nexport interface WebSearchToolResultError {\n /**\n * Should be \"web_search_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface CodeExecutionToolResult\n extends CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n /**\n * Should be \"code_execution_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n}\n\n/** Success payload for code execution. */\nexport interface CodeExecutionResult {\n /**\n * Should be \"code_execution_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Output from successful execution (print, etc.).\n * @maxLength 1000000\n */\n stdout?: string | null;\n /**\n * Error messages emitted by the program.\n * @maxLength 1000000\n */\n stderr?: string | null;\n /** 0 = success, non-zero = failure. */\n returnCode?: number | null;\n /**\n * Optional: Array of produced artifacts.\n * Example item (typical): { \"file_id\": \"file_abc123\", ... }\n * @maxSize 4096\n */\n content?: Record<string, any>[] | null;\n}\n\n/**\n * Error payload (HTTP 200; the error lives in the result body).\n * Documented error codes: unavailable | code_execution_exceeded | container_expired\n */\nexport interface CodeExecutionToolResultError {\n /**\n * Should be \"code_execution_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value, e.g. 
\"unavailable\", \"code_execution_exceeded\", \"container_expired\".\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface ContainerUpload {\n /**\n * Should be \"container_upload\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * File identifier returned by the Files API (e.g., \"file_01abc...\").\n * @maxLength 5000\n */\n fileId?: string | null;\n}\n\n/** Web fetch tool result */\nexport interface WebFetchToolResult extends WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n /**\n * Should be \"web_fetch_tool_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n}\n\nexport interface WebFetchToolResultContentSuccess {\n /**\n * Should be \"web_fetch_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * The URL that was fetched\n * @maxLength 10000\n */\n url?: string | null;\n /** A document block containing the fetched content */\n content?: DocumentContent;\n /**\n * Timestamp when the content was retrieved\n * @maxLength 256\n */\n retrievedAt?: string | null;\n}\n\nexport interface WebFetchToolResultContentError {\n /**\n * Should be \"web_fetch_tool_result_error\"\n * @maxLength 500\n */\n type?: string;\n /**\n * These are the possible error codes:\n * - invalid_tool_input: Invalid URL format\n * - url_too_long: URL exceeds maximum length (250 characters)\n * - url_not_allowed: URL blocked by domain filtering rules and model restrictions\n * - url_not_accessible: Failed to fetch content (HTTP error)\n * - too_many_requests: Rate limit exceeded\n * - unsupported_content_type: Content type not supported (only text and PDF)\n * - max_uses_exceeded: Maximum web fetch tool uses exceeded\n * - unavailable: An internal error occurred\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\n/** Top-level tool wrapper. Exactly one branch is set. 
*/\nexport interface V1Tool extends V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\n/** @oneof */\nexport interface V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n 
codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\nexport interface CustomTool {\n /**\n * The name of the tool. Must match the regex ^[a-zA-Z0-9_-]{1,64}$.\n * @maxLength 1000\n */\n name?: string;\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: V1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface ComputerUseTool {\n /** Display width in pixels, recommend ≤1280 */\n displayWidthPx?: number;\n /** Display height in pixels, recommend ≤800 */\n displayHeightPx?: number;\n /** Display number for X11 environments */\n displayNumber?: number | null;\n}\n\nexport interface TextEditorTool {\n /** Parameter to control truncation when viewing large files. Available only for text_editor_20250728 and later. */\n maxCharacters?: number | null;\n}\n\nexport interface BashTool {\n /**\n * Name must be \"bash\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebSearchTool {\n /** Optional: Limit the number of searches per request; exceeding -> error \"max_uses_exceeded\". */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only include results from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never include results from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Localize search results */\n userLocation?: WebSearchUserLocation;\n /** Optional: caches the tool definition only (it will not cache the results) */\n cacheControl?: V1CacheControl;\n}\n\nexport interface WebSearchUserLocation {\n /**\n * The type of location (must be \"approximate\")\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The city name\n * @maxLength 500\n */\n city?: string | null;\n /**\n * The region or state\n * @maxLength 500\n */\n region?: string | null;\n /**\n * The country\n * @maxLength 500\n */\n country?: string | null;\n /**\n * The IANA timezone ID, e.g. 
\"America/Los_Angeles\"\n * @maxLength 500\n */\n timezone?: string | null;\n}\n\nexport interface CodeExecutionTool {\n /**\n * Name must be \"code_execution\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebFetchTool {\n /** Optional: Limit the number of fetches per request */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only fetch from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never fetch from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Enable citations for fetched content */\n citations?: CitationsEnabled;\n /** Optional: Maximum content length in tokens */\n maxContentTokens?: number | null;\n}\n\nexport interface V1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: V1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum V1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n NONE = 'NONE',\n}\n\n/** @enumType */\nexport type V1ToolChoiceTypeWithLiterals =\n | V1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL'\n | 'NONE';\n\nexport interface V1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n * @min 1024\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface V1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: V1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: McpServerToolConfiguration;\n}\n\nexport enum V1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type V1McpServerTypeWithLiterals = V1McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface RequestMetadata {\n /**\n * An external identifier for the user who is associated with the request.\n * This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. 
\nexport interface RequestMetadata {\n /**\n * An external identifier for the user who is associated with the request.\n * This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. Do not include any identifying information such as name, email address, or phone number.\n * Maximum length: 256\n * Examples: \"13803d75-b4b5-4c3e-b2a2-6f21399b021b\"\n * @maxLength 256\n */\n userId?: string | null;\n}\n\nexport interface InvokeLlamaModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: LlamaModelWithLiterals;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport enum LlamaModel {\n UNKNOWN_LLAMA_MODEL = 'UNKNOWN_LLAMA_MODEL',\n /** meta.llama3-8b-instruct-v1:0 */\n LLAMA_3_8B_INSTRUCT_1_0 = 'LLAMA_3_8B_INSTRUCT_1_0',\n /** meta.llama3-70b-instruct-v1:0 */\n LLAMA_3_70B_INSTRUCT_1_0 = 'LLAMA_3_70B_INSTRUCT_1_0',\n /** meta.llama3-1-8b-instruct-v1:0 */\n LLAMA_3_1_8B_INSTRUCT_1_0 = 'LLAMA_3_1_8B_INSTRUCT_1_0',\n /** meta.llama3-1-70b-instruct-v1:0 */\n LLAMA_3_1_70B_INSTRUCT_1_0 = 'LLAMA_3_1_70B_INSTRUCT_1_0',\n /** meta.llama3-2-1b-instruct-v1:0 */\n LLAMA_3_2_1B_INSTRUCT_1_0 = 'LLAMA_3_2_1B_INSTRUCT_1_0',\n /** meta.llama3-2-3b-instruct-v1:0 */\n LLAMA_3_2_3B_INSTRUCT_1_0 = 'LLAMA_3_2_3B_INSTRUCT_1_0',\n}\n\n/** @enumType */\nexport type LlamaModelWithLiterals =\n | LlamaModel\n | 'UNKNOWN_LLAMA_MODEL'\n | 'LLAMA_3_8B_INSTRUCT_1_0'\n | 'LLAMA_3_70B_INSTRUCT_1_0'\n | 'LLAMA_3_1_8B_INSTRUCT_1_0'\n | 'LLAMA_3_1_70B_INSTRUCT_1_0'\n | 'LLAMA_3_2_1B_INSTRUCT_1_0'\n | 'LLAMA_3_2_3B_INSTRUCT_1_0';\n\nexport interface InvokeConverseRequest {\n /** The foundation model to use for this conversation. */\n model?: ConverseModelWithLiterals;\n /**\n * Conversation history and new input. Processed in the order provided.\n * @maxSize 4096\n */\n messages?: ConverseMessage[];\n /** Parameters controlling text generation behavior. */\n inferenceConfig?: ConverseInferenceConfig;\n /** Tool configuration for function calling. */\n toolConfig?: ToolConfig;\n /** Latency optimization settings. */\n performanceConfig?: ConversePerformanceConfig;\n /**\n * System prompts providing high-level instructions. Processed before conversation messages.\n * @maxSize 100\n */\n system?: SystemContentBlock[];\n /** Model-specific parameters as a JSON object. */\n additionalModelRequestFields?: Record<string, any> | null;\n /**\n * JSON paths to extract from the model's raw response.\n * @maxLength 1000\n * @maxSize 100\n */\n additionalModelResponseFieldPaths?: string[];\n}\n\nexport enum ConverseModel {\n UNKNOWN_CONVERSE_MODEL = 'UNKNOWN_CONVERSE_MODEL',\n /** OPEN AI Models */\n OPEN_AI_GPT_OSS_120B = 'OPEN_AI_GPT_OSS_120B',\n /** MiniMax AI */\n MINIMAX_M2 = 'MINIMAX_M2',\n}\n\n/** @enumType */\nexport type ConverseModelWithLiterals =\n | ConverseModel\n | 'UNKNOWN_CONVERSE_MODEL'\n | 'OPEN_AI_GPT_OSS_120B'\n | 'MINIMAX_M2';\n\nexport interface ConverseMessage {\n /** The role that generated this message (user or assistant). 
*/\n role?: RoleWithLiterals;\n /**\n * Content blocks that can include text, tool use, and tool results.\n * @maxSize 4096\n */\n content?: ConverseContentBlock[];\n}\n\n/** Converse-specific content block (simplified structure for AWS Bedrock Converse API) */\nexport interface ConverseContentBlock extends ConverseContentBlockContentOneOf {\n /**\n * Plain text content.\n * @maxLength 100000000\n */\n text?: string;\n /** Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response. */\n reasoningContent?: ConverseReasoningContent;\n /** Tool use block representing a function call request. */\n toolUse?: ConverseToolUse;\n /** Tool result block containing the output of a tool execution. */\n toolResult?: ConverseToolResult;\n}\n\n/** @oneof */\nexport interface ConverseContentBlockContentOneOf {\n /**\n * Plain text content.\n * @maxLength 100000000\n */\n text?: string;\n /** Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response. */\n reasoningContent?: ConverseReasoningContent;\n /** Tool use block representing a function call request. */\n toolUse?: ConverseToolUse;\n /** Tool result block containing the output of a tool execution. */\n toolResult?: ConverseToolResult;\n}\n\nexport interface ConverseReasoningContent {\n /** Contains the reasoning that the model used to return the output. */\n reasoningText?: ReasoningText;\n}\n\nexport interface ReasoningText {\n /**\n * The reasoning that the model used to return the output.\n * @maxLength 100000000\n */\n text?: string;\n}\n\n/** Tool use request from the model */\nexport interface ConverseToolUse {\n /**\n * Unique identifier for this tool use.\n * @maxLength 1000\n */\n toolUseId?: string;\n /**\n * Name of the tool being invoked.\n * @maxLength 1000\n */\n name?: string;\n /** Input parameters for the tool as a JSON object. */\n input?: Record<string, any> | null;\n}\n\n/** Tool execution result */\nexport interface ConverseToolResult {\n /**\n * Identifier matching the tool_use_id from the ToolUse request.\n * @maxLength 1000\n */\n toolUseId?: string;\n /**\n * Result content (text only for now).\n * @maxSize 1000\n */\n content?: ConverseToolResultContent[];\n /**\n * Execution status: 'success' or 'error'.\n * @maxLength 100\n */\n status?: string | null;\n}\n\n/** Tool result content (text only for now) */\nexport interface ConverseToolResultContent\n extends ConverseToolResultContentContentOneOf {\n /**\n * A tool result that is text.\n * @maxLength 100000000\n */\n text?: string;\n /** A tool result that is JSON format data. */\n json?: Record<string, any> | null;\n}\n\n/** @oneof */\nexport interface ConverseToolResultContentContentOneOf {\n /**\n * A tool result that is text.\n * @maxLength 100000000\n */\n text?: string;\n /** A tool result that is JSON format data. */\n json?: Record<string, any> | null;\n}\n\n/** Parameters that control the model's text generation behavior. */\nexport interface ConverseInferenceConfig {\n /**\n * Maximum tokens to generate before stopping.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Randomness in output. Higher values (closer to 1.0) increase creativity.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Nucleus sampling threshold. 
The model considers tokens whose cumulative probability exceeds this value.\n * @max 1\n */\n topP?: number | null;\n /**\n * Text sequences that cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n}\n\n/** Converse-specific tool configuration */\nexport interface ToolConfig {\n /**\n * Available tools for the model.\n * @maxSize 1000\n */\n tools?: ConverseTool[];\n /** How the model should use tools. */\n toolChoice?: ToolChoice;\n}\n\n/** Tool wrapper with specification */\nexport interface ConverseTool {\n /** Tool specification containing name, description, and input schema. */\n toolSpec?: ToolSpecification;\n}\n\n/** Detailed tool specification */\nexport interface ToolSpecification {\n /**\n * Name of the tool.\n * @maxLength 256\n */\n name?: string;\n /**\n * Description of what the tool does.\n * @maxLength 2048\n */\n description?: string | null;\n /** JSON schema for tool input parameters. */\n inputSchema?: ConverseInputSchema;\n}\n\n/** Input schema wrapper */\nexport interface ConverseInputSchema {\n /** JSON schema as a Struct (wraps the schema in \"json\" field for AWS API). */\n json?: Record<string, any> | null;\n}\n\nexport interface ConversePerformanceConfig {\n /**\n * The desired latency profile. Valid values: 'standard' (default) or 'optimized'.\n * @maxLength 100\n */\n latency?: string | null;\n}\n\nexport interface SystemContentBlock {\n /**\n * Text providing high-level instructions or context for the conversation.\n * @maxLength 100000000\n */\n text?: string | null;\n}\n\nexport interface CreateImageRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: V1ImageModelWithLiterals;\n /** The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.\n */\n quality?: ImageQualityWithLiterals;\n /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. */\n size?: ImageSizeWithLiterals;\n /**\n * The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images.\n * Natural causes the model to produce more natural, less hyper-real looking images. 
This param is only supported for dall-e-3.\n */\n style?: ImageStyleWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n}\n\nexport enum V1ImageModel {\n UNKNOWN_IMAGE_GENERATION_MODEL = 'UNKNOWN_IMAGE_GENERATION_MODEL',\n DALL_E_2 = 'DALL_E_2',\n DALL_E_3 = 'DALL_E_3',\n}\n\n/** @enumType */\nexport type V1ImageModelWithLiterals =\n | V1ImageModel\n | 'UNKNOWN_IMAGE_GENERATION_MODEL'\n | 'DALL_E_2'\n | 'DALL_E_3';\n\nexport enum ImageQuality {\n UNKNOWN_IMAGE_QUALITY = 'UNKNOWN_IMAGE_QUALITY',\n STANDARD = 'STANDARD',\n HD = 'HD',\n}\n\n/** @enumType */\nexport type ImageQualityWithLiterals =\n | ImageQuality\n | 'UNKNOWN_IMAGE_QUALITY'\n | 'STANDARD'\n | 'HD';\n\nexport enum ImageSize {\n UNKNOWN_IMAGE_SIZE = 'UNKNOWN_IMAGE_SIZE',\n SIZE_256X256 = 'SIZE_256X256',\n SIZE_512X512 = 'SIZE_512X512',\n SIZE_1024X1024 = 'SIZE_1024X1024',\n SIZE_1792X1024 = 'SIZE_1792X1024',\n SIZE_1024X1792 = 'SIZE_1024X1792',\n}\n\n/** @enumType */\nexport type ImageSizeWithLiterals =\n | ImageSize\n | 'UNKNOWN_IMAGE_SIZE'\n | 'SIZE_256X256'\n | 'SIZE_512X512'\n | 'SIZE_1024X1024'\n | 'SIZE_1792X1024'\n | 'SIZE_1024X1792';\n\nexport enum ImageStyle {\n UNKNOWN_IMAGE_STYLE = 'UNKNOWN_IMAGE_STYLE',\n VIVID = 'VIVID',\n NATURAL = 'NATURAL',\n}\n\n/** @enumType */\nexport type ImageStyleWithLiterals =\n | ImageStyle\n | 'UNKNOWN_IMAGE_STYLE'\n | 'VIVID'\n | 'NATURAL';\n\nexport interface V1TextToImageRequest {\n /** The model to use for generating the image. */\n model?: ImageModelWithLiterals;\n /** Height of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n height?: number | null;\n /** Width of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n width?: number | null;\n /**\n * An array of text prompts to use for generation.\n * @minSize 1\n * @maxSize 10\n */\n textPrompts?: TextPrompt[];\n /** How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt. Default: 7 */\n cfgScale?: number | null;\n /**\n * CLIP Guidance is a technique that uses the CLIP neural network to guide the generation of images to be more in-line with your included prompt,\n * which often results in improved coherency.\n */\n clipGuidancePreset?: ClipGuidancePresetWithLiterals;\n /** Which sampler to use for the diffusion process. If this value is omitted we'll automatically select an appropriate sampler for you. */\n sampler?: SamplerWithLiterals;\n /** Number of images to generate. Default: 1 */\n samples?: number | null;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Number of diffusion steps to run. Default: 30 */\n steps?: number | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. 
This list of style presets is subject to change.\n */\n stylePreset?: TextToImageRequestStylePresetWithLiterals;\n}\n\nexport enum ImageModel {\n STABILITY_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_IMAGE_MODEL_UNSPECIFIED',\n /** stable-diffusion-xl-1024-v1-0 - Stable Diffusion XL v1.0 */\n SDXL_1_0 = 'SDXL_1_0',\n}\n\n/** @enumType */\nexport type ImageModelWithLiterals =\n | ImageModel\n | 'STABILITY_IMAGE_MODEL_UNSPECIFIED'\n | 'SDXL_1_0';\n\nexport interface TextPrompt {\n /**\n * The text to generate the image from.\n * @maxLength 4000\n */\n text?: string | null;\n /** The weight of the text prompt. */\n weight?: number | null;\n}\n\nexport enum ClipGuidancePreset {\n CLIP_GUIDANCE_PRESET_UNSPECIFIED = 'CLIP_GUIDANCE_PRESET_UNSPECIFIED',\n FAST_BLUE = 'FAST_BLUE',\n FAST_GREEN = 'FAST_GREEN',\n NONE = 'NONE',\n SIMPLE = 'SIMPLE',\n SLOW = 'SLOW',\n SLOWER = 'SLOWER',\n SLOWEST = 'SLOWEST',\n}\n\n/** @enumType */\nexport type ClipGuidancePresetWithLiterals =\n | ClipGuidancePreset\n | 'CLIP_GUIDANCE_PRESET_UNSPECIFIED'\n | 'FAST_BLUE'\n | 'FAST_GREEN'\n | 'NONE'\n | 'SIMPLE'\n | 'SLOW'\n | 'SLOWER'\n | 'SLOWEST';\n\nexport enum Sampler {\n SAMPLER_UNSPECIFIED = 'SAMPLER_UNSPECIFIED',\n DDIM = 'DDIM',\n DDPM = 'DDPM',\n K_DPMPP_2M = 'K_DPMPP_2M',\n K_DPMPP_2S_ANCESTRAL = 'K_DPMPP_2S_ANCESTRAL',\n K_DPM_2 = 'K_DPM_2',\n K_DPM_2_ANCESTRAL = 'K_DPM_2_ANCESTRAL',\n K_EULER = 'K_EULER',\n K_EULER_ANCESTRAL = 'K_EULER_ANCESTRAL',\n K_HEUN = 'K_HEUN',\n K_LMS = 'K_LMS',\n}\n\n/** @enumType */\nexport type SamplerWithLiterals =\n | Sampler\n | 'SAMPLER_UNSPECIFIED'\n | 'DDIM'\n | 'DDPM'\n | 'K_DPMPP_2M'\n | 'K_DPMPP_2S_ANCESTRAL'\n | 'K_DPM_2'\n | 'K_DPM_2_ANCESTRAL'\n | 'K_EULER'\n | 'K_EULER_ANCESTRAL'\n | 'K_HEUN'\n | 'K_LMS';\n\nexport enum TextToImageRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type TextToImageRequestStylePresetWithLiterals =\n | TextToImageRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateCoreRequest {\n /** The model to use for generating the image. 
Will always be STABLE_IMAGE_CORE. */\n model?: ImageCoreModelWithLiterals;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Default: 1:1\n * One of: 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Default: 0\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: GenerateCoreRequestStylePresetWithLiterals;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum ImageCoreModel {\n STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED',\n STABLE_IMAGE_CORE = 'STABLE_IMAGE_CORE',\n}\n\n/** @enumType */\nexport type ImageCoreModelWithLiterals =\n | ImageCoreModel\n | 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED'\n | 'STABLE_IMAGE_CORE';\n\nexport enum GenerateCoreRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type GenerateCoreRequestStylePresetWithLiterals =\n | GenerateCoreRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateStableDiffusionRequest {\n /**\n * The text prompt to generate the image from.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Controls whether this is a text-to-image or image-to-image generation.\n * - TEXT_TO_IMAGE requires only the prompt parameter.\n * - IMAGE_TO_IMAGE requires prompt, image, and strength parameters.\n */\n mode?: 
GenerationModeWithLiterals;\n /**\n * The image to use as the starting point for the generation.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Controls how much influence the image parameter has on the output image.\n * A value of 0 yields an image identical to the input; 1 ignores the input image.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n */\n strength?: number | null;\n /**\n * Default: 1:1\n * One of: 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * This parameter is only valid for TEXT_TO_IMAGE mode.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /** The model to use for generation. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Dictates the content-type of the generated image. */\n outputFormat?: GenerateStableDiffusionRequestOutputFormatWithLiterals;\n /**\n * Keywords of what you do not wish to see in the output image.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n}\n\nexport enum GenerationMode {\n UNKNOWN_GENERATION_MODE = 'UNKNOWN_GENERATION_MODE',\n TEXT_TO_IMAGE = 'TEXT_TO_IMAGE',\n IMAGE_TO_IMAGE = 'IMAGE_TO_IMAGE',\n}\n\n/** @enumType */\nexport type GenerationModeWithLiterals =\n | GenerationMode\n | 'UNKNOWN_GENERATION_MODE'\n | 'TEXT_TO_IMAGE'\n | 'IMAGE_TO_IMAGE';\n\nexport enum ImageStableDiffusionModel {\n STABLE_DIFFUSION_MODEL_UNSPECIFIED = 'STABLE_DIFFUSION_MODEL_UNSPECIFIED',\n /** sd3-large */\n SD3_LARGE = 'SD3_LARGE',\n /** sd3-large-turbo */\n SD3_LARGE_TURBO = 'SD3_LARGE_TURBO',\n /** sd3-medium */\n SD3_MEDIUM = 'SD3_MEDIUM',\n /** sd3.5-large */\n SD3_5_LARGE = 'SD3_5_LARGE',\n /** sd3.5-large-turbo */\n SD3_5_LARGE_TURBO = 'SD3_5_LARGE_TURBO',\n /** sd3.5-medium */\n SD3_5_MEDIUM = 'SD3_5_MEDIUM',\n}\n\n/** @enumType */\nexport type ImageStableDiffusionModelWithLiterals =\n | ImageStableDiffusionModel\n | 'STABLE_DIFFUSION_MODEL_UNSPECIFIED'\n | 'SD3_LARGE'\n | 'SD3_LARGE_TURBO'\n | 'SD3_MEDIUM'\n | 'SD3_5_LARGE'\n | 'SD3_5_LARGE_TURBO'\n | 'SD3_5_MEDIUM';\n\nexport enum GenerateStableDiffusionRequestOutputFormat {\n OUTPUT_FORMAT_UNSPECIFIED = 'OUTPUT_FORMAT_UNSPECIFIED',\n JPEG = 'JPEG',\n PNG = 'PNG',\n}\n\n/** @enumType */\nexport type GenerateStableDiffusionRequestOutputFormatWithLiterals =\n | GenerateStableDiffusionRequestOutputFormat\n | 'OUTPUT_FORMAT_UNSPECIFIED'\n | 'JPEG'\n | 'PNG';\n
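/*
 * Editor's usage sketch (illustrative, not part of the generated module): an
 * IMAGE_TO_IMAGE request against SD3.5 Large. The wixstatic URL is a
 * placeholder; per the field docs above it must be a valid wix mp or wix
 * static URL, and aspectRatio is omitted because it only applies to
 * TEXT_TO_IMAGE mode.
 */
const sd3Request: GenerateStableDiffusionRequest = {
  mode: 'IMAGE_TO_IMAGE',
  model: 'SD3_5_LARGE',
  prompt: 'Turn this product photo into a loose watercolor illustration',
  url: 'https://static.wixstatic.com/media/example.jpg', // placeholder image
  strength: 0.6, // 0 keeps the input image unchanged, 1 ignores it
  outputFormat: 'PNG',
};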
\n/** Request to generate an image */\nexport interface GenerateAnImageRequest {\n /** The model to use for generating the image. */\n model?: GenerateAnImageModelWithLiterals;\n /**\n * The prompt to use for image generation.\n * Relevant models : ALL\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Optional seed for reproducibility. If not provided, a random seed will be used.\n * Relevant models : ALL\n */\n seed?: number | null;\n /**\n * Aspect ratio of the image between 21:9 and 9:21\n * default: 16:9\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * Width of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n width?: number | null;\n /**\n * Height of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n height?: number | null;\n /**\n * Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.\n * Relevant models : ALL\n * @max 6\n */\n safetyTolerance?: number | null;\n /**\n * Output format for the generated image. Can be 'jpeg' or 'png'.\n * Relevant models : ALL\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Generate less processed, more natural-looking images\n * Relevant models : FLUX_PRO_1_1_ULTRA\n */\n raw?: boolean | null;\n /**\n * Optional image to remix\n * The URL must be a valid wix mp or wix static URL.\n * Relevant models : FLUX_PRO_1_1_ULTRA, FLUX_1_DEV, FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Blend between the prompt and the image prompt\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @max 1\n */\n imagePromptStrength?: number | null;\n /**\n * Optional image to remix\n * Image to use as control input - relevant models FLUX_PRO_1_DEPTH and FLUX_PRO_1_CANNY\n * @maxLength 100000\n */\n controlImageUrl?: string | null;\n /**\n * Whether to perform upsampling on the prompt\n * Relevant models : FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n */\n promptUpsampling?: boolean | null;\n /**\n * Number of steps for the image generation process\n * Relevant models : FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @min 15\n * @max 50\n */\n steps?: number | null;\n /**\n * Guidance strength for the image generation process\n * Relevant models : FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @max 100\n */\n guidance?: number | null;\n /**\n * Image Mask\n * A URL representing a mask for the areas you want to modify in the image.\n * The mask should be the same dimensions as the image and in black and white.\n * Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting.\n * Optional if you provide an alpha mask in the original image.\n * Validation: The endpoint verifies that the dimensions of the mask match the original image.\n * Relevant models : FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageMaskUrl?: string | null;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum GenerateAnImageModel {\n GEN_IMAGE_MODEL_UNSPECIFIED = 'GEN_IMAGE_MODEL_UNSPECIFIED',\n FLUX_PRO_1_1_ULTRA = 'FLUX_PRO_1_1_ULTRA',\n FLUX_1_DEV = 'FLUX_1_DEV',\n FLUX_PRO_1_CANNY = 'FLUX_PRO_1_CANNY',\n FLUX_PRO_1_DEPTH = 'FLUX_PRO_1_DEPTH',\n FLUX_PRO_1_FILL = 'FLUX_PRO_1_FILL',\n}\n\n/** @enumType */\nexport type GenerateAnImageModelWithLiterals =\n | GenerateAnImageModel\n | 'GEN_IMAGE_MODEL_UNSPECIFIED'\n | 'FLUX_PRO_1_1_ULTRA'\n | 'FLUX_1_DEV'\n | 'FLUX_PRO_1_CANNY'\n | 'FLUX_PRO_1_DEPTH'\n | 'FLUX_PRO_1_FILL';\n\nexport interface CreatePredictionRequest\n extends CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n /** Input 
for qwen-image-layered */\n qwenImageLayered?: QwenImageLayered;\n /** The model version ID */\n model?: CreatePredictionModelWithLiterals;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n /** Input for qwen-image-layered */\n qwenImageLayered?: QwenImageLayered;\n}\n\nexport enum CreatePredictionModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n /** Flux-dev-controlnet */\n FLUX_DEV_CONTROLNET = 'FLUX_DEV_CONTROLNET',\n /** https://replicate.com/reve/edit. Has a `prompt` field, routed through GenerateContent */\n REVE_EDIT = 'REVE_EDIT',\n /** https://replicate.com/lucataco/florence-2-large */\n LUCATACO_FLORENCE_2_LARGE = 'LUCATACO_FLORENCE_2_LARGE',\n /** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\n PERCEPTRON_ISAAC_01 = 'PERCEPTRON_ISAAC_01',\n /** https://replicate.com/prunaai/z-image-turbo */\n PRUNAAI_Z_IMAGE_TURBO = 'PRUNAAI_Z_IMAGE_TURBO',\n /** https://replicate.com/qwen/qwen-image-layered */\n QWEN_IMAGE_LAYERED = 'QWEN_IMAGE_LAYERED',\n}\n\n/** @enumType */\nexport type CreatePredictionModelWithLiterals =\n | CreatePredictionModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID'\n | 'FLUX_DEV_CONTROLNET'\n | 'REVE_EDIT'\n | 'LUCATACO_FLORENCE_2_LARGE'\n | 'PERCEPTRON_ISAAC_01'\n | 'PRUNAAI_Z_IMAGE_TURBO'\n | 'QWEN_IMAGE_LAYERED';\n\nexport interface FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * Number of images to generate\n * @min 1\n * @max 4\n */\n numOutputs?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. 
Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the quality of the output image for jpg and webp (1-100)\n * @min 1\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n}\n\nexport interface FluxDevControlnet {\n /** Set a seed for reproducibility. Random by default. */\n seed?: number | null;\n /**\n * Number of steps\n * @min 1\n * @max 50\n */\n steps?: number | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Optional LoRA model to use.\n * Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link.\n * @maxLength 2000\n */\n loraUrl?: string | null;\n /**\n * Type of control net\n * @maxLength 100\n */\n controlType?: string | null;\n /**\n * Image to use with control net\n * @maxLength 2000\n */\n controlImage?: string | null;\n /**\n * Strength of LoRA model\n * @min -1\n * @max 3\n */\n loraStrength?: number | null;\n /**\n * Format of the output images\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Guidance scale\n * @max 5\n */\n guidanceScale?: number | null;\n /**\n * Quality of the output images, from 0 to 100.\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Things you do not want to see in your image\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Strength of control net.\n * @max 3\n */\n controlStrength?: number | null;\n /**\n * Preprocessor to use with depth control net\n * @maxLength 100\n */\n depthPreprocessor?: string | null;\n /**\n * Preprocessor to use with soft edge control net\n * @maxLength 100\n */\n softEdgePreprocessor?: string | null;\n /**\n * Strength of image to image control.\n * @max 1\n */\n imageToImageStrength?: number | null;\n /** Return the preprocessed image used to control the generation process. */\n returnPreprocessedImage?: boolean | null;\n}\n\nexport interface ReveEdit {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Edit instructions\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Specific version to use. 
Default: \"latest\"\n * @maxLength 10000\n */\n version?: string | null;\n}\n\n/** https://replicate.com/lucataco/florence-2-large/readme */\nexport interface LucatacoFlorence2Large {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /** Which task to perform */\n taskInput?: TaskInputWithLiterals;\n /**\n * Optional input for some task types\n * @maxLength 10000\n */\n textInput?: string | null;\n}\n\nexport enum TaskInput {\n UNRECOGNIZED_TASK_INPUT = 'UNRECOGNIZED_TASK_INPUT',\n OBJECT_DETECTION = 'OBJECT_DETECTION',\n CAPTION = 'CAPTION',\n DETAILED_CAPTION = 'DETAILED_CAPTION',\n MORE_DETAILED_CAPTION = 'MORE_DETAILED_CAPTION',\n CAPTION_TO_PHRASE_GROUNDING = 'CAPTION_TO_PHRASE_GROUNDING',\n REGION_PROPOSAL = 'REGION_PROPOSAL',\n DENSE_REGION_CAPTION = 'DENSE_REGION_CAPTION',\n OCR = 'OCR',\n OCR_WITH_REGION = 'OCR_WITH_REGION',\n}\n\n/** @enumType */\nexport type TaskInputWithLiterals =\n | TaskInput\n | 'UNRECOGNIZED_TASK_INPUT'\n | 'OBJECT_DETECTION'\n | 'CAPTION'\n | 'DETAILED_CAPTION'\n | 'MORE_DETAILED_CAPTION'\n | 'CAPTION_TO_PHRASE_GROUNDING'\n | 'REGION_PROPOSAL'\n | 'DENSE_REGION_CAPTION'\n | 'OCR'\n | 'OCR_WITH_REGION';\n\n/** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\nexport interface PerceptronIsaac01 {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /** Which task to perform */\n response?: ResponseTypeWithLiterals;\n /** Max new tokens */\n maxNewTokens?: string | null;\n}\n\nexport enum ResponseType {\n UNRECOGNIZED_RESPONSE_TYPE = 'UNRECOGNIZED_RESPONSE_TYPE',\n TEXT = 'TEXT',\n BOX = 'BOX',\n POINT = 'POINT',\n POLYGON = 'POLYGON',\n}\n\n/** @enumType */\nexport type ResponseTypeWithLiterals =\n | ResponseType\n | 'UNRECOGNIZED_RESPONSE_TYPE'\n | 'TEXT'\n | 'BOX'\n | 'POINT'\n | 'POLYGON';\n\n/** https://replicate.com/prunaai/z-image-turbo */\nexport interface PrunaaiZImageTurbo {\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Height of the generated image\n * @min 64\n * @max 2048\n */\n width?: number | null;\n /**\n * Width of the generated image\n * @min 64\n * @max 2048\n */\n height?: number | null;\n /**\n * Number of inference steps. This actually results in (num_inference_steps - 1) DiT forwards\n * @min 1\n * @max 50\n */\n numInferenceSteps?: number | null;\n /**\n * Guidance scale. Should be 0 for Turbo models\n * @max 20\n */\n guidanceScale?: number | null;\n /** Random seed. Set for reproducible generation */\n seed?: number | null;\n /**\n * Format of the output images\n * @maxLength 5\n */\n outputFormat?: string | null;\n /**\n * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs\n * @max 100\n */\n outputQuality?: number | null;\n}\n\n/** https://replicate.com/qwen/qwen-image-layered */\nexport interface QwenImageLayered {\n /**\n * Image to be converted into a layered image\n * @minLength 1\n * @maxLength 100000\n */\n image?: string;\n /**\n * Number of layers to generate (2-8)\n * @min 2\n * @max 8\n */\n numLayers?: number | null;\n /**\n * Text description of the input image. Use 'auto' for auto captioning\n * @maxLength 100000\n */\n description?: string | null;\n /** Run faster predictions with additional optimizations */\n goFast?: boolean | null;\n /** Random seed. Set for reproducible generation */\n seed?: number | null;\n /**\n * Format of the output images. 
Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Quality when saving the output images, from 0 to 100. Not relevant for .png outputs\n * @max 100\n */\n outputQuality?: number | null;\n /** Disable safety checker for generated images */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface EditImageWithPromptRequest {\n /** The model to use for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * image format jpeg, png, webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * Optional for OUTPAINT model , and required for INPAINT model\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask\n * The image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support\n * If you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * Relevant only for INPAINT model\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * image mask format jpeg, png, webp\n * Relevant only for INPAINT model\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * Relevant only for INPAINT model\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [ 0 .. 
4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: StylePresetWithLiterals;\n /**\n * The direction to outpaint the image\n * Relevant only for OUTPAINT model\n * At least one of the fields must be set\n */\n outpaintDirection?: OutpaintDirection;\n /**\n * Controls the likelihood of creating additional details not heavily conditioned by the init image [0..1]\n * Relevant only for OUTPAINT model\n * @max 1\n */\n creativity?: number | null;\n}\n\nexport enum EditImageWithPromptRequestModel {\n UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL',\n INPAINT = 'INPAINT',\n OUTPAINT = 'OUTPAINT',\n}\n\n/** @enumType */\nexport type EditImageWithPromptRequestModelWithLiterals =\n | EditImageWithPromptRequestModel\n | 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL'\n | 'INPAINT'\n | 'OUTPAINT';\n\nexport enum StylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type StylePresetWithLiterals =\n | StylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface OutpaintDirection {\n /**\n * The number of pixels to outpaint on the left side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n left?: number | null;\n /**\n * The number of pixels to outpaint on the right side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n right?: number | null;\n /**\n * The number of pixels to outpaint on the top of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n up?: number | null;\n /**\n * The number of pixels to outpaint on the bottom of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n down?: number | null;\n}\n\nexport interface TextToImageRequest {\n /**\n * Specifies the format of the output image. Supported formats are: PNG, JPG and WEBP. Default: JPG.\n * @maxLength 4\n */\n outputFormat?: string | null;\n /**\n * Sets the compression quality of the output image. Higher values preserve more quality but increase file size, lower values reduce file size but decrease quality. 
Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /** This parameter is used to enable or disable the NSFW check. */\n checkNsfw?: boolean | null;\n /**\n * A positive prompt is a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n * For example, if the positive prompt is \"dragon drinking coffee\", the model will generate an image of a dragon drinking coffee. The more detailed the prompt, the more accurate the results.\n * The length of the prompt must be between 2 and 3000 characters.\n * @maxLength 1000000\n */\n positivePrompt?: string;\n /**\n * Used to define the height dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n height?: number;\n /**\n * Used to define the width dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, eg: 128...512, 576, 640...2048.\n */\n width?: number;\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /** Model to invoke. */\n model?: TextToImageRequestModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The number of steps is the number of iterations the model will perform to generate the image. Default: 28.\n * @min 1\n * @max 100\n */\n steps?: number | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /**\n * Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results. Default: 7.\n * @max 30\n */\n cfgScale?: number | null;\n /** The number of images to generate from the specified prompt. */\n numberResults?: number | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n /**\n * Used to determine the influence of the seedImage image in the generated output. A lower value results in more influence from the original image, while a higher value allows more creative deviation.\n * @max 1\n */\n strength?: number | null;\n /**\n * An array of LoRA models to be applied during the image generation process.\n * @maxSize 10\n */\n loraModels?: LoraModelSelect[];\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /** Inputs for the image generation process. 
*/\n inputs?: Inputs;\n}\n\nexport enum TextToImageRequestModel {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n /** runware:101@1 */\n FLUX_1_DEV = 'FLUX_1_DEV',\n /** runware:100@1 */\n FLUX_1_SCHNELL = 'FLUX_1_SCHNELL',\n /** bfl:4@1 */\n FLUX_1_KONTEXT_MAX = 'FLUX_1_KONTEXT_MAX',\n /** bfl:3@1 */\n FLUX_1_KONTEXT_PRO = 'FLUX_1_KONTEXT_PRO',\n /** runware:108@20 */\n QWEN_IMAGE_EDIT = 'QWEN_IMAGE_EDIT',\n /** ideogram:4@1 */\n IDEOGRAM_3_0 = 'IDEOGRAM_3_0',\n /** ideogram:4@3 */\n IDEOGRAM_3_0_EDIT = 'IDEOGRAM_3_0_EDIT',\n /** bfl:2@2 */\n FLUX_1_1_PRO_ULTRA = 'FLUX_1_1_PRO_ULTRA',\n /** bfl:1@2 */\n FLUX_1_FILL_PRO = 'FLUX_1_FILL_PRO',\n /** bytedance:5@0 */\n SEEDREAM_4 = 'SEEDREAM_4',\n /** runware:102@1 */\n FLUX_DEV_FILL = 'FLUX_DEV_FILL',\n /** bfl:1@5 */\n FLUX_DEPTH_PRO = 'FLUX_DEPTH_PRO',\n /** bfl:1@4 */\n FLUX_CANNY_PRO = 'FLUX_CANNY_PRO',\n /** Should be used together with the model_id field from the allowed models list */\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type TextToImageRequestModelWithLiterals =\n | TextToImageRequestModel\n | 'UNKNOWN_MODEL'\n | 'FLUX_1_DEV'\n | 'FLUX_1_SCHNELL'\n | 'FLUX_1_KONTEXT_MAX'\n | 'FLUX_1_KONTEXT_PRO'\n | 'QWEN_IMAGE_EDIT'\n | 'IDEOGRAM_3_0'\n | 'IDEOGRAM_3_0_EDIT'\n | 'FLUX_1_1_PRO_ULTRA'\n | 'FLUX_1_FILL_PRO'\n | 'SEEDREAM_4'\n | 'FLUX_DEV_FILL'\n | 'FLUX_DEPTH_PRO'\n | 'FLUX_CANNY_PRO'\n | 'FROM_MODEL_ID';\n\nexport interface LoraModelSelect {\n /**\n * The unique identifier of the LoRA model, typically in the format \"wix:<id>@<version>\".\n * @minLength 1\n * @maxLength 255\n */\n model?: string | null;\n /**\n * The weight or influence of the LoRA model during the generation process.\n * A higher value indicates a stronger influence of the LoRA model on the output.\n * @min -4\n * @max 4\n */\n weight?: number | null;\n}\n\nexport interface Inputs {\n /**\n * A list of reference images URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n}\n
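/*
 * Editor's usage sketch (illustrative, not part of the generated module): a
 * TextToImageRequest for a FLUX dev render with one LoRA applied. The LoRA id
 * follows the "wix:<id>@<version>" format documented on LoraModelSelect; the
 * id itself is a placeholder.
 */
const fluxRequest: TextToImageRequest = {
  model: 'FLUX_1_DEV',
  positivePrompt: 'Isometric illustration of a tiny bakery storefront, pastel palette',
  width: 1024, // must be divisible by 64
  height: 768, // must be divisible by 64
  steps: 28,
  cfgScale: 7,
  numberResults: 1,
  outputFormat: 'PNG',
  loraModels: [{ model: 'wix:12345@1', weight: 0.8 }], // placeholder LoRA id
};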
\nexport interface InvokeMlPlatformLlamaModelRequest {\n /**\n * The ML platform model id.\n * @minLength 1\n * @maxLength 50\n */\n modelId?: string;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport interface InvokeChatCompletionRequest {\n /** Model to invoke */\n model?: PerplexityModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far\n * @maxSize 1000\n */\n messages?: PerplexityMessage[];\n /**\n * Max number of completion tokens.\n * Completion token count + prompt token count must not exceed the size of the context window\n * @max 200000\n */\n maxTokens?: number | null;\n /**\n * The amount of randomness in the response, valued between 0 inclusive and 2 exclusive.\n * Higher values are more random, and lower values are more deterministic.\n */\n temperature?: number | null;\n /**\n * The nucleus sampling threshold, valued between 0 and 1 inclusive.\n * For each subsequent token, the model considers the results of the tokens with top_p probability mass.\n * Perplexity recommends either altering top_k or top_p, but not both.\n */\n topP?: number | null;\n /**\n * Given a list of domains, limit the citations used by the online model to URLs from the specified domains.\n * Currently limited to only 3 domains for whitelisting and blacklisting.\n * For blacklisting add a - to the beginning of the domain string.\n * @maxLength 10000\n * @maxSize 3\n */\n searchDomainFilter?: string[];\n /** Determines whether or not a request to an online model should return images. */\n returnImages?: boolean | null;\n /** Determines whether or not a request to an online model should return related questions. */\n returnRelatedQuestions?: boolean | null;\n /**\n * Returns search results within the specified time interval - does not apply to images.\n * Must be one of \"month\", \"week\", \"day\", \"hour\"\n * @maxLength 10\n */\n searchRecencyFilter?: string | null;\n /**\n * The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive.\n * If set to 0, top-k filtering is disabled. Perplexity recommends either altering top_k or top_p, but not both.\n */\n topK?: number | null;\n /**\n * A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics. Incompatible with `frequency_penalty`.\n */\n presencePenalty?: number | null;\n /**\n * A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing\n * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n * A value of 1.0 means no penalty. Incompatible with `presence_penalty`.\n */\n frequencyPenalty?: number | null;\n /**\n * Enable structured outputs with a JSON or Regex schema.\n * https://docs.perplexity.ai/guides/structured-outputs\n */\n responseFormat?: InvokeChatCompletionRequestResponseFormat;\n}\n\nexport enum PerplexityModel {\n UNKNOWN_PERPLEXITY_MODEL = 'UNKNOWN_PERPLEXITY_MODEL',\n SONAR = 'SONAR',\n SONAR_PRO = 'SONAR_PRO',\n SONAR_REASONING = 'SONAR_REASONING',\n SONAR_REASONING_PRO = 'SONAR_REASONING_PRO',\n SONAR_DEEP_RESEARCH = 'SONAR_DEEP_RESEARCH',\n}\n\n/** @enumType */\nexport type PerplexityModelWithLiterals =\n | PerplexityModel\n | 'UNKNOWN_PERPLEXITY_MODEL'\n | 'SONAR'\n | 'SONAR_PRO'\n | 'SONAR_REASONING'\n | 'SONAR_REASONING_PRO'\n | 'SONAR_DEEP_RESEARCH';\n\nexport interface PerplexityMessage {\n /**\n * The content of the message\n * @maxLength 200000\n */\n content?: string;\n /**\n * The role of the speaker in this turn of conversation. 
After the (optional) system message,\n * user and assistant roles should alternate with `user` then `assistant`, ending in `user`.\n */\n role?: PerplexityMessageMessageRoleWithLiterals;\n}\n\nexport enum PerplexityMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n SYSTEM = 'SYSTEM',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type PerplexityMessageMessageRoleWithLiterals =\n | PerplexityMessageMessageRole\n | 'UNKNOWN'\n | 'SYSTEM'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface InvokeChatCompletionRequestResponseFormat\n extends InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** @oneof */\nexport interface InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n
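/*
 * Editor's usage sketch (illustrative, not part of the generated module): a
 * minimal Perplexity chat completion. Per the role docs above, after the
 * optional system message the user and assistant roles alternate, ending in a
 * user turn.
 */
const chatRequest: InvokeChatCompletionRequest = {
  model: 'SONAR_PRO',
  messages: [
    { role: 'SYSTEM', content: 'Answer concisely and cite sources.' },
    { role: 'USER', content: 'What changed in the latest ECMAScript release?' },
  ],
  maxTokens: 512,
  temperature: 0.2,
  returnRelatedQuestions: false,
};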
null;\n}\n\nexport interface GenerateImageMlPlatformRequest\n extends GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n /** The model version ID */\n model?: GenerateImageMlPlatformModelWithLiterals;\n}\n\n/** @oneof */\nexport interface GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n}\n\nexport enum GenerateImageMlPlatformModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n}\n\n/** @enumType */\nexport type GenerateImageMlPlatformModelWithLiterals =\n | GenerateImageMlPlatformModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID';\n\nexport interface V1FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n /** Time step to start CFG - new field for ml platform */\n timestepToStartCfg?: number | null;\n /** Option to disable the NSFW safety checker */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface CreateImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. 
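GenerateImageRequest above nests one Instance per prompt plus a shared Parameters object. A minimal sketch under the same import-path assumption:

import type { GenerateImageRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: two 16:9 JPEG variations of a single Imagen prompt.
const imagenRequest: GenerateImageRequest = {
  model: 'IMAGEN_4_0_GENERATE_001',
  instances: [{ prompt: 'A watercolor lighthouse at dawn' }],
  parameters: {
    sampleCount: 2, // 1-4 images per request
    aspectRatio: '16:9', // 1:1, 9:16, 16:9, 3:4, or 4:3
    negativePrompt: 'text, watermark',
    outputOptions: { mimeType: 'image/jpeg', compressionQuality: 90 },
  },
};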
*/\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * One of low, medium, or high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * Moderation flag - accepted values are low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence and self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n}\n\nexport enum OpenAiImageModel {\n UNKNOWN_IMAGE_CREATION_MODEL = 'UNKNOWN_IMAGE_CREATION_MODEL',\n GPT_4O_IMAGE = 'GPT_4O_IMAGE',\n GPT_IMAGE_1 = 'GPT_IMAGE_1',\n GPT_IMAGE_EXP = 'GPT_IMAGE_EXP',\n GPT_IMAGE_EXP_2 = 'GPT_IMAGE_EXP_2',\n GPT_IMAGE_EXP_3 = 'GPT_IMAGE_EXP_3',\n GPT_IMAGE_1_5 = 'GPT_IMAGE_1_5',\n}\n\n/** @enumType */\nexport type OpenAiImageModelWithLiterals =\n | OpenAiImageModel\n | 'UNKNOWN_IMAGE_CREATION_MODEL'\n | 'GPT_4O_IMAGE'\n | 'GPT_IMAGE_1'\n | 'GPT_IMAGE_EXP'\n | 'GPT_IMAGE_EXP_2'\n | 'GPT_IMAGE_EXP_3'\n | 'GPT_IMAGE_1_5';\n\nexport interface EditImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * One of low, medium, or high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * The image to be edited.\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * The image mask to be edited.\n * @maxLength 10000\n */\n imageMaskUrl?: string | null;\n /**\n * Additional images to be edited.\n * @maxSize 10\n * @maxLength 10000\n */\n imageUrls?: string[] | null;\n /**\n * Moderation flag - accepted values are low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence and self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). 
This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n /**\n * Control how much effort the model will exert to match the style and features, especially facial features, of input images.\n * This parameter is only supported for gpt-image-1. Supports high and low. Defaults to low.\n * @maxLength 10\n */\n inputFidelity?: string | null;\n}\n\n/** Mirrors https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo-video-generation */\nexport interface GenerateVideoRequest {\n /** ID of the Video generation model to use. */\n model?: VideoGenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 100\n */\n instances?: GenerateVideoInstance[];\n /** Generation-time settings. */\n parameters?: GenerateVideoParameters;\n}\n\nexport enum VideoGenModel {\n UNKNOWN_VIDEO_GEN_MODEL = 'UNKNOWN_VIDEO_GEN_MODEL',\n VEO_2_0_GENERATE_001 = 'VEO_2_0_GENERATE_001',\n VEO_3_0_GENERATE_001 = 'VEO_3_0_GENERATE_001',\n VEO_3_0_FAST_GENERATE_001 = 'VEO_3_0_FAST_GENERATE_001',\n}\n\n/** @enumType */\nexport type VideoGenModelWithLiterals =\n | VideoGenModel\n | 'UNKNOWN_VIDEO_GEN_MODEL'\n | 'VEO_2_0_GENERATE_001'\n | 'VEO_3_0_GENERATE_001'\n | 'VEO_3_0_FAST_GENERATE_001';\n\nexport interface GenerateVideoInstance {\n /**\n * Mandatory (text-to-video), optional if an input image prompt is provided (image-to-video)\n * Text input for guiding video generation.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Mandatory (image-to-video), optional if a text prompt is provided (text-to-video)\n * Image input for guiding video generation.\n */\n image?: V1ImageInput;\n}\n\nexport interface V1ImageInput {\n /**\n * A publicly available image URL\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * MIME type of the image (image/jpeg or image/png)\n * @maxLength 20\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateVideoParameters {\n /**\n * Requested video length in seconds (4, 6, or 8. The default is 8)\n * @min 4\n * @max 8\n */\n durationSeconds?: number | null;\n /**\n * A text string that describes anything you want to discourage the model from generating.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /** Use gemini to enhance your prompts (default is True) */\n enhancePrompt?: boolean | null;\n /**\n * A number to request to make generated videos deterministic.\n * Adding a seed number with your request without changing other parameters will cause the model to produce the same videos.\n */\n seed?: string | null;\n /**\n * Number of videos to generate (1–4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /**\n * Aspect ratio: 16:9 (default, landscape) or 9:16 (portrait)\n * @maxLength 50\n */\n aspectRatio?: string | null;\n /**\n * The safety setting that controls whether people or face generation is allowed:\n * \"allow_adult\" (default value): allow generation of adults only\n * \"disallow\": disallows inclusion of people/faces in images\n * @maxLength 50\n */\n personGeneration?: string | null;\n /** Whether to generate audio for the video */\n generateAudio?: boolean | null;\n /**\n * The resolution of the generated video. 
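EditImageOpenAiRequest above extends the create-image shape with image, mask, and fidelity fields. A sketch of a masked edit; the URLs are placeholders and the import path is assumed as before:

import type { EditImageOpenAiRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: edit only the masked region of a source image.
const editRequest: EditImageOpenAiRequest = {
  model: 'GPT_IMAGE_1',
  prompt: 'Replace the sky with a warm sunset',
  imageUrl: 'https://example.com/source.png', // placeholder
  imageMaskUrl: 'https://example.com/sky-mask.png', // placeholder
  size: '1536x1024', // landscape
  quality: 'high',
  outputFormat: 'png',
  background: 'auto',
  inputFidelity: 'high', // gpt-image-1 only
};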
Supported values: 720p, 1080p. Default: 1080p\n * @maxLength 50\n */\n resolution?: string | null;\n}\n\n/** Add to your existing proto file */\nexport interface V1CreateChatCompletionRequest {\n /** Model identifier */\n model?: ChatCompletionModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: GoogleproxyV1ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: V1CreateChatCompletionRequestResponseFormat;\n}\n\nexport enum ChatCompletionModel {\n UNKNOWN_CHAT_COMPLETION_MODEL = 'UNKNOWN_CHAT_COMPLETION_MODEL',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-scout\n * llama-4-scout-17b-16e-instruct-maas\n */\n LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS = 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-maverick\n * llama-4-maverick-17b-128e-instruct-maas\n */\n LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS = 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS',\n}\n\n/** @enumType */\nexport type ChatCompletionModelWithLiterals =\n | ChatCompletionModel\n | 'UNKNOWN_CHAT_COMPLETION_MODEL'\n | 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS'\n | 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS';\n\nexport interface GoogleproxyV1ChatCompletionMessage {\n /** The role of the message author. 
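GenerateVideoRequest above covers both text-to-video and image-to-video. A sketch of an image-to-video call; the URL is a placeholder and the import path is assumed:

import type { GenerateVideoRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: a 6-second Veo clip guided by a prompt and a starting image.
const veoRequest: GenerateVideoRequest = {
  model: 'VEO_3_0_GENERATE_001',
  instances: [
    {
      prompt: 'Slow pan across the harbor at sunrise',
      image: { imageUrl: 'https://example.com/harbor.jpg', mimeType: 'image/jpeg' }, // placeholder
    },
  ],
  parameters: {
    durationSeconds: 6, // 4, 6, or 8
    aspectRatio: '16:9',
    sampleCount: 1,
    generateAudio: true,
    resolution: '720p', // 720p or 1080p
  },
};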
*/\n role?: V1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: V1ChatCompletionMessageContentPart[];\n}\n\nexport interface V1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum V1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type V1ChatCompletionMessageMessageRoleWithLiterals =\n | V1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface V1ChatCompletionMessageContentPart\n extends V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface V1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawRequest {\n /**\n * ML Platform model identifier\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
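V1CreateChatCompletionRequest above targets the Llama partner models served through Vertex AI. A sketch requesting JSON output; the import path is assumed:

import type { V1CreateChatCompletionRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: a single-turn Llama request constrained to a JSON object response.
const llamaRequest: V1CreateChatCompletionRequest = {
  model: 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS',
  messages: [
    {
      role: 'USER',
      contentParts: [{ type: 'text', text: 'List three color names as a JSON array.' }],
    },
  ],
  maxCompletionTokens: 256,
  temperature: 0.3, // alter this or topP, not both
  responseFormat: { type: 'json_object' },
};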
*/\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: ResponseFormat;\n}\n\nexport interface ChatCompletionMessage {\n /** The role of the message author. */\n role?: MessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: ContentPart[];\n}\n\nexport interface ImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum MessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type MessageRoleWithLiterals =\n | MessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface ContentPart extends ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface ResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface VideoInferenceRequest {\n /** Specifies the format of the output video. Supported formats are: MP4 and WEBM. Default: MP4. */\n outputFormat?: OutputFormatWithLiterals;\n /**\n * Sets the compression quality of the output video. Higher values preserve more quality but increase file size. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /**\n * The text description that guides the video generation process. 
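InvokeMlPlatformOpenAIChatCompletionRawRequest mirrors the chat-completion shape above but takes a free-form modelId string. A sketch of a multimodal turn; the model ID and URL are placeholders, import path assumed:

import type { InvokeMlPlatformOpenAIChatCompletionRawRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: one user turn mixing a text part and an image_url part.
const mlRequest: InvokeMlPlatformOpenAIChatCompletionRawRequest = {
  modelId: 'my-hosted-model', // hypothetical ML Platform model identifier
  messages: [
    {
      role: 'USER',
      contentParts: [
        { type: 'text', text: 'Describe this image in one sentence.' },
        { type: 'image_url', imageUrl: { url: 'https://example.com/photo.jpg', detail: 'low' } },
      ],
    },
  ],
  maxCompletionTokens: 128,
};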
This prompt defines what you want to see in the video.\n * The length of the prompt must be at least 2 characters.\n * @minLength 2\n * @maxLength 100000\n */\n positivePrompt?: string | null;\n /**\n * Specifies what you want to avoid in the generated video.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * An array of objects that define key frames to guide video generation.\n * @maxSize 100\n */\n frameImages?: FrameImage[];\n /**\n * An array containing reference images used to condition the generation process. Must be URLs pointing to the images. The images must be accessible publicly.\n * @maxSize 10\n * @maxLength 100000\n */\n referenceImages?: string[] | null;\n /**\n * The width of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n width?: number | null;\n /**\n * The height of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n height?: number | null;\n /** The AI model to use for video generation. */\n model?: VideoModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The length of the generated video in seconds.\n * @min 1\n * @max 10\n */\n duration?: number | null;\n /**\n * The frame rate (frames per second) of the generated video. Default: 24.\n * @min 15\n * @max 60\n */\n fps?: number | null;\n /**\n * The number of denoising steps the model performs during video generation.\n * @min 10\n * @max 50\n */\n steps?: number | null;\n /** A seed is a value used to randomize the video generation. */\n seed?: string | null;\n /**\n * Controls how closely the video generation follows your prompt. Recommended range is 6.0-10.0 for most video models.\n * @max 50\n */\n cfgScale?: number | null;\n /**\n * Specifies how many videos to generate for the given parameters. Default: 1.\n * @min 1\n * @max 4\n */\n numberResults?: number | null;\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /**\n * Skip polling flag - if set to false, the call polls until video generation is complete.\n * If not set or true, it returns immediately with a task UUID for manual polling.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum OutputFormat {\n UNKNOWN_OUTPUT_FORMAT = 'UNKNOWN_OUTPUT_FORMAT',\n /** MPEG-4 video format, widely compatible and recommended for most use cases. */\n MP4 = 'MP4',\n /** WebM video format, optimized for web delivery and smaller file sizes. */\n WEBM = 'WEBM',\n}\n\n/** @enumType */\nexport type OutputFormatWithLiterals =\n | OutputFormat\n | 'UNKNOWN_OUTPUT_FORMAT'\n | 'MP4'\n | 'WEBM';\n\nexport interface FrameImage {\n /**\n * Specifies the input image that will be used to constrain the video content at the specified frame position.\n * Must be a URL pointing to the image. 
The image must be accessible publicly.\n * @maxLength 100000\n */\n inputImage?: string;\n /**\n * Specifies the position of this frame constraint within the video timeline.\n * Can be \"first\", \"last\", or a numeric frame number.\n * @maxLength 20\n */\n frame?: string | null;\n}\n\nexport enum VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SEEDANCE_1_0_PRO = 'SEEDANCE_1_0_PRO',\n SEEDANCE_1_0_LITE = 'SEEDANCE_1_0_LITE',\n SEEDANCE_1_0_PRO_FAST = 'SEEDANCE_1_0_PRO_FAST',\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type VideoModelWithLiterals =\n | VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SEEDANCE_1_0_PRO'\n | 'SEEDANCE_1_0_LITE'\n | 'SEEDANCE_1_0_PRO_FAST'\n | 'FROM_MODEL_ID';\n\nexport interface V1OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: V1ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: V1ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: V1ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: V1ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. 
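VideoInferenceRequest above drives the Seedance-style models, optionally pinning key frames. A sketch; URLs are placeholders, import path assumed:

import type { VideoInferenceRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: a 5-second MP4 constrained by a first-frame image.
const videoRequest: VideoInferenceRequest = {
  model: 'SEEDANCE_1_0_LITE',
  positivePrompt: 'A paper boat drifting down a rainy street',
  negativePrompt: 'blur, artifacts',
  frameImages: [{ inputImage: 'https://example.com/first-frame.png', frame: 'first' }], // placeholder
  width: 1280, // multiples of 8
  height: 720,
  duration: 5,
  fps: 24,
  cfgScale: 7.5, // ~6.0-10.0 recommended above
  outputFormat: 'MP4',
  skipPolling: false, // poll until generation completes
};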
*/\n toolChoice?: V1ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: V1ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. */\n store?: boolean | null;\n}\n\nexport enum V1ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n O3_PRO_2025_06_10 = 'O3_PRO_2025_06_10',\n O3_DEEP_RESEARCH_2025_06_26 = 'O3_DEEP_RESEARCH_2025_06_26',\n GPT_5_CODEX = 'GPT_5_CODEX',\n GPT_5_1_2025_11_13 = 'GPT_5_1_2025_11_13',\n GPT_5_1_CODEX = 'GPT_5_1_CODEX',\n GPT_5_1_CODEX_MINI = 'GPT_5_1_CODEX_MINI',\n GPT_EXP_RESPONSES = 'GPT_EXP_RESPONSES',\n GPT_EXP_RESPONSES_2 = 'GPT_EXP_RESPONSES_2',\n GPT_EXP_RESPONSES_3 = 'GPT_EXP_RESPONSES_3',\n GPT_5_1_CODEX_MAX = 'GPT_5_1_CODEX_MAX',\n GPT_5_2_2025_12_11 = 'GPT_5_2_2025_12_11',\n}\n\n/** @enumType */\nexport type V1ResponsesModelWithLiterals =\n | V1ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'O3_PRO_2025_06_10'\n | 'O3_DEEP_RESEARCH_2025_06_26'\n | 'GPT_5_CODEX'\n | 'GPT_5_1_2025_11_13'\n | 'GPT_5_1_CODEX'\n | 'GPT_5_1_CODEX_MINI'\n | 'GPT_EXP_RESPONSES'\n | 'GPT_EXP_RESPONSES_2'\n | 'GPT_EXP_RESPONSES_3'\n | 'GPT_5_1_CODEX_MAX'\n | 'GPT_5_2_2025_12_11';\n\nexport interface V1ResponsesInputItem extends V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. 
*/\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. */\n role?: ResponsesInputMessageResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: V1ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesInputMessageResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesInputMessageResponsesMessageRoleWithLiterals =\n | ResponsesInputMessageResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface V1ResponsesInputMessageContent\n extends V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n}\n\nexport interface ResponsesInputMessageContentImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface ResponsesInputMessageContentFileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface V1ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. 
Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: ResponsesOutputMessageOutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface V1OutputAnnotation\n extends V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\n/** @oneof */\nexport interface V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\nexport interface V1UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface ResponsesOutputMessageOutputContent {\n /**\n * The type of the output content: output_text or refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The refusal text of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: V1OutputAnnotation[];\n}\n\nexport interface V1ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: ResponsesWebSearchToolCallAction;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface ResponsesWebSearchToolCallAction {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\": Performs a web search query.\n * Action type \"open_page\": Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the call. 
Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface V1ResponsesReasoningOutput {\n /** @maxLength 100 */\n id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: V1ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: V1ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface V1ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface V1ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface V1ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: V1ResponsesCodeInterpreterOutput[];\n}\n\nexport interface V1ResponsesCodeInterpreterOutput\n extends V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface V1ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface V1ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface V1ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. 
Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface V1ResponsesTextFormat\n extends V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n}\n\nexport interface ResponsesTextFormatJsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.\n */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface V1ResponsesTool extends V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. 
*/\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\nexport interface V1ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: ResponsesWebSearchUserLocation;\n}\n\nexport interface ResponsesWebSearchUserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface V1ResponsesFunction {\n /**\n * The type of the function tool. Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: V1ResponsesCodeInterpreterContainer;\n}\n\nexport interface V1ResponsesCodeInterpreterContainer\n extends V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface V1ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\nexport interface OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. 
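Putting the V1 Responses types above together: a sketch of a web-search-enabled request with a structured-output schema. The content-part type string 'input_text' mirrors OpenAI's Responses API and is not constrained by these types; the import path remains an assumption:

import type { V1OpenAiResponsesRequest } from '@wix/auto_sdk_ai-gateway_generators'; // assumed export path

// Sketch: user message + hosted web search tool + strict JSON schema output.
const responsesRequest: V1OpenAiResponsesRequest = {
  model: 'GPT_5_MINI_2025_08_07_RESPONSES',
  instructions: 'You are a terse release-notes assistant.',
  input: [
    {
      message: {
        role: 'USER',
        content: [{ type: 'input_text', text: 'What changed this week?' }],
      },
    },
  ],
  tools: [{ webSearch: { type: 'web_search_preview', searchContextSize: 'medium' } }],
  toolChoice: { mode: 'auto' },
  text: {
    jsonSchema: {
      name: 'release_notes',
      type: 'json_schema',
      schema: { type: 'object', properties: { summary: { type: 'string' } } },
      strict: true,
    },
  },
  reasoning: { effort: 'low' },
  maxOutputTokens: 800,
};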
Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. 
*/\n store?: boolean | null;\n}\n\nexport enum ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n GPT_5_2_2025_12_11 = 'GPT_5_2_2025_12_11',\n}\n\n/** @enumType */\nexport type ResponsesModelWithLiterals =\n | ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'GPT_5_2_2025_12_11';\n\nexport interface ResponsesInputItem extends ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. 
*/\n role?: ResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesMessageRoleWithLiterals =\n | ResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface ResponsesInputMessageContent\n extends ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n}\n\nexport interface ImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface FileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: OutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface OutputAnnotation extends OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\n/** @oneof */\nexport interface OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\nexport interface UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. 
*/\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface OutputContent {\n /**\n * The type of the output content: output_text or refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The refusal text of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: OutputAnnotation[];\n}\n\nexport interface ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: Action;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface Action {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\": Performs a web search query.\n * Action type \"open_page\": Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. 
Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface ResponsesReasoningOutput {\n /** @maxLength 100 */\n id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: ResponsesCodeInterpreterOutput[];\n}\n\nexport interface ResponsesCodeInterpreterOutput\n extends ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface ResponsesTextFormat extends ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. 
*/\n jsonSchema?: JsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n}\n\nexport interface JsonSchema {\n /**\n * The name of the response format. May contain only a-z, A-Z, 0-9, underscores, and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object that describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the Structured Outputs guide.\n */\n strict?: boolean | null;\n}\n\nexport interface ResponsesToolChoice {\n /**\n * Tool choice mode.\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface ResponsesTool extends ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** A code interpreter tool. */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** A code interpreter tool. */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\nexport interface ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. 
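 * @example
 * // Illustrative sketch: a web search tool scoped to an approximate user
 * // location (all values are made up).
 * const webSearch: ResponsesWebSearch = {
 *   type: 'web_search_preview',
 *   searchContextSize: 'medium',
 *   userLocation: {
 *     type: 'approximate',
 *     city: 'San Francisco',
 *     country: 'US',
 *     region: 'California',
 *     timezone: 'America/Los_Angeles',
 *   },
 * };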
*/\n userLocation?: UserLocation;\n}\n\nexport interface UserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface ResponsesFunction {\n /**\n * The type of the function tool. Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. OpenAI structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: ResponsesCodeInterpreterContainer;\n}\n\nexport interface ResponsesCodeInterpreterContainer\n extends ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\n/** More info and default values at https://platform.openai.com/docs/api-reference/videos/create */\nexport interface CreateVideoRequest {\n /**\n * Text prompt that describes the video to generate.\n * @maxLength 10000\n */\n prompt?: string;\n /** The video generation model to use. */\n model?: V1VideoModelWithLiterals;\n /**\n * Size of the generated video (width x height in pixels). Examples: \"720x1280\", \"1280x720\".\n * @maxLength 50\n */\n size?: string | null;\n /**\n * Clip duration in seconds. Default is 4 seconds if not specified.\n * @min 1\n * @max 180\n */\n seconds?: number | null;\n /**\n * Optional publicly accessible URL to an image reference that guides generation.\n * @maxLength 5000\n * @format WEB_URL\n */\n inputReferenceUrl?: string | null;\n}\n\nexport enum V1VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SORA_2 = 'SORA_2',\n SORA_2_PRO = 'SORA_2_PRO',\n}\n\n/** @enumType */\nexport type V1VideoModelWithLiterals =\n | V1VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SORA_2'\n | 'SORA_2_PRO';\n\nexport interface ContentGenerationRequestedEvent {\n /** Prompt that the generation was requested for. 
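 * @example
 * // Illustrative sketch: the request event carries template parameters and an
 * // event chain ID that later correlates it with the matching response event.
 * // The parameter keys below are hypothetical, not defined by this SDK.
 * const requested: ContentGenerationRequestedEvent = {
 *   params: { customerName: 'Ada' },
 *   dynamicProperties: { temperature: '0.2' },
 *   eventChainId: '00000000-0000-0000-0000-000000000000',
 * };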
*/\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface UserRequestInfo {\n /**\n * Interaction ID.\n * @maxLength 100\n */\n interactionId?: string | null;\n /**\n * Additional tags; use a comma-separated format for multiple tags.\n * @maxLength 1000\n */\n additionalTags?: string | null;\n /**\n * GenAI feature name, required by FinOps for evaluation.\n * @maxLength 1000\n */\n featureName?: string | null;\n /**\n * AppDefId to which the cost will be attributed instead of the one that signs the request.\n * Will not work unless your application is explicitly allowed to override cost attribution.\n * Please reach out to #ai-tools-support if you think you need this field.\n * @format GUID\n */\n costAttributionOverrideId?: string | null;\n}\n\nexport interface ContentGenerationSucceededEvent {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** The prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateContentModelResponse\n extends GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Invoke Amazon Converse API response. */\n amazonConverseResponse?: InvokeConverseResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. 
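 * @example
 * // Illustrative sketch: instead of switching over every vendor-specific
 * // oneof field, read the normalized `generatedContent` extraction.
 * function firstText(res: GenerateContentModelResponse): string | undefined {
 *   return res.generatedContent?.texts?.[0]?.generatedText ?? undefined;
 * }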
*/\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n /** Extracted generated content data from the model's response. */\n generatedContent?: GeneratedContent;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n /** Token usage information. */\n tokenUsage?: V1TokenUsage;\n /** Metadata about the response, such as finish reason. */\n responseMetadata?: ResponseMetadata;\n}\n\n/** @oneof */\nexport interface GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. 
*/\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Invoke Amazon Converse API response. */\n amazonConverseResponse?: InvokeConverseResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n}\n\n/** Model generation result, at least one of the fields should be present */\nexport interface GeneratedContent {\n /**\n * Zero or more textual results. Only present when the model returned a text.\n * @maxSize 1000\n */\n texts?: TextContent[];\n /**\n * Zero or more images. Only present when the model returned an image.\n * @maxSize 1000\n */\n images?: MediaContent[];\n /**\n * Zero or more videos. Only present when the model returned a video.\n * @maxSize 1000\n */\n videos?: MediaContent[];\n /**\n * Zero or more thinking texts. Only present when the model returned a thought.\n * @maxSize 1000\n */\n thinkingTexts?: ThinkingTextContent[];\n /**\n * Zero or more tool call requests. Only present when the model requested to call a tool.\n * @maxSize 1000\n */\n tools?: ToolUseContent[];\n}\n\nexport interface TextContent {\n /**\n * Generated text\n * @maxLength 1000000\n */\n generatedText?: string | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. 
A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface MediaContent {\n /**\n * Mime type, e.g. \"image/jpeg\" or \"video/mp4\"\n * @maxLength 500\n */\n mimeType?: string | null;\n /**\n * Wix Media Platform (WixMP) url where the image or video is stored.\n * @maxLength 5000\n */\n url?: string;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ThinkingTextContent {\n /**\n * The thought text of the model thinking\n * @maxLength 1000000\n */\n thoughtText?: string | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ToolUseContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string;\n /** Tool use input */\n input?: Record<string, any> | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface V1TokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n /** Total number of tokens used in the request. */\n totalTokens?: number | null;\n /** cache creation token usage */\n cacheCreationTokens?: number | null;\n /** cache read token usage */\n cacheReadTokens?: number | null;\n /** thought tokens usage */\n thoughtsTokens?: number | null;\n /** tool use tokens usage */\n toolUseTokens?: number | null;\n}\n\nexport interface ResponseMetadata {\n /**\n * Finish reason of the model response.\n * @maxLength 1000\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: OpenaiproxyV1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface CreateChatCompletionResponsePromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseCompletionTokenDetails {\n /** Reasoning tokens present in the completion. 
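 * @example
 * // Illustrative sketch: derive the non-cached share of prompt tokens from
 * // the usage breakdown; every field may be absent on a given response.
 * function nonCachedPromptTokens(
 *   usage: OpenaiproxyV1CreateChatCompletionResponseTokenUsage
 * ): number {
 *   return (usage.inputTokens ?? 0) - (usage.promptTokenDetails?.cachedTokens ?? 0);
 * }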
*/\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: OpenaiproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: CreateChatCompletionResponsePromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CreateChatCompletionResponseCompletionTokenDetails;\n}\n\nexport interface TextBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: TextBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface TextBisonPrediction {\n /**\n * The result generated from input text.\n * @maxLength 100000\n */\n content?: string | null;\n /** Citation metadata */\n citationMetadata?: CitationMetadata;\n /** A collection of categories and their associated confidence scores. */\n safetyAttributes?: SafetyAttribute;\n}\n\nexport interface CitationMetadata {\n /**\n * Citations array\n * @maxSize 1000\n */\n citations?: V1Citation[];\n}\n\nexport interface V1Citation {\n /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */\n startIndex?: number | null;\n /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and < len(output). */\n endIndex?: number | null;\n /**\n * URL associated with this citation. If present, this URL links to the webpage of the source of this citation.\n * Possible URLs include news websites, GitHub repos, etc.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * Title associated with this citation. If present, it refers to the title of the source of this citation.\n * Possible titles include news titles, book titles, etc.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * License associated with this recitation. If present, it refers to the license of the source of this citation.\n * Possible licenses include code licenses, e.g., mit license.\n * @maxLength 100\n */\n license?: string | null;\n /**\n * Publication date associated with this citation. If present, it refers to the date at which the source of this citation was published.\n * Possible formats are YYYY, YYYY-MM, YYYY-MM-DD.\n * @maxLength 100\n */\n publicationDate?: string | null;\n}\n\nexport interface SafetyAttribute {\n /**\n * The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /** A flag indicating if the model's input or output was blocked. 
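 * @example
 * // Illustrative sketch: categories and scores are parallel arrays (order
 * // matches), so they can be zipped into labeled pairs for inspection.
 * function labeledScores(attr: SafetyAttribute): Array<[string, number]> {
 *   const categories = attr.categories ?? [];
 *   const scores = attr.scores ?? [];
 *   return categories.map((category, i): [string, number] => [category, scores[i] ?? 0]);
 * }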
*/\n blocked?: boolean | null;\n /**\n * The confidence scores of each category; a higher value means higher confidence.\n * @maxSize 100\n */\n scores?: number[] | null;\n /**\n * An error code that identifies why the input or output was blocked.\n * For a list of error codes, see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai#safety_filters_and_attributes.\n * @maxSize 100\n */\n errors?: string[] | null;\n}\n\nexport interface Metadata {\n /** TokenMetadata object */\n tokenMetadata?: TokenMetadata;\n}\n\nexport interface TokenMetadata {\n /** Number of input tokens. This is the total number of tokens across all messages, examples, and context. */\n inputTokenCount?: TokenCount;\n /** Number of output tokens. This is the total number of tokens in content across all candidates in the response. */\n outputTokenCount?: TokenCount;\n}\n\nexport interface TokenCount {\n /** Number of tokens */\n totalTokens?: number | null;\n /** Number of billable characters */\n totalBillableCharacters?: number | null;\n}\n\nexport interface ChatBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: ChatBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface ChatBisonPrediction {\n /**\n * The chat result generated from the given message.\n * @maxSize 100\n */\n candidates?: ChatMessage[];\n /**\n * Citation metadata\n * @maxSize 100\n */\n citationMetadata?: CitationMetadata[];\n /**\n * An array of collections of categories and their associated confidence scores. 1-1 mapping to candidates.\n * @maxSize 100\n */\n safetyAttributes?: SafetyAttribute[];\n}\n\nexport interface CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface PromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. 
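 * @example
 * // Illustrative sketch: choices arrive indexed; with the common n = 1 case
 * // there is a single choice whose finish reason signals truncation.
 * function firstFinishReason(res: CreateChatCompletionResponse): string | null {
 *   return res.choices?.[0]?.finishReason ?? null;
 * }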
*/\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: V1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: PromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CompletionTokenDetails;\n}\n\nexport interface GenerateContentResponse {\n /**\n * The generated response.\n * @maxSize 1000\n */\n candidates?: Candidate[];\n /** The usage metadata. */\n usageMetadata?: UsageMetadata;\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Candidate {\n /** The generated response content. */\n content?: CandidateContent;\n /** The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */\n finishReason?: FinishReasonWithLiterals;\n /**\n * The safety ratings of the response.\n * @maxSize 100\n */\n safetyRatings?: SafetyRating[];\n /** The citation metadata of the response. */\n citationMetadata?: CandidateCitationMetadata;\n /** Output only. Metadata specifies sources used to ground generated content. */\n groundingMetadata?: GroundingMetadata;\n}\n\nexport interface CandidateContent {\n /**\n * The generated response content.\n * @maxSize 1000\n */\n parts?: CandidateContentPart[];\n}\n\nexport interface CandidateContentPart {\n /**\n * The text generated by the model.\n * @maxLength 100000\n */\n text?: string | null;\n /** function call */\n functionCall?: FunctionCall;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /**\n * Thought flag indicates that the content part is a thought.\n * @readonly\n */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport enum FinishReason {\n UNKNOWN_FINISH_REASON = 'UNKNOWN_FINISH_REASON',\n /** The finish reason is unspecified. */\n UNSPECIFIED = 'UNSPECIFIED',\n /** Natural stop point of the model or provided stop sequence. */\n STOP = 'STOP',\n /** The maximum number of tokens as specified in the request was reached. */\n MAX_TOKENS = 'MAX_TOKENS',\n /**\n * The token generation was stopped as the response was flagged for safety reasons.\n * Note that Candidate.content is empty if content filters block the output.\n */\n SAFETY = 'SAFETY',\n /** The token generation was stopped as the response was flagged for unauthorized citations. 
*/\n RECITATION = 'RECITATION',\n /** All other reasons that stopped token generation. */\n OTHER = 'OTHER',\n /** The response candidate content was flagged for using an unsupported language. */\n LANGUAGE = 'LANGUAGE',\n /** Token generation stopped because the content contains forbidden terms. */\n BLOCKLIST = 'BLOCKLIST',\n /** Token generation stopped for potentially containing prohibited content. */\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n /** Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII). */\n SPII = 'SPII',\n /** The function call generated by the model is invalid. */\n MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',\n /** Token generation stopped because generated images contain safety violations. */\n IMAGE_SAFETY = 'IMAGE_SAFETY',\n /** Model generated a tool call but no tools were enabled in the request. */\n UNEXPECTED_TOOL_CALL = 'UNEXPECTED_TOOL_CALL',\n /** Model called too many tools consecutively, thus the system exited execution. */\n TOO_MANY_TOOL_CALLS = 'TOO_MANY_TOOL_CALLS',\n}\n\n/** @enumType */\nexport type FinishReasonWithLiterals =\n | FinishReason\n | 'UNKNOWN_FINISH_REASON'\n | 'UNSPECIFIED'\n | 'STOP'\n | 'MAX_TOKENS'\n | 'SAFETY'\n | 'RECITATION'\n | 'OTHER'\n | 'LANGUAGE'\n | 'BLOCKLIST'\n | 'PROHIBITED_CONTENT'\n | 'SPII'\n | 'MALFORMED_FUNCTION_CALL'\n | 'IMAGE_SAFETY'\n | 'UNEXPECTED_TOOL_CALL'\n | 'TOO_MANY_TOOL_CALLS';\n\nexport interface SafetyRating {\n /** The safety category that the response belongs to. */\n category?: HarmCategoryWithLiterals;\n /** The probability that the response belongs to the specified safety category. */\n probability?: HarmProbabilityWithLiterals;\n /** The probability score that the response belongs to the specified safety category. */\n probabilityScore?: number | null;\n /**\n * The severity of the response's safety rating.\n * @maxLength 100\n */\n severity?: string | null;\n /** The severity score of the response's safety rating. */\n severityScore?: number | null;\n /**\n * A boolean flag associated with a safety attribute that indicates if the model's input or output was blocked.\n * If blocked is true, then the errors field in the response contains one or more error codes.\n * If blocked is false, then the response doesn't include the errors field.\n */\n blocked?: boolean | null;\n}\n\nexport enum HarmProbability {\n UNKNOWN_PROBABILITY = 'UNKNOWN_PROBABILITY',\n NEGLIGIBLE = 'NEGLIGIBLE',\n LOW = 'LOW',\n MEDIUM = 'MEDIUM',\n HIGH = 'HIGH',\n}\n\n/** @enumType */\nexport type HarmProbabilityWithLiterals =\n | HarmProbability\n | 'UNKNOWN_PROBABILITY'\n | 'NEGLIGIBLE'\n | 'LOW'\n | 'MEDIUM'\n | 'HIGH';\n\nexport interface CandidateCitationMetadata {\n /**\n * The citations of the response.\n * @maxSize 1000\n */\n citations?: CandidateCitationMetadataCitation[];\n}\n\nexport interface PublicationDate {\n /** The year of the publication date. */\n year?: number | null;\n /** The month of the publication date. */\n month?: number | null;\n /** The day of the publication date. */\n day?: number | null;\n}\n\nexport interface CandidateCitationMetadataCitation {\n /** An integer that specifies where a citation starts in the content. */\n startIndex?: number | null;\n /** An integer that specifies where a citation ends in the content. */\n endIndex?: number | null;\n /**\n * The URI of a citation source. 
Examples of a URI source might be a news website or a GitHub repository.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * The title of a citation source. Examples of source titles might be that of a news article or a book.\n * @maxLength 500\n */\n title?: string | null;\n /**\n * The license associated with a citation.\n * @maxLength 500\n */\n license?: string | null;\n /** The date a citation was published. Its valid formats are YYYY, YYYY-MM, and YYYY-MM-DD. */\n publicationDate?: PublicationDate;\n}\n\n/** Metadata returned to client when grounding is enabled. */\nexport interface GroundingMetadata {\n /**\n * Optional. Web search queries for the follow-up web search.\n * @maxSize 1000\n * @maxLength 1000\n */\n webSearchQueries?: string[];\n /** Optional. Google search entry for the follow-up web searches. */\n searchEntryPoint?: SearchEntryPoint;\n /**\n * List of supporting references retrieved from specified grounding source.\n * @maxSize 1000\n */\n groundingChunks?: GroundingChunk[];\n /**\n * Optional. List of grounding support.\n * @maxSize 1000\n */\n groundingSupports?: GroundingSupport[];\n /** Optional. Output only. Retrieval metadata. */\n retrievalMetadata?: RetrievalMetadata;\n}\n\n/** Google search entry point. */\nexport interface SearchEntryPoint {\n /**\n * Optional. Web content snippet that can be embedded in a web page or an app webview.\n * @maxLength 10000000\n */\n renderedContent?: string | null;\n /** Optional. Base64-encoded JSON representing an array of <search term, search url> tuples. */\n sdkBlob?: Uint8Array | null;\n}\n\n/** Grounding chunk. */\nexport interface GroundingChunk extends GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** @oneof */\nexport interface GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** Chunk from the web. */\nexport interface Web {\n /**\n * URI reference of the chunk.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the chunk.\n * @maxLength 1000\n */\n title?: string | null;\n}\n\n/** Chunk from context retrieved by the retrieval tools. */\nexport interface RetrievedContext {\n /**\n * URI reference of the attribution.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the attribution.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * Text of the attribution.\n * @maxLength 100000\n */\n text?: string | null;\n}\n\n/** Grounding support. */\nexport interface GroundingSupport {\n /** Segment of the content this support belongs to. */\n segment?: Segment;\n /**\n * A list of indices (into 'grounding_chunk') specifying the\n * citations associated with the claim. For instance [1,3,4] means\n * that grounding_chunk[1], grounding_chunk[3],\n * grounding_chunk[4] are the retrieved content attributed to the claim.\n * @maxSize 1000\n */\n groundingChunkIndices?: number[];\n /**\n * Confidence score of the support references. Ranges from 0 to 1. 1 is the\n * most confident. This list must have the same size as the\n * grounding_chunk_indices.\n * @maxSize 1000\n */\n confidenceScores?: number[];\n}\n\n/** Segment of the content. */\nexport interface Segment {\n /** Output only. The index of a Part object within its parent Content object. 
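 * @example
 * // Illustrative sketch: resolve a grounding support's chunk indices into the
 * // titles of the chunks that back the claim.
 * function supportTitles(meta: GroundingMetadata, support: GroundingSupport): string[] {
 *   const chunks = meta.groundingChunks ?? [];
 *   return (support.groundingChunkIndices ?? [])
 *     .map((i) => chunks[i]?.web?.title ?? chunks[i]?.retrievedContext?.title)
 *     .filter((title): title is string => title != null);
 * }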
*/\n partIndex?: number | null;\n /**\n * Output only. Start index in the given Part, measured in bytes. Offset from\n * the start of the Part, inclusive, starting at zero.\n */\n startIndex?: number;\n /**\n * Output only. End index in the given Part, measured in bytes. Offset from\n * the start of the Part, exclusive, starting at zero.\n */\n endIndex?: number;\n /**\n * Output only. The text corresponding to the segment from the response.\n * @maxLength 100000\n */\n text?: string;\n}\n\n/** Metadata related to retrieval in the grounding flow. */\nexport interface RetrievalMetadata {\n /**\n * Optional. Score indicating how likely information from Google Search could\n * help answer the prompt. The score is in the range `[0, 1]`, where 0 is the\n * least likely and 1 is the most likely. This score is only populated when\n * Google Search grounding and dynamic retrieval is enabled. It will be\n * compared to the threshold to determine whether to trigger Google Search.\n */\n googleSearchDynamicRetrievalScore?: number | null;\n}\n\nexport interface UsageMetadata {\n /** Number of tokens in the request. */\n promptTokenCount?: number | null;\n /** Number of tokens in the response. */\n candidatesTokenCount?: number | null;\n /** Number of tokens in the request and response(s). */\n totalTokenCount?: number | null;\n /** Optional. Number of tokens of thoughts for thinking models. */\n thoughtsTokenCount?: number | null;\n /**\n * Output only. List of modalities that were processed in the request input.\n * @maxSize 10\n */\n promptTokensDetails?: ModalityTokenCount[];\n /**\n * Output only. List of modalities that were returned in the response.\n * @maxSize 10\n */\n candidatesTokensDetails?: ModalityTokenCount[];\n}\n\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality?: ModalityWithLiterals;\n /** Number of tokens. */\n tokenCount?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: RoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. 
For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n */\n contentBlocks?: ContentBlock[];\n}\n\nexport enum ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type ResponseTypeTypeWithLiterals =\n | ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number of tokens that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: GoogleproxyV1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: GoogleproxyV1Usage;\n /** Cost of the request in microcents. 
*/\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 1000\n */\n contentBlocks?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum GoogleproxyV1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ResponseTypeTypeWithLiterals =\n | GoogleproxyV1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface GoogleproxyV1Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number of tokens that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface InvokeAnthropicModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * \"end_turn\": the model reached a natural stopping point\n * \"max_tokens\": we exceeded the requested max_tokens or the model's maximum\n * \"stop_sequence\": one of your provided custom stop_sequences was generated\n * \"tool_use\": the model invoked one or more tools\n * \"pause_turn\": we paused a long-running turn. You may provide the response back as-is in a subsequent request to let the model continue.\n * \"refusal\": when streaming classifiers intervene to handle potential policy violations\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n type?: V1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: MessageRoleRoleWithLiterals;\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: V1Usage;\n /**\n * Information about the container used in this request.\n * This will be non-null if a container tool (e.g. code execution) was used.\n */\n container?: Container;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum V1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type V1ResponseTypeTypeWithLiterals =\n | V1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface V1Usage {\n /** Breakdown of cached tokens by TTL */\n cacheCreation?: UsageCacheCreation;\n /** The number of input tokens used to create the cache entry. */\n cacheCreationInputTokens?: number | null;\n /** The number of input tokens read from the cache. */\n cacheReadInputTokens?: number | null;\n /** The number of input tokens which were used. */\n inputTokens?: number;\n /** The number of output tokens which were used. */\n outputTokens?: number;\n /** The number of server tool requests. 
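 * @example
 * // Illustrative sketch: cost fields across this module are strings
 * // denominated in microcents (assumed here to be 1e-6 cents, i.e. 1e-8 USD).
 * function microcentsToDollars(microcents?: string | null): number {
 *   return microcents ? Number(microcents) / 100_000_000 : 0;
 * }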
*/\n serverToolUse?: UsageServerToolUse;\n /**\n * If the request used the priority, standard, or batch tier.\n * Available options: standard, priority, batch\n * @maxLength 500\n */\n serviceTier?: string | null;\n}\n\nexport interface UsageCacheCreation {\n /** The number of input tokens used to create the 1 hour cache entry. */\n ephemeral1hInputTokens?: number;\n /** The number of input tokens used to create the 5 minute cache entry. */\n ephemeral5mInputTokens?: number;\n}\n\nexport interface UsageServerToolUse {\n /** The number of web search tool requests. */\n webSearchRequests?: number;\n /** The number of web fetch tool requests. */\n webFetchRequests?: number;\n}\n\nexport interface Container {\n /**\n * The time at which the container will expire.\n * @maxLength 100\n */\n expiresAt?: string;\n /**\n * Identifier for the container used in this request\n * @maxLength 512\n */\n id?: string;\n}\n\nexport interface InvokeLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeConverseResponse {\n /** The model's generated output. */\n output?: Output;\n /**\n * Why the model stopped: 'end_turn', 'max_tokens', 'stop_sequence', 'guardrail_intervened', or 'content_filtered'.\n * @maxLength 500\n */\n stopReason?: string | null;\n /** Token usage statistics including cache metrics. */\n usage?: InvokeConverseResponseTokenUsage;\n /** Performance metrics including latency. */\n metrics?: Metrics;\n /** Model-specific response fields as a JSON object. */\n additionalModelResponseFields?: Record<string, any> | null;\n /** The performance configuration applied to this request. */\n performanceConfig?: ConversePerformanceConfig;\n /** Total cost in microcents for this request */\n microcentsSpent?: string | null;\n}\n\n/** Container for the model's generated output. */\nexport interface Output {\n /** The generated message with role and content blocks. */\n message?: ConverseMessage;\n}\n\n/** todo: expose serverToolUsage */\nexport interface InvokeConverseResponseTokenUsage {\n /** Tokens in the input (prompt, history, system prompts). */\n inputTokens?: number;\n /** Tokens generated in the response. */\n outputTokens?: number;\n /** Total tokens processed (input + output). */\n totalTokens?: number;\n /** Tokens retrieved from cache. Only present when prompt caching is enabled. */\n cacheReadInputTokens?: number | null;\n /** Tokens written to cache for future requests. Only present when prompt caching is enabled. */\n cacheWriteInputTokens?: number | null;\n}\n\nexport interface Metrics {\n /** End-to-end latency in milliseconds. 
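 * @example
 * // Illustrative sketch: flag truncated Converse responses and report how
 * // many prompt tokens were served from the cache.
 * function converseStats(res: InvokeConverseResponse) {
 *   return {
 *     truncated: res.stopReason === 'max_tokens',
 *     cachedTokens: res.usage?.cacheReadInputTokens ?? 0,
 *   };
 * }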
*/\n latencyMs?: number;\n}\n\nexport interface InvokeMlPlatformLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeChatCompletionResponse {\n /**\n * Unique response ID\n * @maxLength 100\n */\n id?: string | null;\n /** The model used to generate the response */\n model?: PerplexityModelWithLiterals;\n /**\n * The object type, which always equals chat.completion\n * @maxLength 100\n */\n object?: string | null;\n /** The Unix timestamp (in seconds) of when the completion was created */\n created?: number | null;\n /**\n * Citations for the generated answer\n * @maxLength 10000\n * @maxSize 1000\n */\n citations?: string[];\n /** The list of completion choices the model generated for the input prompt */\n choices?: InvokeChatCompletionResponseChoice[];\n /** URLs and size metadata for returned images */\n images?: PerplexityImageDescriptor[];\n /**\n * Further questions related to the search\n * @maxLength 10000\n * @maxSize 1000\n */\n relatedQuestions?: string[];\n /** Usage statistics for the completion request. */\n usage?: InvokeChatCompletionResponseUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\n/** Structures the completion choice */\nexport interface InvokeChatCompletionResponseChoice {\n /** Choice index */\n index?: number | null;\n /**\n * Stop reason, can be `STOP` or `LENGTH`\n * @maxLength 10\n */\n finishReason?: string | null;\n /** Choice message, containing content and role */\n message?: PerplexityMessage;\n}\n\nexport interface PerplexityImageDescriptor {\n /**\n * Full image url\n * @maxLength 5000\n */\n imageUrl?: string | null;\n /**\n * Image origin website\n * @maxLength 5000\n */\n originUrl?: string | null;\n /** Height */\n height?: number | null;\n /** Width */\n width?: number | null;\n}\n\n/** Usage statistics for the completion request. */\nexport interface InvokeChatCompletionResponseUsage {\n /** The number of tokens provided in the request prompt. */\n promptTokens?: number | null;\n /** The number of tokens generated in the response output. */\n completionTokens?: number | null;\n /** The total number of tokens used in the chat completion (prompt + completion). */\n totalTokens?: number | null;\n /** Tokens passed into the input from citations found during search. Priced like `prompt_tokens` */\n citationTokens?: number | null;\n /** Reasoning tokens are used to reason through the research material before generating the final output via the CoTs */\n reasoningTokens?: number | null;\n /** Number of search queries executed. 
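 * @example
 * // Illustrative sketch: Perplexity returns citations as plain URL strings
 * // alongside the completion; number them for display.
 * function numberedCitations(res: InvokeChatCompletionResponse): string[] {
 *   return (res.citations ?? []).map((url, i) => `[${i + 1}] ${url}`);
 * }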
*/\n numSearchQueries?: number | null;\n}\n\nexport interface CreateImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: V1ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * The prompt that was used to generate the image, if there was any revision to the prompt.\n * @maxLength 100000\n */\n revisedPrompt?: string | null;\n}\n\nexport interface V1TextToImageResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /** A specific value [0 .. 4294967294] used to guide the 'randomness' of the generation. */\n seed?: string | null;\n /**\n * Finish reason by the model provider.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface GenerateCoreResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageCoreModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateStableDiffusionResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateAnImageResponse {\n /**\n * The id of the task.\n * @format GUID\n */\n id?: string | null;\n /**\n * status of the image generation\n * one of Task not found, Pending, Request Moderated, Content Moderated, Ready, Error\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface ResultObject {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * prompt used for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n /** seed used for image generation */\n seed?: string | null;\n}\n\nexport interface CreatePredictionResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. 
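(Illustrative conversion sketch, not part of the published typings; assumes a microcent means one millionth of a cent.)\n * @example\n * // 1 USD = 100 cents = 1e8 microcents:\n * const usd = Number(prediction.microcentsSpent ?? '0') / 1e8; // prediction is a hypothetical CreatePredictionResponse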
*/\n microcentsSpent?: string | null;\n /** Token counts */\n tokenUsage?: CreatePredictionResponseTokenUsage;\n}\n\nexport interface CreatePredictionResponseTokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n}\n\nexport interface EditImageWithPromptResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface TextToImageResponse {\n /**\n * Generation TextToImageTaskResult\n * @maxSize 1000\n */\n data?: TextToImageTaskResult[];\n}\n\nexport interface TextToImageTaskResult {\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * The unique identifier of the image.\n * @format GUID\n */\n imageUuid?: string;\n /**\n * If outputType is set to URL, this parameter contains the URL of the image to be downloaded.\n * @maxLength 2048\n */\n imageUrl?: string | null;\n /** If checkNSFW parameter is used, NSFWContent is included informing if the image has been flagged as potentially sensitive content. */\n nsfwContent?: boolean;\n /** A cost of generated image. */\n microcentsSpent?: string | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n}\n\nexport interface GenerateImageResponse {\n /**\n * Array of generated image results, one for each requested sampleCount\n * @maxSize 8\n */\n predictions?: Prediction[];\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Prediction {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * Enhanced prompt used for generation (only returned for models that support prompt enhancement)\n * @maxLength 1000\n */\n prompt?: string | null;\n /**\n * The responsible AI filter reason\n * Only returned if includeRaiReason is enabled and this image was filtered out\n * @maxLength 1000\n */\n raiFilteredReason?: string | null;\n /** Safety attributes information */\n safetyAttributes?: SafetyAttributes;\n}\n\nexport interface SafetyAttributes {\n /**\n * The safety attribute categories\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /**\n * The safety attribute scores\n * @maxSize 100\n */\n scores?: number[] | null;\n}\n\nexport interface GenerateVideoResponse {\n /**\n * Generated videos\n * @maxSize 4\n */\n videos?: GeneratedVideo[];\n /** Cost of the request in micro-cents. 
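(Illustrative sketch of reading the surrounding GenerateVideoResponse; not part of the published typings.)\n * @example\n * const urls = (res.videos ?? [])\n *   .map((v) => v.videoUrl)\n *   .filter((u): u is string => !!u); // keep only populated URLs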
*/\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedVideo {\n /**\n * The URL of the generated video.\n * @format WEB_URL\n */\n videoUrl?: string | null;\n /**\n * The video MIME type (currently only \"video/mp4\")\n * @maxLength 50\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateImageMlPlatformResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface CreateImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface ImageUsage {\n /** Number of tokens in the input */\n inputTokens?: number | null;\n /** Details about input tokens */\n inputTokensDetails?: OpenAiImageTokenDetails;\n /** Number of tokens in the output */\n outputTokens?: number | null;\n /** Output tokens details */\n outputTokensDetails?: OpenAiImageTokenDetails;\n /** Total number of tokens used */\n totalTokens?: number | null;\n}\n\nexport interface OpenAiImageTokenDetails {\n /** Number of tokens used for image processing */\n imageTokens?: number | null;\n /** Number of tokens used for text processing */\n textTokens?: number | null;\n}\n\nexport interface EditImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface V1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: ChatCompletionModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: V1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. 
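(Illustrative sketch, not part of the published typings; response is a hypothetical V1CreateChatCompletionResponse value.)\n * @example\n * const first = response.choices?.[0];\n * console.log(first?.finishReason); // why generation stopped\n * console.log(first?.message); // the generated message object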
*/\n message?: GoogleproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /**\n * Model that produced the completion.\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of chat completion choices. Can be more than one if n is greater than 1.\n * @maxSize 10000\n */\n choices?: Choice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: TokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface Choice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface TokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface VideoInferenceResponse {\n /**\n * Generation VideoInferenceTaskResult\n * @maxSize 1000\n */\n data?: VideoInferenceTaskResult[];\n}\n\nexport interface VideoInferenceTaskResult {\n /**\n * The API will return the taskType you sent in the request.\n * @maxLength 100\n */\n taskType?: string;\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * A unique identifier for the generated video.\n * @format GUID\n */\n videoUuid?: string | null;\n /**\n * If outputType is set to URL, this parameter contains the URL of the video to be downloaded.\n * @maxLength 10000\n */\n videoUrl?: string | null;\n /**\n * The seed value that was used to generate this video.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /** A cost of generated video. */\n microcentsSpent?: string | null;\n /**\n * The current processing status (for polling operations).\n * @maxLength 50\n */\n status?: string | null;\n}\n\nexport interface V1OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. 
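(Illustrative sketch, not part of the published typings; which statuses count as terminal is an assumption here.)\n * @example\n * // Background responses can be polled until status settles\n * // (the documented set is completed, failed, in_progress, cancelled, queued, incomplete):\n * const done = ['completed', 'failed', 'cancelled', 'incomplete'].includes(res.status ?? '');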
*/\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: OpenAiResponsesResponseIncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: V1ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: V1ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: V1ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1ResponsesTokenUsage;\n /** Cost of the request in microcents. 
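(Illustrative sketch, not part of the published typings.)\n * @example\n * // As the output field's note above says, do not assume output[0] is the assistant message; search for it:\n * const msg = res.output?.find((o) => !!o.outputMessage)?.outputMessage;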
*/\n microcentsSpent?: string | null;\n}\n\nexport interface OpenAiResponsesResponseIncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: V1ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: V1ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface V1ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface V1ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: IncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. 
Refer to the model guide to browse and compare available models.\n */\n model?: ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface IncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport interface ResponsesOutput extends ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. 
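(Illustrative sketch, not part of the published typings; the @oneof marker below means at most one variant field is populated per item, and the handler names are hypothetical.)\n * @example\n * for (const item of res.output ?? []) {\n *   if (item.outputMessage) handleMessage(item.outputMessage);\n *   else if (item.functionToolCall) handleToolCall(item.functionToolCall);\n * }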
*/\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface CreateVideoResponse {\n videoJob?: VideoJob;\n}\n\nexport interface VideoJob {\n /**\n * The unique identifier for the video generation job.\n * @maxLength 200\n */\n id?: string | null;\n /**\n * The status of the response generation.\n * @maxLength 50\n */\n status?: string | null;\n /**\n * The generated video result url. Only present when status is \"completed\".\n * @maxLength 5000\n * @format WEB_URL\n */\n url?: string | null;\n /** Error payload that explains why generation failed, if applicable. */\n error?: ErrorInfo;\n /** The progress of the video generation as a percentage (0-100) */\n progress?: number | null;\n}\n\nexport interface ErrorInfo {\n /**\n * code\n * @maxLength 50\n */\n code?: string | null;\n /**\n * message\n * @maxLength 1000\n */\n message?: string | null;\n}\n\nexport interface ContentGenerationFailedEvent {\n /**\n * Error message that content generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateTextByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate text generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. 
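(Illustrative sketch, not part of the published typings; the tool name and JSON Schema below are hypothetical.)\n * @example\n * const cfg: DynamicRequestConfig = {\n *   gatewayToolDefinitions: [{\n *     customTool: {\n *       name: 'get_weather',\n *       description: 'Returns current weather for a city',\n *       parameters: { type: 'object', properties: { city: { type: 'string' } } },\n *     },\n *   }],\n * };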
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface FallbackProperties {\n /**\n * Flag to indicate whether to opt out of the request forwarding as a fallback.\n * Currently, only the fallback from OpenAI to Azure is supported for certain OpenAI models.\n * If set to true, the request will not be redirected to Azure in the event of a server failure by OpenAI.\n */\n optOut?: boolean | null;\n /** FallbackPromptConfig object that describes optional second Prompt that can be invoked in case main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n}\n\nexport interface DynamicRequestConfig {\n /**\n * List of GatewayToolDefinition's, used to overwrite tools in the prompt.\n * @maxSize 100\n */\n gatewayToolDefinitions?: GatewayToolDefinition[];\n /**\n * List of GatewayMessageDefinition's, which will be converted to model-specific format and appended to the messages saved in the prompt.\n * @maxSize 100\n */\n gatewayMessageDefinitions?: GatewayMessageDefinition[];\n}\n\nexport interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\n/** @oneof */\nexport interface GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\nexport interface GatewayToolDefinitionCustomTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the tool does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the tool accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface BuiltInTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional parameters specific to the built-in tool. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GatewayMessageDefinition {\n /** The role of the message author. */\n role?: GatewayMessageDefinitionRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport enum GatewayMessageDefinitionRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n TOOL = 'TOOL',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type GatewayMessageDefinitionRoleWithLiterals =\n | GatewayMessageDefinitionRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\n/** @oneof */\nexport interface GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. 
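(Illustrative sketch, not part of the published typings; the exact TextContent shape is not shown here and is assumed to carry a text field.)\n * @example\n * const msg: GatewayMessageDefinition = {\n *   role: 'USER',\n *   content: [{ text: { text: 'Hello!' } }], // text field on TextContent is an assumption\n * };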
*/\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\nexport interface ToolResultContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n error?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport interface GenerateTextByPromptResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface ModelResponse extends ModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /**\n * Extracted generated text messages from the model's response.\n * @maxSize 100\n * @maxLength 100000\n */\n generatedTexts?: string[] | null;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface ModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. 
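(Illustrative sketch, not part of the published typings; result is a hypothetical GenerateTextByPromptResponse value.)\n * @example\n * // The vendor-agnostic way to read a result is the extracted field on ModelResponse;\n * // fall back to the populated vendor variant only for provider-specific fields:\n * const text = result.response?.generatedTexts?.[0];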
*/\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n}\n\nexport interface GenerationRequestedEvent {\n /** Prompt that the generation was requested for. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface TextGenerationSucceededEvent {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface TextGenerationFailedEvent {\n /**\n * Error message that text generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. 
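(Illustrative sketch, not part of the published typings; receivedChunks is a hypothetical array of GeneratedTextChunk values.)\n * @example\n * // Each chunk exposes its extracted text via content, regardless of vendor:\n * let full = '';\n * for (const chunk of receivedChunks) full += chunk.content ?? '';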
*/\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n /**\n * Extracted text content from the chunk.\n * @maxLength 100\n */\n content?: string | null;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\n/** @oneof */\nexport interface GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n}\n\nexport interface ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 100\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ToolCall[];\n}\n\nexport interface ChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkDelta;\n /**\n * The reason the model stopped generating tokens. 
This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface V1ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChatCompletionChunkChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkChoiceChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 1000\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n}\n\nexport interface ChatCompletionChunkChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkChoiceChunkDelta;\n /**\n * The reason the model stopped generating tokens. This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface GoogleproxyV1AnthropicStreamChunk\n extends GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1ContentBlockDelta\n extends GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface V1AnthropicStreamChunkMessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: GoogleproxyV1Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface AnthropicStreamChunk extends AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n}\n\nexport interface ContentBlockDelta extends ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface MessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface V1AnthropicStreamChunk\n extends V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * Index of the content block this chunk refers to (when relevant).\n * For example, text and tool-input deltas apply to the block at this index.\n */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n}\n\nexport interface V1ContentBlockDelta extends V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\n/** @oneof */\nexport interface V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\nexport interface AnthropicStreamChunkMessageDelta {\n /**\n * Why generation concluded for this assistant message, when 
applicable:\n * \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" | \"pause_turn\" | \"refusal\".\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * The specific custom stop sequence that was produced, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Cumulative token usage at this point in the stream. */\n usage?: V1Usage;\n /** Cost of the request so far, in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateTextByPromptObjectRequest {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GenerateTextByPromptObjectResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateEmbeddingsRequest\n extends GenerateEmbeddingsRequestEmbeddingRequestOneOf {\n /** OpenAi Embeddings Request */\n openAiEmbeddingsRequest?: V1CreateEmbeddingsRequest;\n /** Azure Embeddings Request */\n azureEmbeddingsRequest?: CreateEmbeddingsRequest;\n /** Google Vertex Embeddings Request */\n googleEmbeddingsRequest?: GetEmbeddingRequest;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateEmbeddingsRequestEmbeddingRequestOneOf {\n /** OpenAi Embeddings Request */\n openAiEmbeddingsRequest?: V1CreateEmbeddingsRequest;\n /** Azure Embeddings Request */\n azureEmbeddingsRequest?: CreateEmbeddingsRequest;\n /** Google Vertex Embeddings Request */\n googleEmbeddingsRequest?: GetEmbeddingRequest;\n}\n\nexport interface V1CreateEmbeddingsRequest {\n /**\n * Input text to get embeddings for, encoded as a string or array of tokens.\n * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays.\n * Each input must not exceed 8192 tokens in length.\n * @minSize 1\n * @maxSize 200\n * @maxLength 40000\n */\n input?: string[] | null;\n /** Embedding model that produced the embeddings. */\n model?: OpenaiproxyV1EmbeddingModelWithLiterals;\n /** The format to return the embeddings in. Can be either float or base64. 
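(Illustrative sketch, not part of the published typings; the model and format literals used are among those defined below.)\n * @example\n * const req: V1CreateEmbeddingsRequest = {\n *   input: ['first text', 'second text'],\n *   model: 'TEXT_EMBEDDING_3_SMALL',\n *   encodingFormat: 'FLOAT',\n * };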
*/\n encodingFormat?: V1EmbeddingEncodingFormatWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 50\n */\n user?: string | null;\n /**\n * The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.\n * @min 1\n */\n dimensions?: string | null;\n}\n\nexport enum OpenaiproxyV1EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n TEXT_EMBEDDING_ADA_002 = 'TEXT_EMBEDDING_ADA_002',\n TEXT_EMBEDDING_3_SMALL = 'TEXT_EMBEDDING_3_SMALL',\n TEXT_EMBEDDING_3_LARGE = 'TEXT_EMBEDDING_3_LARGE',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1EmbeddingModelWithLiterals =\n | OpenaiproxyV1EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'TEXT_EMBEDDING_ADA_002'\n | 'TEXT_EMBEDDING_3_SMALL'\n | 'TEXT_EMBEDDING_3_LARGE';\n\nexport enum V1EmbeddingEncodingFormat {\n UNKNOWN_ENCODING_FORMAT = 'UNKNOWN_ENCODING_FORMAT',\n /** Will request base64 from OpenAI and parse server-side */\n FLOAT = 'FLOAT',\n BASE64 = 'BASE64',\n}\n\n/** @enumType */\nexport type V1EmbeddingEncodingFormatWithLiterals =\n | V1EmbeddingEncodingFormat\n | 'UNKNOWN_ENCODING_FORMAT'\n | 'FLOAT'\n | 'BASE64';\n\nexport interface CreateEmbeddingsRequest {\n /**\n * Input text to get embeddings for, encoded as a string or array of tokens.\n * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays.\n * Each input must not exceed 8192 tokens in length.\n * @minSize 1\n * @maxSize 200\n * @maxLength 40000\n */\n input?: string[] | null;\n /** Embedding model that produced the embeddings. */\n model?: EmbeddingModelWithLiterals;\n /** The format to return the embeddings in. Currently, only float is supported. */\n encodingFormat?: EmbeddingEncodingFormatWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 50\n */\n user?: string | null;\n /**\n * The number of dimensions the resulting output embeddings should have. 
Only supported in text-embedding-3 and later models.\n * @min 1\n */\n dimensions?: string | null;\n}\n\nexport enum EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n ADA = 'ADA',\n TEXT_EMBEDDING_3_SMALL = 'TEXT_EMBEDDING_3_SMALL',\n TEXT_EMBEDDING_3_LARGE = 'TEXT_EMBEDDING_3_LARGE',\n}\n\n/** @enumType */\nexport type EmbeddingModelWithLiterals =\n | EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'ADA'\n | 'TEXT_EMBEDDING_3_SMALL'\n | 'TEXT_EMBEDDING_3_LARGE';\n\nexport enum EmbeddingEncodingFormat {\n UNKNOWN_ENCODING_FORMAT = 'UNKNOWN_ENCODING_FORMAT',\n FLOAT = 'FLOAT',\n BASE64 = 'BASE64',\n}\n\n/** @enumType */\nexport type EmbeddingEncodingFormatWithLiterals =\n | EmbeddingEncodingFormat\n | 'UNKNOWN_ENCODING_FORMAT'\n | 'FLOAT'\n | 'BASE64';\n\n/** Request for getting embeddings from text */\nexport interface GetEmbeddingRequest {\n /** The model to use for generating embeddings */\n model?: V1EmbeddingModelWithLiterals;\n /**\n * Array of instances containing text to embed\n * five texts of up to 2,048 tokens per text for all models\n * @minSize 1\n * @maxSize 5\n */\n instances?: TextEmbeddingInstance[];\n /** Optional parameters for the embedding request */\n parameters?: TextEmbeddingParameters;\n}\n\n/** Enum for different embedding models offered by Vertex AI */\nexport enum V1EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n TEXT_MULTILINGUAL_EMBEDDING_002 = 'TEXT_MULTILINGUAL_EMBEDDING_002',\n TEXT_EMBEDDING_005 = 'TEXT_EMBEDDING_005',\n /** Experimental model text-embedding-large-exp-03-07 */\n TEXT_EMBEDDING_LARGE = 'TEXT_EMBEDDING_LARGE',\n GEMINI_EMBEDDING_001 = 'GEMINI_EMBEDDING_001',\n}\n\n/** @enumType */\nexport type V1EmbeddingModelWithLiterals =\n | V1EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'TEXT_MULTILINGUAL_EMBEDDING_002'\n | 'TEXT_EMBEDDING_005'\n | 'TEXT_EMBEDDING_LARGE'\n | 'GEMINI_EMBEDDING_001';\n\n/** Instance containing text to embed */\nexport interface TextEmbeddingInstance {\n /**\n * The text content to embed\n * up to 2,048 tokens per text for all models\n * @maxLength 40000\n */\n content?: string | null;\n /**\n * Optional task type that helps optimize the embedding for specific use cases\n * If left blank, the default used is RETRIEVAL_QUERY\n */\n taskType?: TaskTypeWithLiterals;\n /**\n * Optional title for the content\n * @maxLength 10000\n */\n title?: string | null;\n}\n\n/**\n * Enum for task types that help optimize embeddings for specific use cases\n * Used to convey intended downstream application to help the model produce better embeddings\n * If left blank, the default used is RETRIEVAL_QUERY\n */\nexport enum TaskType {\n UNKNOWN_TASK_TYPE = 'UNKNOWN_TASK_TYPE',\n RETRIEVAL_QUERY = 'RETRIEVAL_QUERY',\n RETRIEVAL_DOCUMENT = 'RETRIEVAL_DOCUMENT',\n SEMANTIC_SIMILARITY = 'SEMANTIC_SIMILARITY',\n CLASSIFICATION = 'CLASSIFICATION',\n CLUSTERING = 'CLUSTERING',\n QUESTION_ANSWERING = 'QUESTION_ANSWERING',\n FACT_VERIFICATION = 'FACT_VERIFICATION',\n CODE_RETRIEVAL_QUERY = 'CODE_RETRIEVAL_QUERY',\n}\n\n/** @enumType */\nexport type TaskTypeWithLiterals =\n | TaskType\n | 'UNKNOWN_TASK_TYPE'\n | 'RETRIEVAL_QUERY'\n | 'RETRIEVAL_DOCUMENT'\n | 'SEMANTIC_SIMILARITY'\n | 'CLASSIFICATION'\n | 'CLUSTERING'\n | 'QUESTION_ANSWERING'\n | 'FACT_VERIFICATION'\n | 'CODE_RETRIEVAL_QUERY';\n\n/** Parameters for the embedding request */\nexport interface TextEmbeddingParameters {\n /** Optional: Used to specify output embedding size. If set, output embeddings will be truncated to the size specified. 
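\n *\n * For orientation, a minimal Google Vertex request sketch (illustrative values only):\n * ```typescript\n * const vertexReq: GetEmbeddingRequest = {\n *   model: 'TEXT_EMBEDDING_005',\n *   instances: [{ content: 'How do I return an item?', taskType: 'RETRIEVAL_QUERY' }],\n *   parameters: { outputDimensionality: 256, autoTruncate: true },\n * };\n * ```\n 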
*/\n outputDimensionality?: number | null;\n /** Optional: When set to true, input text will be truncated. When set to false, an error is returned if the input text is longer than the maximum length supported by the model. Defaults to true. */\n autoTruncate?: boolean;\n}\n\nexport interface GenerateEmbeddingsResponse\n extends GenerateEmbeddingsResponseEmbeddingResponseOneOf {\n /** OpenAi Embeddings Response */\n openAiEmbeddingsResponse?: V1CreateEmbeddingsResponse;\n /** Azure Embeddings Response */\n azureEmbeddingsResponse?: CreateEmbeddingsResponse;\n /** Google Vertex Embeddings Response */\n googleEmbeddingsResponse?: GetEmbeddingResponse;\n}\n\n/** @oneof */\nexport interface GenerateEmbeddingsResponseEmbeddingResponseOneOf {\n /** OpenAi Embeddings Response */\n openAiEmbeddingsResponse?: V1CreateEmbeddingsResponse;\n /** Azure Embeddings Response */\n azureEmbeddingsResponse?: CreateEmbeddingsResponse;\n /** Google Vertex Embeddings Response */\n googleEmbeddingsResponse?: GetEmbeddingResponse;\n}\n\nexport interface V1CreateEmbeddingsResponse {\n /**\n * The object type, which is always list.\n * @maxLength 50\n */\n objectType?: string | null;\n /**\n * A list of embeddings for each input.\n * @maxSize 1000\n */\n data?: V1EmbeddingInfo[];\n /** Embedding model that produced the embeddings. */\n model?: OpenaiproxyV1EmbeddingModelWithLiterals;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateEmbeddingsResponseEmbeddingUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1EmbeddingInfo extends V1EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: V1FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 1000\n */\n base64Embedding?: string | null;\n /**\n * The object type, which is always \"embedding\".\n * @maxLength 50\n */\n objectType?: string | null;\n /** The index of the embedding in the list of embeddings. */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: V1FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 1000\n */\n base64Embedding?: string | null;\n}\n\nexport interface V1FloatEmbedding {\n /**\n * The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.\n * @maxSize 10000\n */\n embedding?: number[] | null;\n}\n\nexport interface CreateEmbeddingsResponseEmbeddingUsage {\n /** Number of prompt tokens */\n promptTokens?: number | null;\n /** Total number of tokens used for the embedding request. */\n totalTokens?: number | null;\n}\n\nexport interface CreateEmbeddingsResponse {\n /**\n * The object type, which is always list.\n * @maxLength 50\n */\n objectType?: string | null;\n /**\n * A list of embeddings for each input.\n * @maxSize 1000\n */\n data?: EmbeddingInfo[];\n /** Embedding model that produced the embeddings. */\n model?: EmbeddingModelWithLiterals;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: EmbeddingUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface EmbeddingInfo extends EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. 
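\n *\n * Exactly one result variant is populated per oneof; a reading sketch (illustrative):\n * ```typescript\n * function toVector(info: EmbeddingInfo): number[] | undefined {\n *   // Prefer the float form; a base64Embedding payload would need decoding first.\n *   return info.floatEmbedding?.embedding ?? undefined;\n * }\n * ```\n 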
*/\n floatEmbedding?: FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 10000\n */\n base64Embedding?: string | null;\n /**\n * The object type, which is always \"embedding\".\n * @maxLength 50\n */\n objectType?: string | null;\n /** The index of the embedding in the list of embeddings. */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 10000\n */\n base64Embedding?: string | null;\n}\n\nexport interface FloatEmbedding {\n /**\n * The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.\n * @maxSize 10000\n */\n embedding?: number[] | null;\n}\n\nexport interface EmbeddingUsage {\n /** Number of prompt tokens */\n promptTokens?: number | null;\n /** Total number of tokens used for the embedding request. */\n totalTokens?: number | null;\n}\n\n/** Response containing the generated embeddings */\nexport interface GetEmbeddingResponse {\n /**\n * The generated embedding values\n * @maxSize 5\n */\n predictions?: EmbeddingPrediction[];\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\n/** Embeddings data */\nexport interface EmbeddingPrediction {\n embeddings?: EmbeddingInstance;\n}\n\n/** Single content embedding instance */\nexport interface EmbeddingInstance {\n /** Metadata about the embedding */\n statistics?: Statistics;\n /**\n * The generated embedding values\n * @maxSize 10000\n */\n embedding?: number[];\n}\n\n/** Metadata about the embedding generation */\nexport interface Statistics {\n /** Number of tokens processed */\n tokenCount?: number | null;\n /** Truncation indicator */\n truncated?: boolean | null;\n}\n\nexport interface GenerateTextByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate text generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GenerateTextByProjectResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateText request. 
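\n *\n * For context, a minimal by-project request sketch (the GUID is a placeholder):\n * ```typescript\n * const textReq: GenerateTextByProjectRequest = {\n *   projectId: '00000000-0000-0000-0000-000000000000',\n *   params: { userName: 'Ada' },\n * };\n * ```\n 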
*/\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateModerationRequest\n extends GenerateModerationRequestModerationRequestOneOf {\n /** OpenAi Moderation Request */\n openAiModerationRequest?: CreateModerationRequest;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateModerationRequestModerationRequestOneOf {\n /** OpenAi Moderation Request */\n openAiModerationRequest?: CreateModerationRequest;\n}\n\nexport interface CreateModerationRequest {\n /**\n * The input text to classify.\n * @maxLength 100000\n * @maxSize 1000\n */\n input?: string[];\n /**\n * Two content moderation models are available: text-moderation-stable and text-moderation-latest.\n * The default is text-moderation-latest, which will be automatically upgraded over time. This ensures you are always\n * using our most accurate model. If you use text-moderation-stable, we will provide advance notice before updating\n * the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.\n * @maxLength 50\n */\n model?: string | null;\n /**\n * An array of input parts with a defined type; each can be of type text or image_url when passing in images.\n * If defined, the input field will be ignored.\n * Image input is only supported when using the omni-moderation model.\n * @maxSize 1000\n */\n multiModalInputs?: MultiModalInput[];\n}\n\nexport interface ImageUrlInput {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n}\n\nexport interface MultiModalInput extends MultiModalInputContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlInput;\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface MultiModalInputContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlInput;\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n}\n\nexport interface GenerateModerationResponse\n extends GenerateModerationResponseModerationResponseOneOf {\n /** OpenAi Moderation Response */\n openAiModerationResponse?: CreateModerationResponse;\n}\n\n/** @oneof */\nexport interface GenerateModerationResponseModerationResponseOneOf {\n /** OpenAi Moderation Response */\n openAiModerationResponse?: CreateModerationResponse;\n}\n\nexport interface CreateModerationResponse {\n /**\n * The unique identifier for the moderation request.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The model used to generate the moderation results.\n * @maxLength 100\n */\n model?: string | null;\n /**\n * A list of moderation objects.\n * @maxSize 1000\n */\n results?: ModerationResult[];\n}\n\nexport interface ModerationResult {\n /** Whether the content violates OpenAI's usage policies: https://openai.com/policies/usage-policies. */\n flagged?: boolean;\n /** A list of the categories, and whether they are flagged or not. */\n categories?: Record<string, any> | null;\n /** A list of the categories along with their scores as predicted by the model. 
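\n *\n * A sketch of inspecting a result (illustrative only):\n * ```typescript\n * function flaggedCategories(res: ModerationResult): string[] {\n *   if (!res.flagged) return [];\n *   return Object.entries(res.categories ?? {})\n *     .filter(([, hit]) => hit === true)\n *     .map(([name]) => name);\n * }\n * ```\n 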
*/\n categoryScores?: Record<string, any> | null;\n /** A list of the categories along with the input type(s) that the score applies to. */\n categoryAppliedInputTypes?: Record<string, any> | null;\n}\n\nexport interface GenerateImageByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate an image generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /**\n * Skip polling flag. Supported requests:\n * black_forest_labs_generate_image_response, replicate_create_prediction_response\n */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByProjectResponse {\n /** ImageModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface ImageModelResponse extends ImageModelResponseResponseOneOf {\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface ImageModelResponseResponseOneOf {\n /** OpenAI image generation response. 
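\n *\n * Only one vendor field is set per response; a dispatch sketch (illustrative):\n * ```typescript\n * function vendorOf(resp: ImageModelResponseResponseOneOf): string {\n *   if (resp.openAiCreateImageResponse) return 'openai';\n *   if (resp.stabilityAiGenerateCoreResponse) return 'stability-ai';\n *   if (resp.blackForestLabsGenerateImageResponse) return 'black-forest-labs';\n *   return 'other';\n * }\n * ```\n 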
*/\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n}\n\nexport interface ImageGenerationRequestedEvent {\n /** Prompt that the generation was requested for. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface ImageGenerationSucceededEvent {\n /** ModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface ImageGenerationFailedEvent {\n /**\n * Error message that text generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateImageByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate image generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. 
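\n *\n * A minimal by-prompt-id request sketch (the GUID is a placeholder):\n * ```typescript\n * const imageReq: GenerateImageByPromptRequest = {\n *   promptId: '00000000-0000-0000-0000-000000000000',\n *   params: { subject: 'a red bicycle at sunset' },\n *   skipPolling: false,\n * };\n * ```\n 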
*/\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByPromptResponse {\n /** ModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateImageByPromptObjectRequest {\n /** Prompt object that describes the image generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByPromptObjectResponse {\n /** ImageModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate content generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
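\n *\n * An async-delivery sketch (topic name and GUID are placeholders):\n * ```typescript\n * const contentReq: GenerateContentByPromptRequest = {\n *   promptId: '00000000-0000-0000-0000-000000000000',\n *   asyncGenerationConfig: { asyncResultTopic: 'my-app.generation-results' },\n * };\n * ```\n 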
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface AsyncGenerationConfig {\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n /** SPI generation configuration. */\n spiGenerationConfig?: SpiGenerationConfig;\n}\n\nexport interface SpiGenerationConfig {\n /**\n * SPI client app_id.\n * @maxLength 100\n */\n appId?: string | null;\n /**\n * SPI client component_id.\n * @maxLength 100\n */\n componentId?: string | null;\n}\n\nexport interface GenerateContentByPromptResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate a content generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface GenerateContentByProjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByPromptObjectRequest {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface GenerateContentByPromptObjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateTranscriptionRequest\n extends GenerateTranscriptionRequestTranscriptionRequestOneOf {\n /** OpenAi transcription request */\n openAiTranscriptionRequest?: CreateTranscriptionRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateTranscriptionRequestTranscriptionRequestOneOf {\n /** OpenAi transcription request */\n openAiTranscriptionRequest?: CreateTranscriptionRequest;\n}\n\nexport interface CreateTranscriptionRequest {\n /**\n * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n * @format WEB_URL\n */\n file?: string | null;\n /** Transcription AI model to use. */\n model?: TranscriptionModelWithLiterals;\n /**\n * The language of the input audio.\n * Supplying the input language in ISO-639-1 format will improve accuracy and latency.\n * @format LANGUAGE\n */\n language?: string | null;\n /**\n * Prompt text to guide the model's style or continue a previous audio segment.\n * @maxLength 4000\n */\n prompt?: string | null;\n /**\n * The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.\n * DEPRECATED - Will always be set to verbose_json for cost monitoring.\n * @deprecated The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.\n * DEPRECATED - Will always be set to verbose_json for cost monitoring.\n * @replacedBy content_blocks\n * @targetRemovalDate 2025-01-30\n */\n responseFormat?: CreateTranscriptionRequestResponseFormatWithLiterals;\n /** The sampling temperature, between 0 and 1. */\n temperature?: number | null;\n /**\n * The timestamp granularities to populate for this transcription.\n * response_format must be set to verbose_json to use timestamp granularities.\n * Either or both of these options are supported: word or segment.\n * Note: There is no additional latency for segment timestamps,\n * but generating word timestamps incurs additional latency.\n */\n timestampGranularities?: TimestampGranularities;\n /** Content of the input file, can be used instead of the `file` field. 
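\n *\n * A minimal transcription sketch (the URL is a placeholder):\n * ```typescript\n * const transcriptionReq: CreateTranscriptionRequest = {\n *   file: 'https://example.com/audio.mp3',\n *   model: 'WHISPER_1',\n *   language: 'en',\n *   timestampGranularities: { timestampGranularities: ['SEGMENT'] },\n * };\n * ```\n 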
*/\n fileContent?: FileContent;\n}\n\nexport enum TranscriptionModel {\n UNKNOWN_TRANSCRIPTION_MODEL = 'UNKNOWN_TRANSCRIPTION_MODEL',\n WHISPER_1 = 'WHISPER_1',\n}\n\n/** @enumType */\nexport type TranscriptionModelWithLiterals =\n | TranscriptionModel\n | 'UNKNOWN_TRANSCRIPTION_MODEL'\n | 'WHISPER_1';\n\nexport enum CreateTranscriptionRequestResponseFormat {\n UNKNOWN_RESPONSE_FORMAT = 'UNKNOWN_RESPONSE_FORMAT',\n JSON = 'JSON',\n TEXT = 'TEXT',\n SRT = 'SRT',\n VERBOSE_JSON = 'VERBOSE_JSON',\n VTT = 'VTT',\n}\n\n/** @enumType */\nexport type CreateTranscriptionRequestResponseFormatWithLiterals =\n | CreateTranscriptionRequestResponseFormat\n | 'UNKNOWN_RESPONSE_FORMAT'\n | 'JSON'\n | 'TEXT'\n | 'SRT'\n | 'VERBOSE_JSON'\n | 'VTT';\n\nexport interface TimestampGranularities {\n /**\n * Timestamp granularity, can be WORD or SEGMENT or both.\n * @maxSize 1000\n */\n timestampGranularities?: TimestampGranularityWithLiterals[];\n}\n\nexport enum TimestampGranularity {\n UNKNOWN_TIMESTAMP_GRANULARITY = 'UNKNOWN_TIMESTAMP_GRANULARITY',\n WORD = 'WORD',\n SEGMENT = 'SEGMENT',\n}\n\n/** @enumType */\nexport type TimestampGranularityWithLiterals =\n | TimestampGranularity\n | 'UNKNOWN_TIMESTAMP_GRANULARITY'\n | 'WORD'\n | 'SEGMENT';\n\nexport interface FileContent {\n /** File bytes */\n fileBytes?: Uint8Array;\n /**\n * File name\n * @maxLength 100\n */\n fileName?: string;\n}\n\nexport interface GenerateTranscriptionResponse\n extends GenerateTranscriptionResponseTranscriptionResponseOneOf {\n /** OpenAi transcription response */\n openAiTranscriptionResponse?: CreateTranscriptionResponse;\n}\n\n/** @oneof */\nexport interface GenerateTranscriptionResponseTranscriptionResponseOneOf {\n /** OpenAi transcription response */\n openAiTranscriptionResponse?: CreateTranscriptionResponse;\n}\n\nexport interface CreateTranscriptionResponse {\n /**\n * Language of the input audio.\n * @maxLength 50\n */\n language?: string | null;\n /** Input audio duration in seconds. */\n duration?: GoogleProtoDuration;\n /**\n * Transcribed text.\n * @maxLength 10000\n */\n text?: string;\n /**\n * Extracted words and their corresponding timestamps.\n * @maxSize 1000\n */\n words?: Word[];\n /**\n * Segments of the transcribed text and their corresponding details.\n * @maxSize 1000\n */\n segments?: V1Segment[];\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface Word {\n /**\n * The text content of the word.\n * @maxLength 100\n */\n word?: string;\n /** Start time of the word. */\n start?: GoogleProtoDuration;\n /** End time of the word */\n end?: GoogleProtoDuration;\n}\n\nexport interface V1Segment {\n /** Unique identifier of the segment. */\n id?: number;\n /** Seek offset of the segment. */\n seek?: number;\n /** Start time of the segment. */\n start?: GoogleProtoDuration;\n /** End time of the segment */\n end?: GoogleProtoDuration;\n /**\n * Text content of the segment.\n * @maxLength 10000\n */\n text?: string;\n /**\n * Array of token IDs for the text content.\n * @maxSize 1000\n */\n tokens?: string[];\n /** Temperature parameter used for generating the segment. */\n temperature?: number;\n /** Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. */\n avgLogprob?: number;\n /** Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. */\n compressionRatio?: number;\n /** Probability of no speech in the segment. 
If the value is higher than 1.0 and the avg_logprob is below -1, consider this segment silent. */\n noSpeechProb?: number;\n}\n\nexport interface GenerateAudioRequest\n extends GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n\nexport interface CreateSpeechRequest {\n /** One of the available TTS models: https://platform.openai.com/docs/models#tts */\n model?: SpeechModelWithLiterals;\n /**\n * The text to generate audio for. The maximum length is 4096 characters.\n * @maxLength 4096\n */\n input?: string;\n /**\n * The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.\n * @maxLength 100\n */\n voice?: string;\n /**\n * The format to generate audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.\n * @maxLength 100\n */\n responseFormat?: string | null;\n /**\n * The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n * @min 0.25\n * @max 4\n */\n speed?: number | null;\n}\n\nexport enum SpeechModel {\n UNKNOWN_SPEECH_MODEL = 'UNKNOWN_SPEECH_MODEL',\n TTS_1 = 'TTS_1',\n TTS_1_HD = 'TTS_1_HD',\n}\n\n/** @enumType */\nexport type SpeechModelWithLiterals =\n | SpeechModel\n | 'UNKNOWN_SPEECH_MODEL'\n | 'TTS_1'\n | 'TTS_1_HD';\n\nexport interface TextToSpeechRequest {\n /**\n * Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.\n * @maxLength 100\n */\n voiceId?: string;\n /**\n * The output format of the generated audio. List of supported values: mp3_22050_32, mp3_44100_32, mp3_44100_64, mp3_44100_96, mp3_44100_128, mp3_44100_192, pcm_16000, pcm_22050, pcm_24000, pcm_44100, ulaw_8000\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * When enable_logging is set to false, full privacy mode will be used for the request.\n * This will mean history features are unavailable for this request, including request stitching.\n * Full privacy mode may only be used by enterprise customers.\n */\n enableLogging?: boolean;\n /**\n * The text that will get converted into speech.\n * @maxLength 10000000\n */\n text?: string;\n /** Identifier of the model that will be used; you can query them using GET /v1/models. The model needs to have support for text to speech; you can check this using the can_do_text_to_speech property. */\n modelId?: ElevenLabsTextToSpeechModelWithLiterals;\n /**\n * Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.\n * @maxLength 100\n */\n languageCode?: string | null;\n /** Voice settings overriding stored settings for the given voice. They are applied only on the given request. */\n voiceSettings?: VoiceSettings;\n /**\n * A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request.\n * @maxSize 10\n */\n pronunciationDictionaryLocators?: PronunciationDictionaryLocator[];\n /** If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295. */\n seed?: string | null;\n /**\n * The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n previousText?: string | null;\n /**\n * The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n nextText?: string | null;\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n previousRequestIds?: string[];\n /**\n * A list of request_id of the samples that come after this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n nextRequestIds?: string[];\n /**\n * This parameter controls text normalization with three modes: ‘auto’, ‘on’, and ‘off’. When set to ‘auto’, the system will automatically decide whether to apply text normalization (e.g., spelling out numbers).\n * With ‘on’, text normalization will always be applied, while with ‘off’, it will be skipped. Cannot be turned on for the ‘eleven_turbo_v2_5’ model.\n * Defaults to ‘auto’.\n * @maxLength 100\n */\n applyTextNormalization?: string | null;\n /** When set to true, response chunks will include precise character-level timing information for audio-text synchronization. */\n withTimings?: boolean;\n}\n\nexport enum ElevenLabsTextToSpeechModel {\n UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL = 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL',\n ELEVEN_MULTILINGUAL_V2 = 'ELEVEN_MULTILINGUAL_V2',\n ELEVEN_FLASH_V2_5 = 'ELEVEN_FLASH_V2_5',\n ELEVEN_FLASH_V2 = 'ELEVEN_FLASH_V2',\n}\n\n/** @enumType */\nexport type ElevenLabsTextToSpeechModelWithLiterals =\n | ElevenLabsTextToSpeechModel\n | 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL'\n | 'ELEVEN_MULTILINGUAL_V2'\n | 'ELEVEN_FLASH_V2_5'\n | 'ELEVEN_FLASH_V2';\n\nexport interface VoiceSettings {\n /** Defines the stability for voice settings. */\n stability?: number;\n /** Defines the similarity boost for voice settings. */\n similarityBoost?: number;\n /** Defines the style for voice settings. This parameter is available on V2+ models. */\n style?: number | null;\n /** Defines the use speaker boost for voice settings. This parameter is available on V2+ models. 
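\n *\n * A settings sketch (values are illustrative):\n * ```typescript\n * const settings: VoiceSettings = {\n *   stability: 0.5,\n *   similarityBoost: 0.75,\n *   style: 0.2,\n *   useSpeakerBoost: true,\n * };\n * ```\n 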
*/\n useSpeakerBoost?: boolean;\n}\n\nexport interface PronunciationDictionaryLocator {\n /**\n * pronunciation_dictionary_id\n * @maxLength 100\n */\n pronunciationDictionaryId?: string;\n /**\n * version_id\n * @maxLength 100\n */\n versionId?: string;\n}\n\nexport interface GenerateAudioResponse\n extends GenerateAudioResponseAudioResponseOneOf {\n /** OpenAi create speech response */\n openAiCreateSpeechResponse?: CreateSpeechResponse;\n}\n\n/** @oneof */\nexport interface GenerateAudioResponseAudioResponseOneOf {\n /** OpenAi create speech response */\n openAiCreateSpeechResponse?: CreateSpeechResponse;\n}\n\nexport interface CreateSpeechResponse {\n /**\n * Audio file content\n * @format WEB_URL\n */\n contentUrl?: string | null;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedAudioChunk\n extends GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\n/** @oneof */\nexport interface GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\nexport interface SpeechChunk {\n /** Partial audio file bytes. */\n content?: Uint8Array;\n}\n\nexport interface TextToSpeechChunk {\n /** Base64 encoded audio chunk */\n audioBase64?: Uint8Array;\n /** Alignment information for the generated audio given the input text sequence. */\n alignment?: AlignmentInfoInChunk;\n /** Alignment information for the generated audio given the input normalized text sequence. */\n normalizedAlignment?: AlignmentInfoInChunk;\n}\n\nexport interface AlignmentInfoInChunk {\n /**\n * Array of start times (in seconds) for each character\n * @maxSize 1000000\n */\n characterStartTimesSeconds?: number[];\n /**\n * Array of end times (in seconds) for each character\n * @maxSize 1000000\n */\n characterEndTimesSeconds?: number[];\n /**\n * Array of individual characters from the input or normalized text\n * @maxSize 1000000\n * @maxLength 1\n */\n characters?: string[];\n}\n\nexport interface PublishPromptRequest {\n /**\n * Prompt object to be serialized in the service's storage.\n * After serialization, GenerateTextByPromptId can be used with the Prompt's id.\n */\n prompt?: Prompt;\n}\n\nexport interface PublishPromptResponse {}\n\nexport interface GetPromptRequest {\n /**\n * Id of the Prompt object to be retrieved from service's storage.\n * @format GUID\n */\n promptId: string;\n /**\n * Key-value pairs that will be used to substitute templated parameters in the prompt.\n * It is expected that only USER or SYSTEM messages can be templated.\n */\n params?: Record<string, string>;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n}\n\nexport interface GetPromptResponse {\n /** Prompt object from service's storage. */\n prompt?: Prompt;\n}\n\nexport interface PublishProjectRequest {\n /**\n * Project object to be serialized in the service's storage.\n * After serialization, GenerateTextByProjectId can be used with the Project's id.\n */\n project?: Project;\n}\n\nexport interface Project {\n /**\n * Project id.\n * @format GUID\n */\n id?: string;\n /**\n * Id of the default Prompt associated with this Project. 
This Prompt will be used to conduct text generation requests.\n * @format GUID\n */\n defaultPromptId?: string;\n /** ExperimentalPromptConfig object that describes an optional second Prompt that can be invoked. */\n experimentPromptConfig?: ExperimentalPromptConfig;\n}\n\nexport interface ExperimentalPromptConfig {\n /**\n * Id of the experimental Prompt associated with this Project. This Prompt will be used for text generation in case the associated experiment resolves to true.\n * @format GUID\n */\n experimentalPromptId?: string | null;\n /**\n * Name of experiment in Petri that will define the switch between default and optional Prompt. False is resolved to default_prompt_id invocation. True is resolved to experimental_prompt_id invocation.\n * @maxLength 200\n */\n petriExperimentName?: string | null;\n}\n\nexport interface PublishProjectResponse {}\n\n/** Should match same event in GatewayVisibility */\nexport interface ProjectConfigChangedDomainEvent {\n /**\n * Project ID\n * @maxLength 10000\n */\n projectId?: string;\n /**\n * Old default prompt id, if available\n * @maxLength 10000\n */\n oldDefaultPromptId?: string;\n /**\n * Old experimental prompt id, if available\n * @maxLength 10000\n */\n oldExperimentalPromptId?: string;\n /**\n * Old experiment name, if available\n * @maxLength 10000\n */\n oldExperimentName?: string;\n /**\n * New default prompt id\n * @maxLength 10000\n */\n newDefaultPromptId?: string;\n /**\n * New experimental prompt id\n * @maxLength 10000\n */\n newExperimentalPromptId?: string;\n /**\n * New experiment name\n * @maxLength 10000\n */\n newExperimentName?: string;\n /**\n * Application that originated the request\n * @maxLength 10000\n */\n applicationId?: string;\n /**\n * Sender artifact ID\n * @maxLength 10000\n */\n artifactId?: string;\n}\n\nexport interface GetProjectRequest {\n /**\n * Id of the Project object to be retrieved from service's storage.\n * @format GUID\n */\n projectId: string;\n}\n\nexport interface GetProjectResponse {\n /** Project object from service's storage. */\n project?: Project;\n}\n\nexport interface GetStatusRequest {\n /** Type of the entity to retrieve status for. */\n entityType: EntityTypeWithLiterals;\n /**\n * Id of the entity to retrieve status for. In case of VENDOR, the id is one of the supported vendors.\n * Supported vendors are: {open-ai,google,azure,stability-ai}.\n * In case of PROJECT or PROMPT, the id is the id of the Project or Prompt object.\n * @maxLength 50\n */\n entityId: string | null;\n}\n\nexport enum EntityType {\n UNKNOWN_ENTITY_TYPE = 'UNKNOWN_ENTITY_TYPE',\n VENDOR = 'VENDOR',\n PROJECT = 'PROJECT',\n PROMPT = 'PROMPT',\n}\n\n/** @enumType */\nexport type EntityTypeWithLiterals =\n | EntityType\n | 'UNKNOWN_ENTITY_TYPE'\n | 'VENDOR'\n | 'PROJECT'\n | 'PROMPT';\n\nexport interface GetStatusResponse {\n /** Type of the entity to retrieve status for. */\n entityType?: EntityTypeWithLiterals;\n /** Outage status of the entity. 
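\n *\n * A status-check sketch (illustrative):\n * ```typescript\n * const statusReq: GetStatusRequest = { entityType: 'VENDOR', entityId: 'open-ai' };\n * // resp.outageStatus is one of 'HEALTHY' | 'OUTAGE' | 'UNKNOWN_STATUS'.\n * ```\n 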
*/\n outageStatus?: OutageStatusWithLiterals;\n}\n\nexport enum OutageStatus {\n UNKNOWN_STATUS = 'UNKNOWN_STATUS',\n HEALTHY = 'HEALTHY',\n OUTAGE = 'OUTAGE',\n}\n\n/** @enumType */\nexport type OutageStatusWithLiterals =\n | OutageStatus\n | 'UNKNOWN_STATUS'\n | 'HEALTHY'\n | 'OUTAGE';\n\nexport interface GetApplicationUsageRequest {}\n\nexport interface GetApplicationUsageResponse {\n /** Info about application quota usage */\n applicationBudgetInfo?: ApplicationBudgetInfo;\n /** Info about user per application quota usage */\n userPerApplicationBudgetInfo?: UserPerApplicationBudgetInfo;\n}\n\nexport interface ApplicationBudgetInfo {\n /** Whether the next call is estimated to succeed based on the remaining monthly budget constraints. */\n eligible?: boolean;\n /** Monthly budget assigned to the calling application, in microcents. */\n totalMonthlyBudget?: string;\n /** Monthly budget spent thus far by the calling application, in microcents. */\n spentMonthlyBudget?: string;\n}\n\nexport interface UserPerApplicationBudgetInfo {\n /** Whether the next call is estimated to succeed based on the remaining budget constraints. */\n eligible?: boolean;\n /** Budget assigned to the user, in microcents. */\n totalBudget?: string;\n /** Budget spent thus far by the user, in microcents. */\n spentBudget?: string;\n /**\n * Limitation timeframe\n * @maxLength 20\n */\n timeframe?: string;\n}\n\nexport interface Wix_ai_gatewayV1EditImageRequest\n extends Wix_ai_gatewayV1EditImageRequestRequestOneOf {\n /** Photoroom remove background request */\n photoroomRemoveBackgroundRequest?: RemoveBackgroundRequest;\n /** Photoroom image editing request */\n photoroomImageEditingRequest?: ImageEditingRequest;\n /** Stability Edit image */\n stabilityAiEditRequest?: V1EditImageRequest;\n /** Replicate edit image */\n replicateEditImageRequest?: EditImageRequest;\n /** Recraft edit image */\n recraftEditImageRequest?: Recraft_proxyV1EditImageRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface Wix_ai_gatewayV1EditImageRequestRequestOneOf {\n /** Photoroom remove background request */\n photoroomRemoveBackgroundRequest?: RemoveBackgroundRequest;\n /** Photoroom image editing request */\n photoroomImageEditingRequest?: ImageEditingRequest;\n /** Stability Edit image */\n stabilityAiEditRequest?: V1EditImageRequest;\n /** Replicate edit image */\n replicateEditImageRequest?: EditImageRequest;\n /** Recraft edit image */\n recraftEditImageRequest?: Recraft_proxyV1EditImageRequest;\n}\n\nexport interface RemoveBackgroundRequest {\n /**\n * The image file to render\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * The format of the resulting image.\n * Allowed values: Allowed values:png,jpg webp\n * Default value is \"png\".\n * @maxLength 100\n */\n format?: string | null;\n /**\n * The channels of the resulting image\n * Allowed values:rgba,alpha\n * Default: rgba\n * @maxLength 100\n */\n channels?: string | null;\n /**\n * The background color of the resulting image. Can be a hex code (#FF00FF) or a HTML color (red, green, etc.)\n * @maxLength 100\n */\n bgColor?: string | null;\n /**\n * Will resize the output to the specified size. 
Can be preview (0.25 Megapixels),\n * medium (1.5 MP), hd (4 MP) or full (36 MP, can be slower for large images).\n * Useful for mobile apps that need smaller images.\n * Allowed values:preview,medium,hd,full\n * Default:full\n * @maxLength 100\n */\n size?: string | null;\n /**\n * If true, the image returned is cropped to the cutout border. Transparent pixels are removed from the border\n * Allowed values:true,false\n * Default: false\n * @maxLength 100\n */\n crop?: string | null;\n /**\n * If true, automatically removes colored reflections that have been left on the main subject by a green background.\n * Allowed values:true,false\n * Default: false\n * @maxLength 100\n */\n despill?: string | null;\n}\n\nexport interface ImageEditingRequest {\n /** The model version to use for image editing */\n model?: ImageEditingModelWithLiterals;\n /** The background properties to use for the image editing */\n background?: Background;\n /** The expand properties to use for the image editing */\n expand?: Expand;\n /** The export properties to use for the image editing */\n export?: Export;\n /**\n * [Advanced] Defines the horizontal alignment of the cutout subject within its bounding box.Show all...\n * Allowed values:left,center,right\n * @maxLength 100\n */\n horizontalAlignment?: string | null;\n /**\n * If set to true (default), cropped sides of the subject will snap to the edges For instance,\n * for a portrait image cropped below the elbows, the subject will be aligned at the bottom even if a bottom padding is provided\n * (but it will still respect bottom margin)\n * Can't be provided if removeBackground is set to false\n * (See positioning section of the documentation for more information)\n * Default: true\n */\n ignorePaddingAndSnapOnCroppedSides?: boolean | null;\n /**\n * URL of the main image used by the API. The GET endpoint accepts imageUrl only.\n * The maximum size of the image is 30MB.\n * If you want to directly upload an image file, please instead use the POST endpoint with the argument imageFile.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /** The lighting properties to use for the image editing */\n lighting?: Lighting;\n /** The margin properties to use for the image editing */\n margin?: Margin;\n /**\n * Maximum output height. Can only be provided if outputSize is originalImage or croppedSubject.\n * Useful for: re dimensioning while keeping the aspect ratio\n */\n maxHeight?: number | null;\n /**\n * Maximum output width. Can only be provided if outputSize is originalImage or croppedSubject.\n * Useful for: resizing an image while keeping the aspect ratio\n */\n maxWidth?: number | null;\n /**\n * Output size of the image. In the form of either:\n * auto to keep the template dimensions when templateId is defined, or behave like originalImage when templateId isn't defined (default)\n * widthXheight for a custom size (example: 200x400)\n * originalImage to keep the original image dimensions\n * croppedSubject to use the size of the foreground dimensions after cropping around it\n * Default:auto\n * Match pattern: ^(auto|\\d+x\\d+|originalImage|croppedSubject)$\n * @maxLength 100\n */\n outputSize?: string | null;\n /** The padding properties to use for the image editing */\n padding?: Padding;\n /**\n * [Advanced] subjectBox by default. 
When set to originalImage, the padding / margin will be around the original image and not the cropped subject.\n * It can lead to the subject disappearing when scaling is set to 'fill', for instance if the subject is on the left of a landscape image and outputSize is a square.\n * Most use cases don't require this option. It is useful if you'd like to maintain subject positioning in the original image.\n * Can't be provided if removeBackground is set to false\n * Allowed values:subjectBox,originalImage\n * Default: subjectBox\n * @maxLength 100\n */\n referenceBox?: string | null;\n /**\n * If enabled (default), the background of the image will be removed using PhotoRoom's award-winning algorithm\n * Default:true\n */\n removeBackground?: boolean | null;\n /**\n * Whether the subject should fit (default) or fill the output image If set to fit, the empty pixels will be transparentShow all...\n * Allowed values: fit fill , Default:fill\n * @maxLength 100\n */\n scaling?: string | null;\n /** The segmentation properties to use for the image editing */\n segmentation?: Segmentation;\n /** The shadow properties to use for the image editing */\n shadow?: Shadow;\n /**\n * The ID of the template to render\n * @format GUID\n */\n templateId?: string | null;\n /** The text removal properties to use for the image editing */\n textRemoval?: TextRemoval;\n /**\n * [Advanced] Defines the vertical alignment of the cutout subject within its bounding box.\n * Specifying a custom vertical alignment will implicitly set ignorePaddingAndSnapOnCroppedSides to false for the vertical direction.\n * Allowed values: top center, bottom\n * @maxLength 100\n */\n verticalAlignment?: string | null;\n}\n\nexport interface Guidance {\n /**\n * URL of the image to use as a background image guidance.\n * Can't be provided if removeBackground is set to false.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * How closely the generated background will be matching the guiding image, between 0 and 1.\n * A value of 1 means it will match the guiding image as much as possible, a value of 0 means the guiding image will be ignored.\n * @max 1\n */\n scale?: number | null;\n}\n\nexport enum ImageEditingModel {\n IMAGE_EDITING_MODEL_UNSPECIFIED = 'IMAGE_EDITING_MODEL_UNSPECIFIED',\n PR_AI_BACKGROUND_MODEL_VERSION_3 = 'PR_AI_BACKGROUND_MODEL_VERSION_3',\n PR_AI_BACKGROUND_MODEL_VERSION_4 = 'PR_AI_BACKGROUND_MODEL_VERSION_4',\n}\n\n/** @enumType */\nexport type ImageEditingModelWithLiterals =\n | ImageEditingModel\n | 'IMAGE_EDITING_MODEL_UNSPECIFIED'\n | 'PR_AI_BACKGROUND_MODEL_VERSION_3'\n | 'PR_AI_BACKGROUND_MODEL_VERSION_4';\n\nexport interface Background {\n /**\n * Color of the background. If omitted, background will be transparent unless background.imageUrl or background.\n * imageFile is provided. 
Can be a hex color without the hash sign (example: FF0000, FF0000EE) or color name (examples: red, blue)\n * Can't be provided if removeBackground is set to false\n * Default: transparent\n * @maxLength 1000\n */\n color?: string | null;\n /**\n * If ai.auto, a pre-processing step is applied to expand the prompt into a longer form.\n * auto and never are legacy values that will be removed in the next major version.\n * @maxLength 1000\n */\n expandPrompt?: string | null;\n /** The guidance properties to use for the image editing */\n guidance?: Guidance;\n /**\n * URL of the image to use as a background.\n * Can't be provided if removeBackground is set to false\n * The maximum size of the image is 30MB.\n * If background.imageUrl is provided, neither background.imageFile nor background.prompt can be provided, and vice versa.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Prompt to use for guiding the background generation process.\n * If background.prompt is provided, neither background.imageUrl nor background.imageFile can be provided, and vice versa.\n * @maxLength 100000\n */\n prompt?: string | null;\n /**\n * Whether the background should fit or fill (default) the output image If set to fit, the empty pixels will be transparentShow all...\n * Allowed values: fit fill , Default:fill\n * @maxLength 100\n */\n scaling?: string | null;\n /** Seed used to generate the background. Can be used to get similar looking results for the same prompt. */\n seed?: number | null;\n}\n\nexport interface Expand {\n /**\n * Expand mode to use on the main image used by the API.\n * If set to ai.auto, all transparent pixels will automatically be filled based on the content of the current background\n * (either the original background, if removeBackground has been set to false, or a static background, if background.imageUrl has been provided)\n * Expand will rely on output size, subject position, and fitting mode.\n * @maxLength 1000\n */\n mode?: string | null;\n /** Seed used to generate the background. Can be used to get similar looking results for the same prompt. */\n seed?: number | null;\n}\n\nexport interface Export {\n /**\n * The pixel density of the result image.\n * Pixel density can be set to any value between 72 and 1200 dpi.\n */\n dpi?: number | null;\n /**\n * The format of the result image.\n * Default value is \"png\".\n * Allowed values: png, jpeg, jpg Default:png\n * @maxLength 100\n */\n format?: string | null;\n}\n\nexport interface Lighting {\n /**\n * Lighting mode to use on the main image used by the API. If set to ai.auto, the lighting will be automatically adjusted\n * Allowed value:ai.auto\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface Margin {\n /**\n * General margin around the subject. Can be expressed as a number between 0 and 0.49,\n * a percentage string between 0% and 49% (e.g., \"30%\"), or a pixel value string (e.g., \"100px\").\n * Unlike padding, margin is never ignored even on cropped sides of the subject.\n * Expressed in a ratio of the output image size. See positioning section of the documentation for more information.\n * Default: 0\n * @max 0.49\n */\n general?: number | null;\n /**\n * Bottom Margin, overrides general margin on the bottom side. Accepts the same formats as margin.\n * Default: 0\n * @max 0.49\n */\n bottom?: number | null;\n /**\n * Left Margin, overrides general margin on the left side. 
\nexport interface Margin {\n /**\n * General margin around the subject. Can be expressed as a number between 0 and 0.49,\n * a percentage string between 0% and 49% (e.g., \"30%\"), or a pixel value string (e.g., \"100px\").\n * Unlike padding, margin is never ignored, even on cropped sides of the subject.\n * Expressed as a ratio of the output image size. See the positioning section of the documentation for more information.\n * Default: 0\n * @max 0.49\n */\n general?: number | null;\n /**\n * Bottom Margin, overrides general margin on the bottom side. Accepts the same formats as margin.\n * Default: 0\n * @max 0.49\n */\n bottom?: number | null;\n /**\n * Left Margin, overrides general margin on the left side. Accepts the same formats as margin.\n * @max 0.49\n */\n left?: number | null;\n /**\n * Right Margin, overrides general margin on the right side. Accepts the same formats as margin.\n * @max 0.49\n */\n right?: number | null;\n /**\n * Top Margin, overrides general margin on the top side. Accepts the same formats as margin.\n * @max 0.49\n */\n top?: number | null;\n}\n\nexport interface Padding {\n /**\n * General padding around the subject. Can be expressed as a number between 0 and 0.49, a percentage string between 0% and 49% (e.g., \"30%\"),\n * or a pixel value string (e.g., \"100px\"). Unlike margin, padding will be ignored on cropped sides of the subject if that option is enabled.\n * Expressed as a ratio of the size of the document, minus margins (similar to CSS).\n * See the positioning section of the documentation for more information.\n * Default: 0\n * @maxLength 100\n */\n general?: string | null;\n /**\n * Bottom Padding, overrides general padding on the bottom side. Accepts the same formats as padding.\n * Default: 0\n * @maxLength 100\n */\n bottom?: string | null;\n /**\n * Left Padding, overrides general padding on the left side. Accepts the same formats as padding.\n * @maxLength 100\n */\n left?: string | null;\n /**\n * Right Padding, overrides general padding on the right side. Accepts the same formats as padding.\n * @maxLength 100\n */\n right?: string | null;\n /**\n * Top Padding, overrides general padding on the top side. Accepts the same formats as padding.\n * @maxLength 100\n */\n top?: string | null;\n}\n\nexport interface Segmentation {\n /**\n * Controls whether the salient object should be kept or ignored by the segmentation model.\n * Allowed values: keepSalientObject, ignoreSalientObject\n * Default: ignoreSalientObject\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * A textual description of what the segmentation should remove.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * A textual description of what the segmentation should keep.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface Shadow {\n /**\n * Shadow generation mode to use on the main image used by the API. If set to ai.soft, a soft shadow will be generated.\n * If set to ai.hard, a hard shadow will be generated. If set to ai.floating, a floating shadow will be generated.\n * Allowed values: ai.soft, ai.hard, ai.floating\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface TextRemoval {\n /**\n * Text removal mode to use on the main image used by the API.\n * If set to ai.artificial, artificial text will be automatically removed.\n * Artificial text includes all text added on an image through post-processing, such as company names, watermarks, discounts, etc.\n * If set to ai.natural, natural text will be automatically removed.\n * Natural text includes text that naturally occurs in an image, such as writing on buildings or clothing, road signs, etc.\n * If set to ai.all, all text (natural and artificial) will be automatically removed.\n * Allowed values: ai.artificial, ai.natural, ai.all\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface V1EditImageRequest {\n /** The model to use for generating the image. 
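\n * @example\n * // Added sketch (editorial, hedged): an ERASE inpainting request; the URLs and values\n * // below are illustrative assumptions, not taken from this package.\n * const request: V1EditImageRequest = {\n *   model: 'ERASE',\n *   imageUrl: 'https://static.wixstatic.com/media/source.png',\n *   // White mask pixels are inpainted at full strength; black pixels are preserved.\n *   imageMask: 'https://static.wixstatic.com/media/mask.png',\n *   growMask: 5,\n *   outputFormat: 'png',\n * };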
*/\n model?: V1EditImageModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Image format: jpeg, png, webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask: the image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength, while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support: if you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted, while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * Image mask format: jpeg, png, webp\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [0 .. 4294967294] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Dictates the content-type of the generated image.\n * Enum: jpeg, png, webp\n * Default: png\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\n/** flynt-http-mapping-breaking-change */\nexport enum V1EditImageModel {\n UNKNOWN_EDIT_IMAGE_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_REQUEST_MODEL',\n ERASE = 'ERASE',\n}\n\n/** @enumType */\nexport type V1EditImageModelWithLiterals =\n | V1EditImageModel\n | 'UNKNOWN_EDIT_IMAGE_REQUEST_MODEL'\n | 'ERASE';\n\nexport interface EditImageRequest {\n /** Model to use */\n model?: EditImageModelWithLiterals;\n /**\n * Input image URL\n * @maxLength 1000\n */\n image?: string | null;\n /** Desired scale */\n scale?: string | null;\n /** Optional face enhancement */\n faceEnhance?: boolean | null;\n /**\n * Choose the format of the output image.\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum EditImageModel {\n /** Default */\n UNKNOWN_EDIT_IMAGE_MODEL = 'UNKNOWN_EDIT_IMAGE_MODEL',\n /** real-esrgan */\n REAL_ESRGAN = 'REAL_ESRGAN',\n /** https://replicate.com/recraft-ai/recraft-vectorize/api */\n RECRAFT_VECTORIZE = 'RECRAFT_VECTORIZE',\n}\n\n/** @enumType */\nexport type EditImageModelWithLiterals =\n | EditImageModel\n | 'UNKNOWN_EDIT_IMAGE_MODEL'\n | 'REAL_ESRGAN'\n | 'RECRAFT_VECTORIZE';\n\nexport interface Recraft_proxyV1EditImageRequest {\n /** Which action to perform */\n editAction?: EditActionWithLiterals;\n /**\n * Input image URL\n * @maxLength 10000\n */\n image?: string;\n /**\n * Image mask URL, used only by `ERASE_REGION`. From the docs: \"An image encoded in grayscale color mode,\n * used to define the specific regions of the image to be erased. The white pixels represent the parts of the\n * image that will be erased, while black pixels indicate the parts of the image that will remain unchanged.\n * Should have exactly the same size as the image.\n * Each pixel of the image should be either pure black (value 0) or pure white (value 255).\"\n * @maxLength 10000\n */\n mask?: string | null;\n}\n\n/** https://www.recraft.ai/docs/api-reference/usage#vectorize-image */\nexport enum EditAction {\n UNKNOWN_EDIT_ACTION = 'UNKNOWN_EDIT_ACTION',\n VECTORIZE = 'VECTORIZE',\n REMOVE_BACKGROUND = 'REMOVE_BACKGROUND',\n CRISP_UPSCALE = 'CRISP_UPSCALE',\n CREATIVE_UPSCALE = 'CREATIVE_UPSCALE',\n ERASE_REGION = 'ERASE_REGION',\n}\n\n/** @enumType */\nexport type EditActionWithLiterals =\n | EditAction\n | 'UNKNOWN_EDIT_ACTION'\n | 'VECTORIZE'\n | 'REMOVE_BACKGROUND'\n | 'CRISP_UPSCALE'\n | 'CREATIVE_UPSCALE'\n | 'ERASE_REGION';\n\nexport interface Wix_ai_gatewayV1EditImageResponse\n extends Wix_ai_gatewayV1EditImageResponseResponseOneOf {\n /** Photoroom remove background response */\n photoroomRemoveBackgroundResponse?: RemoveBackgroundResponse;\n /** Photoroom image editing response */\n photoroomImageEditingResponse?: ImageEditingResponse;\n /** Stability Edit response */\n stabilityAiEditResponse?: V1EditImageResponse;\n /** Replicate edit image */\n replicateEditImageResponse?: EditImageResponse;\n /** Recraft edit image */\n recraftEditImageResponse?: Recraft_proxyV1EditImageResponse;\n /** Extracted cost of the request in microcents. 
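\n * @example\n * // Added sketch (editorial, hedged): a response carries exactly one of the oneof variants\n * // above, so consumers typically coalesce across them (variable names are illustrative).\n * const response = {} as Wix_ai_gatewayV1EditImageResponse;\n * const resultUrl =\n *   response.recraftEditImageResponse?.output ??\n *   response.photoroomImageEditingResponse?.imageUrl ??\n *   response.photoroomRemoveBackgroundResponse?.imageUrl;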
*/\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface Wix_ai_gatewayV1EditImageResponseResponseOneOf {\n /** Photoroom remove background response */\n photoroomRemoveBackgroundResponse?: RemoveBackgroundResponse;\n /** Photoroom image editing response */\n photoroomImageEditingResponse?: ImageEditingResponse;\n /** Stability Edit response */\n stabilityAiEditResponse?: V1EditImageResponse;\n /** Replicate edit image */\n replicateEditImageResponse?: EditImageResponse;\n /** Recraft edit image */\n recraftEditImageResponse?: Recraft_proxyV1EditImageResponse;\n}\n\nexport interface RemoveBackgroundResponse {\n /**\n * The URL of the image generated by the API. The image will be available for 24 hours.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * The uncertainty score is a number between 0 and 1. 0 means the model is very confident that the cutout is accurate,\n * 1 means the model is unsure.\n * For instance, shoes on a shoe box might give a higher uncertainty score as the model is unsure what to segment\n * (shoes, box or both). Currently the model returns an uncertainty score only for images of objects.\n * If an image contains humans, it will return the value -1.\n */\n xUncertaintyScore?: number | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageEditingResponse {\n /** This is the seed used when generating the background. Can be used to get similar looking results for the same prompt. */\n prAiBackgroundSeed?: number | null;\n /** When removing texts from an image, it will return the number of texts detected. */\n prTextsDetected?: number | null;\n /**\n * The uncertainty score is a number between 0 and 1. 0 means the model is very confident that the cutout is accurate,\n * 1 means the model is unsure. For instance, shoes on a shoe box might give a higher uncertainty score as the model is unsure what to segment (shoes, box or both).\n * Currently the model returns an uncertainty score only for images of objects. If an image contains humans, it will return the value -1.\n */\n xUncertaintyScore?: number | null;\n /**\n * The URL of the image generated by the API. The image will be available for 24 hours.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1EditImageResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: V1EditImageModelWithLiterals;\n /** Cost of the request in microcents. 
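\n * @example\n * // Added sketch (editorial, hedged): microcents are reported as a decimal string;\n * // 1 cent = 1,000,000 microcents, so 1 USD = 1e8 microcents.\n * const editResponse = {} as V1EditImageResponse;\n * const usdSpent = Number(editResponse.microcentsSpent ?? '0') / 1e8;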
*/\n microcentsSpent?: string | null;\n}\n\nexport interface EditImageResponse {\n /**\n * Timestamp when the prediction was completed\n * @maxLength 2000\n */\n completedAt?: string | null;\n /**\n * Timestamp when the prediction was created\n * @maxLength 2000\n */\n createdAt?: string | null;\n /** Whether data has been removed */\n dataRemoved?: boolean | null;\n /**\n * Error message if the prediction failed\n * @maxLength 1000\n */\n error?: string | null;\n /**\n * Unique identifier for the prediction\n * @maxLength 100\n */\n predictionId?: string | null;\n /** Input parameters for the prediction */\n input?: EditImageInput;\n /**\n * Logs from the prediction process\n * @maxLength 10000\n */\n logs?: string | null;\n /** Performance metrics */\n metrics?: PredictionMetrics;\n /**\n * Output URL of the processed image\n * @maxLength 2000\n */\n output?: string | null;\n /**\n * Timestamp when the prediction started\n * @maxLength 2000\n */\n startedAt?: string | null;\n /**\n * Status of the prediction\n * @maxLength 50\n */\n status?: string | null;\n /** URLs for API operations */\n urls?: PredictionUrls;\n /**\n * Version of the model used\n * @maxLength 100\n */\n version?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface EditImageInput {\n /**\n * Input image URL\n * @maxLength 1000\n */\n image?: string | null;\n /** Desired scale */\n scale?: string | null;\n /** Optional face enhancement */\n faceEnhance?: boolean | null;\n}\n\nexport interface PredictionMetrics {\n /** Time spent on prediction in seconds */\n predictTime?: number | null;\n /** Total time for the entire process in seconds */\n totalTime?: number | null;\n}\n\nexport interface PredictionUrls {\n /**\n * URL to get the prediction status\n * @maxLength 2000\n */\n get?: string | null;\n /**\n * URL to cancel the prediction\n * @maxLength 2000\n */\n cancel?: string | null;\n}\n\nexport interface Recraft_proxyV1EditImageResponse {\n /**\n * Output URL of the processed image\n * @maxLength 10000\n */\n output?: string | null;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface PollImageGenerationResultRequest\n extends PollImageGenerationResultRequestRequestOneOf {\n /** replicate proxy getResult request */\n replicateGetResultRequest?: V1GetResultRequest;\n /** BFL proxy getResult request */\n bflGetResultRequest?: GetResultRequest;\n /** Runware GetTaskResult request */\n runwareGetTaskResultRequest?: GetTaskResultRequest;\n /** OpenAI getVideoResult request */\n openAiGetVideoResultRequest?: GetVideoResultRequest;\n /** Contains additional information for the request. 
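\n * @example\n * // Added sketch (editorial, hedged): polling is a oneof request, so exactly one variant\n * // should be set; the GUID below is an illustrative placeholder.\n * const poll: PollImageGenerationResultRequest = {\n *   bflGetResultRequest: { id: '00000000-0000-0000-0000-000000000000' },\n * };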
*/\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface PollImageGenerationResultRequestRequestOneOf {\n /** replicate proxy getResult request */\n replicateGetResultRequest?: V1GetResultRequest;\n /** BFL proxy getResult request */\n bflGetResultRequest?: GetResultRequest;\n /** Runware GetTaskResult request */\n runwareGetTaskResultRequest?: GetTaskResultRequest;\n /** OpenAI getVideoResult request */\n openAiGetVideoResultRequest?: GetVideoResultRequest;\n}\n\nexport interface V1GetResultRequest {\n /**\n * The id of the task.\n * @maxLength 1000\n */\n id?: string | null;\n}\n\nexport interface GetResultRequest {\n /**\n * The id of the task.\n * @format GUID\n */\n id?: string | null;\n}\n\nexport interface GetTaskResultRequest {\n /**\n * Task UUID to get results for\n * @format GUID\n */\n taskUuid?: string;\n}\n\nexport interface GetVideoResultRequest {\n /**\n * The id of the video generation job.\n * @maxLength 200\n */\n id?: string;\n}\n\nexport interface PollImageGenerationResultResponse\n extends PollImageGenerationResultResponseResponseOneOf {\n /** replicate proxy getResult response */\n replicateGetResultResponse?: V1GetResultResponse;\n /** BFL proxy getResult response */\n bflGetResultResponse?: GetResultResponse;\n /** Runware GetTaskResult response */\n runwareGetTaskResultResponse?: GetTaskResultResponse;\n /** OpenAI getVideoResult response */\n openAiGetVideoResultResponse?: GetVideoResultResponse;\n}\n\n/** @oneof */\nexport interface PollImageGenerationResultResponseResponseOneOf {\n /** replicate proxy getResult response */\n replicateGetResultResponse?: V1GetResultResponse;\n /** BFL proxy getResult response */\n bflGetResultResponse?: GetResultResponse;\n /** Runware GetTaskResult response */\n runwareGetTaskResultResponse?: GetTaskResultResponse;\n /** OpenAI getVideoResult response */\n openAiGetVideoResultResponse?: GetVideoResultResponse;\n}\n\nexport interface V1GetResultResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n}\n\nexport interface GetResultResponse {\n /**\n * The id of the task.\n * @format GUID\n */\n id?: string | null;\n /**\n * Status of the image generation.\n * One of: Task not found, Pending, Request Moderated, Content Moderated, Ready, Error.\n * @maxLength 100\n */\n status?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface GetTaskResultResponse\n extends GetTaskResultResponseResponseOneOf {\n videoInferenceResponse?: VideoInferenceResponse;\n}\n\n/** @oneof */\nexport interface GetTaskResultResponseResponseOneOf {\n videoInferenceResponse?: VideoInferenceResponse;\n}\n\nexport interface GetVideoResultResponse {\n videoJob?: VideoJob;\n}\n\nexport interface DomainEvent extends DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. 
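\n * @example\n * // Added sketch (editorial, hedged): simple in-memory deduplication keyed on the event ID\n * // (the Set and function names are illustrative).\n * const seenEventIds = new Set<string>();\n * function shouldHandle(event: DomainEvent): boolean {\n *   if (!event.id || seenEventIds.has(event.id)) return false;\n *   seenEventIds.add(event.id);\n *   return true;\n * }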
*/\n id?: string;\n /**\n * Fully Qualified Domain Name of an entity. This is a unique identifier assigned to the API's main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order. Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\n/** @oneof */\nexport interface DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n}\n\nexport interface EntityCreatedEvent {\n entityAsJson?: string;\n /** Indicates the event was triggered by a restore-from-trashbin operation for a previously deleted entity. */\n restoreInfo?: RestoreInfo;\n}\n\nexport interface RestoreInfo {\n deletedDate?: Date | null;\n}\n\nexport interface EntityUpdatedEvent {\n /**\n * Since platformized APIs only expose PATCH and not PUT, we can't assume that the fields sent from the client are the actual diff.\n * This means that to generate a list of changed fields (as opposed to sent fields) one needs to traverse both objects.\n * We don't want to impose this on all developers, and so we leave this traversal to the notification recipients that need it.\n */\n currentEntityAsJson?: string;\n}\n\nexport interface EntityDeletedEvent {\n /** Entity that was deleted. */\n deletedEntityAsJson?: string | null;\n}\n\nexport interface ActionEvent {\n bodyAsJson?: string;\n}\n\nexport interface MessageEnvelope {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n /** Stringified payload. 
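\n * @example\n * // Added sketch (editorial, hedged): the payload arrives as a JSON string and is typically\n * // parsed by the consumer (the envelope value here is illustrative).\n * const envelope = {} as MessageEnvelope;\n * const payload = JSON.parse(envelope.data ?? '{}');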
*/\n data?: string;\n}\n\nexport interface IdentificationData extends IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n /** @readonly */\n identityType?: WebhookIdentityTypeWithLiterals;\n}\n\n/** @oneof */\nexport interface IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n}\n\nexport enum WebhookIdentityType {\n UNKNOWN = 'UNKNOWN',\n ANONYMOUS_VISITOR = 'ANONYMOUS_VISITOR',\n MEMBER = 'MEMBER',\n WIX_USER = 'WIX_USER',\n APP = 'APP',\n}\n\n/** @enumType */\nexport type WebhookIdentityTypeWithLiterals =\n | WebhookIdentityType\n | 'UNKNOWN'\n | 'ANONYMOUS_VISITOR'\n | 'MEMBER'\n | 'WIX_USER'\n | 'APP';\n","import * as ambassadorWixDsWixAiGatewayV1Prompt from './ds-wix-ai-gateway-v1-prompt-generators.http.js';\nimport * as ambassadorWixDsWixAiGatewayV1PromptTypes from './ds-wix-ai-gateway-v1-prompt-generators.types.js';\nimport * as ambassadorWixDsWixAiGatewayV1PromptUniversalTypes from './ds-wix-ai-gateway-v1-prompt-generators.universal.js';\n\nexport type __PublicMethodMetaInfo<\n K = string,\n M = unknown,\n T = unknown,\n S = unknown,\n Q = unknown,\n R = unknown\n> = {\n getUrl: (context: any) => string;\n httpMethod: K;\n path: string;\n pathParams: M;\n __requestType: T;\n __originalRequestType: S;\n __responseType: Q;\n __originalResponseType: R;\n};\n\nexport function generateTextByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptStreamed(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptStreamed(payload);\n\n const 
getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-streamed/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptObjectStreamed(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptObjectStreamed(\n payload\n );\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-object-streamed',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateEmbedding(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateEmbeddingsRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateEmbeddingsRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateEmbeddingsResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateEmbeddingsResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateEmbedding(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-embedding',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectResponse\n> {\n const payload = { projectId: 
':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByProjectStreamed(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByProjectStreamed(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-project-streamed/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateModeration(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateModerationRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateModerationRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateModerationResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateModerationResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateModeration(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-moderation',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptRequest,\n 
ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateContentByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateContentByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: 
null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateContentByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTranscription(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTranscriptionRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTranscriptionRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTranscriptionResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTranscriptionResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTranscription(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-transcription',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateAudio(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateAudio(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-audio',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateAudioStreamed(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedAudioChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedAudioChunk\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateAudioStreamed(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-audio-streamed',\n pathParams: {},\n __requestType: null as any,\n 
__originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function publishPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishPromptResponse\n> {\n const payload = { prompt: { id: ':promptId' } } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.publishPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/prompt/{prompt.id}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getPrompt(): __PublicMethodMetaInfo<\n 'GET',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function publishProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishProjectResponse\n> {\n const payload = { project: { id: ':projectId' } } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.publishProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/project/{project.id}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getProject(): __PublicMethodMetaInfo<\n 'GET',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/project/{projectId}',\n pathParams: { projectId: 'projectId' },\n 
__requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getStatus(): __PublicMethodMetaInfo<\n 'GET',\n { entityId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetStatusRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetStatusRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetStatusResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetStatusResponse\n> {\n const payload = { entityId: ':entityId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getStatus(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/status/{entityId}',\n pathParams: { entityId: 'entityId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getApplicationUsage(): __PublicMethodMetaInfo<\n 'GET',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetApplicationUsageRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetApplicationUsageRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetApplicationUsageResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetApplicationUsageResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getApplicationUsage(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/application-usage',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function editImage(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.Wix_ai_gatewayV1EditImageRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.Wix_ai_gatewayV1EditImageRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.Wix_ai_gatewayV1EditImageResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.Wix_ai_gatewayV1EditImageResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.editImage(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/edit-image',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function pollImageGenerationResult(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PollImageGenerationResultRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PollImageGenerationResultRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PollImageGenerationResultResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PollImageGenerationResultResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.pollImageGenerationResult(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/poll-image-generation-result',\n pathParams: {},\n __requestType: null as 
any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport {\n Prompt as PromptOriginal,\n PromptModelRequestOneOf as PromptModelRequestOneOfOriginal,\n FallbackPromptConfig as FallbackPromptConfigOriginal,\n OpenaiproxyV1CreateChatCompletionRequest as OpenaiproxyV1CreateChatCompletionRequestOriginal,\n OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf as OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOfOriginal,\n CreateChatCompletionRequestFunctionSignature as CreateChatCompletionRequestFunctionSignatureOriginal,\n OpenaiproxyV1Model as OpenaiproxyV1ModelOriginal,\n OpenaiproxyV1ModelWithLiterals as OpenaiproxyV1ModelWithLiteralsOriginal,\n OpenaiproxyV1ChatCompletionMessage as OpenaiproxyV1ChatCompletionMessageOriginal,\n ChatCompletionMessageFunctionWithArgs as ChatCompletionMessageFunctionWithArgsOriginal,\n OpenaiproxyV1ChatCompletionMessageImageUrlContent as OpenaiproxyV1ChatCompletionMessageImageUrlContentOriginal,\n OpenaiproxyV1ChatCompletionMessageMessageRole as OpenaiproxyV1ChatCompletionMessageMessageRoleOriginal,\n OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals as OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n ChatCompletionMessageToolCall as ChatCompletionMessageToolCallOriginal,\n OpenaiproxyV1ChatCompletionMessageContentPart as OpenaiproxyV1ChatCompletionMessageContentPartOriginal,\n OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf as OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOfOriginal,\n V1CreateChatCompletionRequestTool as V1CreateChatCompletionRequestToolOriginal,\n V1FineTuningSpec as V1FineTuningSpecOriginal,\n OpenaiproxyV1CreateChatCompletionRequestResponseFormat as OpenaiproxyV1CreateChatCompletionRequestResponseFormatOriginal,\n TextBisonPredictRequest as TextBisonPredictRequestOriginal,\n TextInstance as TextInstanceOriginal,\n PredictParameters as PredictParametersOriginal,\n TextBisonModel as TextBisonModelOriginal,\n TextBisonModelWithLiterals as TextBisonModelWithLiteralsOriginal,\n ChatBisonPredictRequest as ChatBisonPredictRequestOriginal,\n ChatInstance as ChatInstanceOriginal,\n Example as ExampleOriginal,\n ChatMessage as ChatMessageOriginal,\n ChatBisonModel as ChatBisonModelOriginal,\n ChatBisonModelWithLiterals as ChatBisonModelWithLiteralsOriginal,\n CreateChatCompletionRequest as CreateChatCompletionRequestOriginal,\n CreateChatCompletionRequestFunctionCallOneOf as CreateChatCompletionRequestFunctionCallOneOfOriginal,\n FunctionSignature as FunctionSignatureOriginal,\n V1Model as V1ModelOriginal,\n V1ModelWithLiterals as V1ModelWithLiteralsOriginal,\n V1ChatCompletionMessage as V1ChatCompletionMessageOriginal,\n FunctionWithArgs as FunctionWithArgsOriginal,\n ChatCompletionMessageImageUrlContent as ChatCompletionMessageImageUrlContentOriginal,\n ChatCompletionMessageMessageRole as ChatCompletionMessageMessageRoleOriginal,\n ChatCompletionMessageMessageRoleWithLiterals as ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n ToolCall as ToolCallOriginal,\n ChatCompletionMessageContentPart as ChatCompletionMessageContentPartOriginal,\n ChatCompletionMessageContentPartContentValueOneOf as ChatCompletionMessageContentPartContentValueOneOfOriginal,\n CreateChatCompletionRequestTool as CreateChatCompletionRequestToolOriginal,\n CreateChatCompletionRequestResponseFormat as CreateChatCompletionRequestResponseFormatOriginal,\n GenerateContentRequest as GenerateContentRequestOriginal,\n GoogleproxyV1Model 
as GoogleproxyV1ModelOriginal,\n GoogleproxyV1ModelWithLiterals as GoogleproxyV1ModelWithLiteralsOriginal,\n Content as ContentOriginal,\n ContentRole as ContentRoleOriginal,\n ContentRoleWithLiterals as ContentRoleWithLiteralsOriginal,\n V1ContentPart as V1ContentPartOriginal,\n ContentData as ContentDataOriginal,\n FunctionCall as FunctionCallOriginal,\n FunctionResponse as FunctionResponseOriginal,\n ExecutableCode as ExecutableCodeOriginal,\n Language as LanguageOriginal,\n LanguageWithLiterals as LanguageWithLiteralsOriginal,\n V1CodeExecutionResult as V1CodeExecutionResultOriginal,\n Outcome as OutcomeOriginal,\n OutcomeWithLiterals as OutcomeWithLiteralsOriginal,\n Blob as BlobOriginal,\n MediaResolution as MediaResolutionOriginal,\n MediaResolutionLevel as MediaResolutionLevelOriginal,\n MediaResolutionLevelWithLiterals as MediaResolutionLevelWithLiteralsOriginal,\n SystemInstruction as SystemInstructionOriginal,\n GoogleproxyV1Tool as GoogleproxyV1ToolOriginal,\n DynamicRetrievalConfigMode as DynamicRetrievalConfigModeOriginal,\n DynamicRetrievalConfigModeWithLiterals as DynamicRetrievalConfigModeWithLiteralsOriginal,\n DynamicRetrievalConfig as DynamicRetrievalConfigOriginal,\n Environment as EnvironmentOriginal,\n EnvironmentWithLiterals as EnvironmentWithLiteralsOriginal,\n FunctionDeclaration as FunctionDeclarationOriginal,\n GoogleSearchRetrieval as GoogleSearchRetrievalOriginal,\n CodeExecution as CodeExecutionOriginal,\n GoogleSearch as GoogleSearchOriginal,\n ComputerUse as ComputerUseOriginal,\n SafetySetting as SafetySettingOriginal,\n HarmCategory as HarmCategoryOriginal,\n HarmCategoryWithLiterals as HarmCategoryWithLiteralsOriginal,\n Threshold as ThresholdOriginal,\n ThresholdWithLiterals as ThresholdWithLiteralsOriginal,\n GenerationConfig as GenerationConfigOriginal,\n GenerationThinkingConfig as GenerationThinkingConfigOriginal,\n Modality as ModalityOriginal,\n ModalityWithLiterals as ModalityWithLiteralsOriginal,\n ImageConfig as ImageConfigOriginal,\n ImageOutputOptions as ImageOutputOptionsOriginal,\n PersonGeneration as PersonGenerationOriginal,\n PersonGenerationWithLiterals as PersonGenerationWithLiteralsOriginal,\n V1ToolConfig as V1ToolConfigOriginal,\n FunctionCallingConfig as FunctionCallingConfigOriginal,\n Mode as ModeOriginal,\n ModeWithLiterals as ModeWithLiteralsOriginal,\n FineTuningSpec as FineTuningSpecOriginal,\n InvokeAnthropicClaudeModelRequest as InvokeAnthropicClaudeModelRequestOriginal,\n InputSchema as InputSchemaOriginal,\n CacheControl as CacheControlOriginal,\n Type as TypeOriginal,\n TypeWithLiterals as TypeWithLiteralsOriginal,\n Model as ModelOriginal,\n ModelWithLiterals as ModelWithLiteralsOriginal,\n AnthropicClaudeMessage as AnthropicClaudeMessageOriginal,\n Role as RoleOriginal,\n RoleWithLiterals as RoleWithLiteralsOriginal,\n ContentBlock as ContentBlockOriginal,\n ContentBlockTypeOneOf as ContentBlockTypeOneOfOriginal,\n Text as TextOriginal,\n ImageUrl as ImageUrlOriginal,\n MediaType as MediaTypeOriginal,\n MediaTypeWithLiterals as MediaTypeWithLiteralsOriginal,\n ToolUse as ToolUseOriginal,\n ToolResult as ToolResultOriginal,\n SimpleContentBlock as SimpleContentBlockOriginal,\n SimpleContentBlockTypeOneOf as SimpleContentBlockTypeOneOfOriginal,\n Thinking as ThinkingOriginal,\n RedactedThinking as RedactedThinkingOriginal,\n Tool as ToolOriginal,\n ToolChoice as ToolChoiceOriginal,\n ToolChoiceType as ToolChoiceTypeOriginal,\n ToolChoiceTypeWithLiterals as ToolChoiceTypeWithLiteralsOriginal,\n ThinkingConfig as 
ThinkingConfigOriginal,\n McpServer as McpServerOriginal,\n McpServerType as McpServerTypeOriginal,\n McpServerTypeWithLiterals as McpServerTypeWithLiteralsOriginal,\n ToolConfiguration as ToolConfigurationOriginal,\n V1InvokeAnthropicClaudeModelRequest as V1InvokeAnthropicClaudeModelRequestOriginal,\n GoogleproxyV1InputSchema as GoogleproxyV1InputSchemaOriginal,\n GoogleproxyV1CacheControl as GoogleproxyV1CacheControlOriginal,\n V1CacheControlType as V1CacheControlTypeOriginal,\n V1CacheControlTypeWithLiterals as V1CacheControlTypeWithLiteralsOriginal,\n ClaudeModel as ClaudeModelOriginal,\n ClaudeModelWithLiterals as ClaudeModelWithLiteralsOriginal,\n V1AnthropicClaudeMessage as V1AnthropicClaudeMessageOriginal,\n V1MessageRoleRole as V1MessageRoleRoleOriginal,\n V1MessageRoleRoleWithLiterals as V1MessageRoleRoleWithLiteralsOriginal,\n GoogleproxyV1ContentBlock as GoogleproxyV1ContentBlockOriginal,\n GoogleproxyV1ContentBlockTypeOneOf as GoogleproxyV1ContentBlockTypeOneOfOriginal,\n GoogleproxyV1Text as GoogleproxyV1TextOriginal,\n GoogleproxyV1ImageUrl as GoogleproxyV1ImageUrlOriginal,\n V1ImageMediaTypeMediaType as V1ImageMediaTypeMediaTypeOriginal,\n V1ImageMediaTypeMediaTypeWithLiterals as V1ImageMediaTypeMediaTypeWithLiteralsOriginal,\n GoogleproxyV1ToolUse as GoogleproxyV1ToolUseOriginal,\n GoogleproxyV1ToolResult as GoogleproxyV1ToolResultOriginal,\n V1SimpleContentBlock as V1SimpleContentBlockOriginal,\n V1SimpleContentBlockTypeOneOf as V1SimpleContentBlockTypeOneOfOriginal,\n GoogleproxyV1Thinking as GoogleproxyV1ThinkingOriginal,\n GoogleproxyV1RedactedThinking as GoogleproxyV1RedactedThinkingOriginal,\n InvokeAnthropicClaudeModelRequestTool as InvokeAnthropicClaudeModelRequestToolOriginal,\n GoogleproxyV1ToolChoice as GoogleproxyV1ToolChoiceOriginal,\n GoogleproxyV1ToolChoiceType as GoogleproxyV1ToolChoiceTypeOriginal,\n GoogleproxyV1ToolChoiceTypeWithLiterals as GoogleproxyV1ToolChoiceTypeWithLiteralsOriginal,\n GoogleproxyV1ThinkingConfig as GoogleproxyV1ThinkingConfigOriginal,\n GoogleproxyV1McpServer as GoogleproxyV1McpServerOriginal,\n GoogleproxyV1McpServerType as GoogleproxyV1McpServerTypeOriginal,\n GoogleproxyV1McpServerTypeWithLiterals as GoogleproxyV1McpServerTypeWithLiteralsOriginal,\n V1McpServerToolConfiguration as V1McpServerToolConfigurationOriginal,\n InvokeAnthropicModelRequest as InvokeAnthropicModelRequestOriginal,\n AnthropicModel as AnthropicModelOriginal,\n AnthropicModelWithLiterals as AnthropicModelWithLiteralsOriginal,\n AnthropicMessage as AnthropicMessageOriginal,\n MessageRoleRole as MessageRoleRoleOriginal,\n MessageRoleRoleWithLiterals as MessageRoleRoleWithLiteralsOriginal,\n V1ContentBlock as V1ContentBlockOriginal,\n V1ContentBlockTypeOneOf as V1ContentBlockTypeOneOfOriginal,\n V1Text as V1TextOriginal,\n V1CacheControl as V1CacheControlOriginal,\n CacheControlType as CacheControlTypeOriginal,\n CacheControlTypeWithLiterals as CacheControlTypeWithLiteralsOriginal,\n Citation as CitationOriginal,\n CitationTypeOneOf as CitationTypeOneOfOriginal,\n CharLocationCitation as CharLocationCitationOriginal,\n PageLocationCitation as PageLocationCitationOriginal,\n ContentBlockLocationCitation as ContentBlockLocationCitationOriginal,\n WebSearchResultLocationCitation as WebSearchResultLocationCitationOriginal,\n SearchResultLocationCitation as SearchResultLocationCitationOriginal,\n V1ImageUrl as V1ImageUrlOriginal,\n ImageMediaTypeMediaType as ImageMediaTypeMediaTypeOriginal,\n ImageMediaTypeMediaTypeWithLiterals as 
ImageMediaTypeMediaTypeWithLiteralsOriginal,\n V1ToolUse as V1ToolUseOriginal,\n V1ToolResult as V1ToolResultOriginal,\n ToolResultContentBlock as ToolResultContentBlockOriginal,\n ToolResultContentBlockTypeOneOf as ToolResultContentBlockTypeOneOfOriginal,\n DocumentContent as DocumentContentOriginal,\n DocumentSource as DocumentSourceOriginal,\n CitationsEnabled as CitationsEnabledOriginal,\n ToolResultSearchResult as ToolResultSearchResultOriginal,\n V1Thinking as V1ThinkingOriginal,\n V1RedactedThinking as V1RedactedThinkingOriginal,\n McpToolUse as McpToolUseOriginal,\n ServerToolUse as ServerToolUseOriginal,\n WebSearchToolResult as WebSearchToolResultOriginal,\n WebSearchToolResultContentOneOf as WebSearchToolResultContentOneOfOriginal,\n WebSearchResultList as WebSearchResultListOriginal,\n WebSearchResult as WebSearchResultOriginal,\n WebSearchToolResultError as WebSearchToolResultErrorOriginal,\n CodeExecutionToolResult as CodeExecutionToolResultOriginal,\n CodeExecutionToolResultContentOneOf as CodeExecutionToolResultContentOneOfOriginal,\n CodeExecutionResult as CodeExecutionResultOriginal,\n CodeExecutionToolResultError as CodeExecutionToolResultErrorOriginal,\n ContainerUpload as ContainerUploadOriginal,\n WebFetchToolResult as WebFetchToolResultOriginal,\n WebFetchToolResultContentOneOf as WebFetchToolResultContentOneOfOriginal,\n WebFetchToolResultContentSuccess as WebFetchToolResultContentSuccessOriginal,\n WebFetchToolResultContentError as WebFetchToolResultContentErrorOriginal,\n V1Tool as V1ToolOriginal,\n V1ToolKindOneOf as V1ToolKindOneOfOriginal,\n CustomTool as CustomToolOriginal,\n V1InputSchema as V1InputSchemaOriginal,\n ComputerUseTool as ComputerUseToolOriginal,\n TextEditorTool as TextEditorToolOriginal,\n BashTool as BashToolOriginal,\n WebSearchTool as WebSearchToolOriginal,\n WebSearchUserLocation as WebSearchUserLocationOriginal,\n CodeExecutionTool as CodeExecutionToolOriginal,\n WebFetchTool as WebFetchToolOriginal,\n V1ToolChoice as V1ToolChoiceOriginal,\n V1ToolChoiceType as V1ToolChoiceTypeOriginal,\n V1ToolChoiceTypeWithLiterals as V1ToolChoiceTypeWithLiteralsOriginal,\n V1ThinkingConfig as V1ThinkingConfigOriginal,\n V1McpServer as V1McpServerOriginal,\n V1McpServerType as V1McpServerTypeOriginal,\n V1McpServerTypeWithLiterals as V1McpServerTypeWithLiteralsOriginal,\n McpServerToolConfiguration as McpServerToolConfigurationOriginal,\n RequestMetadata as RequestMetadataOriginal,\n InvokeLlamaModelRequest as InvokeLlamaModelRequestOriginal,\n LlamaModel as LlamaModelOriginal,\n LlamaModelWithLiterals as LlamaModelWithLiteralsOriginal,\n InvokeConverseRequest as InvokeConverseRequestOriginal,\n ConverseModel as ConverseModelOriginal,\n ConverseModelWithLiterals as ConverseModelWithLiteralsOriginal,\n ConverseMessage as ConverseMessageOriginal,\n ConverseContentBlock as ConverseContentBlockOriginal,\n ConverseContentBlockContentOneOf as ConverseContentBlockContentOneOfOriginal,\n ConverseReasoningContent as ConverseReasoningContentOriginal,\n ReasoningText as ReasoningTextOriginal,\n ConverseToolUse as ConverseToolUseOriginal,\n ConverseToolResult as ConverseToolResultOriginal,\n ConverseToolResultContent as ConverseToolResultContentOriginal,\n ConverseToolResultContentContentOneOf as ConverseToolResultContentContentOneOfOriginal,\n ConverseInferenceConfig as ConverseInferenceConfigOriginal,\n ToolConfig as ToolConfigOriginal,\n ConverseTool as ConverseToolOriginal,\n ToolSpecification as ToolSpecificationOriginal,\n ConverseInputSchema as 
ConverseInputSchemaOriginal,\n ConversePerformanceConfig as ConversePerformanceConfigOriginal,\n SystemContentBlock as SystemContentBlockOriginal,\n CreateImageRequest as CreateImageRequestOriginal,\n V1ImageModel as V1ImageModelOriginal,\n V1ImageModelWithLiterals as V1ImageModelWithLiteralsOriginal,\n ImageQuality as ImageQualityOriginal,\n ImageQualityWithLiterals as ImageQualityWithLiteralsOriginal,\n ImageSize as ImageSizeOriginal,\n ImageSizeWithLiterals as ImageSizeWithLiteralsOriginal,\n ImageStyle as ImageStyleOriginal,\n ImageStyleWithLiterals as ImageStyleWithLiteralsOriginal,\n V1TextToImageRequest as V1TextToImageRequestOriginal,\n ImageModel as ImageModelOriginal,\n ImageModelWithLiterals as ImageModelWithLiteralsOriginal,\n TextPrompt as TextPromptOriginal,\n ClipGuidancePreset as ClipGuidancePresetOriginal,\n ClipGuidancePresetWithLiterals as ClipGuidancePresetWithLiteralsOriginal,\n Sampler as SamplerOriginal,\n SamplerWithLiterals as SamplerWithLiteralsOriginal,\n TextToImageRequestStylePreset as TextToImageRequestStylePresetOriginal,\n TextToImageRequestStylePresetWithLiterals as TextToImageRequestStylePresetWithLiteralsOriginal,\n GenerateCoreRequest as GenerateCoreRequestOriginal,\n ImageCoreModel as ImageCoreModelOriginal,\n ImageCoreModelWithLiterals as ImageCoreModelWithLiteralsOriginal,\n GenerateCoreRequestStylePreset as GenerateCoreRequestStylePresetOriginal,\n GenerateCoreRequestStylePresetWithLiterals as GenerateCoreRequestStylePresetWithLiteralsOriginal,\n GenerateStableDiffusionRequest as GenerateStableDiffusionRequestOriginal,\n GenerationMode as GenerationModeOriginal,\n GenerationModeWithLiterals as GenerationModeWithLiteralsOriginal,\n ImageStableDiffusionModel as ImageStableDiffusionModelOriginal,\n ImageStableDiffusionModelWithLiterals as ImageStableDiffusionModelWithLiteralsOriginal,\n GenerateStableDiffusionRequestOutputFormat as GenerateStableDiffusionRequestOutputFormatOriginal,\n GenerateStableDiffusionRequestOutputFormatWithLiterals as GenerateStableDiffusionRequestOutputFormatWithLiteralsOriginal,\n GenerateAnImageRequest as GenerateAnImageRequestOriginal,\n GenerateAnImageModel as GenerateAnImageModelOriginal,\n GenerateAnImageModelWithLiterals as GenerateAnImageModelWithLiteralsOriginal,\n CreatePredictionRequest as CreatePredictionRequestOriginal,\n CreatePredictionRequestInputOneOf as CreatePredictionRequestInputOneOfOriginal,\n CreatePredictionModel as CreatePredictionModelOriginal,\n CreatePredictionModelWithLiterals as CreatePredictionModelWithLiteralsOriginal,\n FluxPulid as FluxPulidOriginal,\n FluxDevControlnet as FluxDevControlnetOriginal,\n ReveEdit as ReveEditOriginal,\n LucatacoFlorence2Large as LucatacoFlorence2LargeOriginal,\n TaskInput as TaskInputOriginal,\n TaskInputWithLiterals as TaskInputWithLiteralsOriginal,\n PerceptronIsaac01 as PerceptronIsaac01Original,\n ResponseType as ResponseTypeOriginal,\n ResponseTypeWithLiterals as ResponseTypeWithLiteralsOriginal,\n PrunaaiZImageTurbo as PrunaaiZImageTurboOriginal,\n QwenImageLayered as QwenImageLayeredOriginal,\n EditImageWithPromptRequest as EditImageWithPromptRequestOriginal,\n EditImageWithPromptRequestModel as EditImageWithPromptRequestModelOriginal,\n EditImageWithPromptRequestModelWithLiterals as EditImageWithPromptRequestModelWithLiteralsOriginal,\n StylePreset as StylePresetOriginal,\n StylePresetWithLiterals as StylePresetWithLiteralsOriginal,\n OutpaintDirection as OutpaintDirectionOriginal,\n TextToImageRequest as TextToImageRequestOriginal,\n TextToImageRequestModel 
as TextToImageRequestModelOriginal,\n TextToImageRequestModelWithLiterals as TextToImageRequestModelWithLiteralsOriginal,\n LoraModelSelect as LoraModelSelectOriginal,\n Inputs as InputsOriginal,\n InvokeMlPlatformLlamaModelRequest as InvokeMlPlatformLlamaModelRequestOriginal,\n InvokeChatCompletionRequest as InvokeChatCompletionRequestOriginal,\n PerplexityModel as PerplexityModelOriginal,\n PerplexityModelWithLiterals as PerplexityModelWithLiteralsOriginal,\n PerplexityMessage as PerplexityMessageOriginal,\n PerplexityMessageMessageRole as PerplexityMessageMessageRoleOriginal,\n PerplexityMessageMessageRoleWithLiterals as PerplexityMessageMessageRoleWithLiteralsOriginal,\n InvokeChatCompletionRequestResponseFormat as InvokeChatCompletionRequestResponseFormatOriginal,\n InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf as InvokeChatCompletionRequestResponseFormatFormatDetailsOneOfOriginal,\n GenerateImageRequest as GenerateImageRequestOriginal,\n ImagenModel as ImagenModelOriginal,\n ImagenModelWithLiterals as ImagenModelWithLiteralsOriginal,\n Instance as InstanceOriginal,\n Parameters as ParametersOriginal,\n OutputOptions as OutputOptionsOriginal,\n GenerateImageMlPlatformRequest as GenerateImageMlPlatformRequestOriginal,\n GenerateImageMlPlatformRequestInputOneOf as GenerateImageMlPlatformRequestInputOneOfOriginal,\n GenerateImageMlPlatformModel as GenerateImageMlPlatformModelOriginal,\n GenerateImageMlPlatformModelWithLiterals as GenerateImageMlPlatformModelWithLiteralsOriginal,\n V1FluxPulid as V1FluxPulidOriginal,\n CreateImageOpenAiRequest as CreateImageOpenAiRequestOriginal,\n OpenAiImageModel as OpenAiImageModelOriginal,\n OpenAiImageModelWithLiterals as OpenAiImageModelWithLiteralsOriginal,\n EditImageOpenAiRequest as EditImageOpenAiRequestOriginal,\n GenerateVideoRequest as GenerateVideoRequestOriginal,\n VideoGenModel as VideoGenModelOriginal,\n VideoGenModelWithLiterals as VideoGenModelWithLiteralsOriginal,\n GenerateVideoInstance as GenerateVideoInstanceOriginal,\n V1ImageInput as V1ImageInputOriginal,\n GenerateVideoParameters as GenerateVideoParametersOriginal,\n V1CreateChatCompletionRequest as V1CreateChatCompletionRequestOriginal,\n ChatCompletionModel as ChatCompletionModelOriginal,\n ChatCompletionModelWithLiterals as ChatCompletionModelWithLiteralsOriginal,\n GoogleproxyV1ChatCompletionMessage as GoogleproxyV1ChatCompletionMessageOriginal,\n V1ChatCompletionMessageImageUrlContent as V1ChatCompletionMessageImageUrlContentOriginal,\n V1ChatCompletionMessageMessageRole as V1ChatCompletionMessageMessageRoleOriginal,\n V1ChatCompletionMessageMessageRoleWithLiterals as V1ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n V1ChatCompletionMessageContentPart as V1ChatCompletionMessageContentPartOriginal,\n V1ChatCompletionMessageContentPartContentValueOneOf as V1ChatCompletionMessageContentPartContentValueOneOfOriginal,\n V1CreateChatCompletionRequestResponseFormat as V1CreateChatCompletionRequestResponseFormatOriginal,\n InvokeMlPlatformOpenAIChatCompletionRawRequest as InvokeMlPlatformOpenAIChatCompletionRawRequestOriginal,\n ChatCompletionMessage as ChatCompletionMessageOriginal,\n ImageUrlContent as ImageUrlContentOriginal,\n MessageRole as MessageRoleOriginal,\n MessageRoleWithLiterals as MessageRoleWithLiteralsOriginal,\n ContentPart as ContentPartOriginal,\n ContentPartContentValueOneOf as ContentPartContentValueOneOfOriginal,\n ResponseFormat as ResponseFormatOriginal,\n VideoInferenceRequest as VideoInferenceRequestOriginal,\n OutputFormat as 
OutputFormatOriginal,\n OutputFormatWithLiterals as OutputFormatWithLiteralsOriginal,\n FrameImage as FrameImageOriginal,\n VideoModel as VideoModelOriginal,\n VideoModelWithLiterals as VideoModelWithLiteralsOriginal,\n V1OpenAiResponsesRequest as V1OpenAiResponsesRequestOriginal,\n V1ResponsesModel as V1ResponsesModelOriginal,\n V1ResponsesModelWithLiterals as V1ResponsesModelWithLiteralsOriginal,\n V1ResponsesInputItem as V1ResponsesInputItemOriginal,\n V1ResponsesInputItemItemOneOf as V1ResponsesInputItemItemOneOfOriginal,\n V1ResponsesInputMessage as V1ResponsesInputMessageOriginal,\n ResponsesInputMessageResponsesMessageRole as ResponsesInputMessageResponsesMessageRoleOriginal,\n ResponsesInputMessageResponsesMessageRoleWithLiterals as ResponsesInputMessageResponsesMessageRoleWithLiteralsOriginal,\n V1ResponsesInputMessageContent as V1ResponsesInputMessageContentOriginal,\n V1ResponsesInputMessageContentContentValueOneOf as V1ResponsesInputMessageContentContentValueOneOfOriginal,\n ResponsesInputMessageContentImageInput as ResponsesInputMessageContentImageInputOriginal,\n ResponsesInputMessageContentFileInput as ResponsesInputMessageContentFileInputOriginal,\n V1ResponsesOutputMessage as V1ResponsesOutputMessageOriginal,\n V1OutputAnnotation as V1OutputAnnotationOriginal,\n V1OutputAnnotationAnnotationTypeOneOf as V1OutputAnnotationAnnotationTypeOneOfOriginal,\n V1UrlCitation as V1UrlCitationOriginal,\n ResponsesOutputMessageOutputContent as ResponsesOutputMessageOutputContentOriginal,\n V1ResponsesWebSearchToolCall as V1ResponsesWebSearchToolCallOriginal,\n ResponsesWebSearchToolCallAction as ResponsesWebSearchToolCallActionOriginal,\n V1ResponsesFunctionToolCall as V1ResponsesFunctionToolCallOriginal,\n V1ResponsesFunctionToolCallOutput as V1ResponsesFunctionToolCallOutputOriginal,\n V1ResponsesReasoningOutput as V1ResponsesReasoningOutputOriginal,\n V1ResponsesReasoningSummaryContent as V1ResponsesReasoningSummaryContentOriginal,\n V1ResponsesReasoningContent as V1ResponsesReasoningContentOriginal,\n V1ResponsesCodeInterpreterToolCall as V1ResponsesCodeInterpreterToolCallOriginal,\n V1ResponsesCodeInterpreterOutput as V1ResponsesCodeInterpreterOutputOriginal,\n V1ResponsesCodeInterpreterOutputOutputTypeOneOf as V1ResponsesCodeInterpreterOutputOutputTypeOneOfOriginal,\n V1ResponsesCodeInterpreterLogsOutput as V1ResponsesCodeInterpreterLogsOutputOriginal,\n V1ResponsesCodeInterpreterImageOutput as V1ResponsesCodeInterpreterImageOutputOriginal,\n V1ResponsesReasoning as V1ResponsesReasoningOriginal,\n V1ResponsesTextFormat as V1ResponsesTextFormatOriginal,\n V1ResponsesTextFormatFormatOneOf as V1ResponsesTextFormatFormatOneOfOriginal,\n ResponsesTextFormatJsonSchema as ResponsesTextFormatJsonSchemaOriginal,\n V1ResponsesToolChoice as V1ResponsesToolChoiceOriginal,\n V1ResponsesTool as V1ResponsesToolOriginal,\n V1ResponsesToolToolTypeOneOf as V1ResponsesToolToolTypeOneOfOriginal,\n V1ResponsesWebSearch as V1ResponsesWebSearchOriginal,\n ResponsesWebSearchUserLocation as ResponsesWebSearchUserLocationOriginal,\n V1ResponsesFunction as V1ResponsesFunctionOriginal,\n V1ResponsesCodeInterpreter as V1ResponsesCodeInterpreterOriginal,\n V1ResponsesCodeInterpreterContainer as V1ResponsesCodeInterpreterContainerOriginal,\n V1ResponsesCodeInterpreterContainerContainerTypeOneOf as V1ResponsesCodeInterpreterContainerContainerTypeOneOfOriginal,\n V1ResponsesCodeInterpreterContainerAuto as V1ResponsesCodeInterpreterContainerAutoOriginal,\n OpenAiResponsesRequest as 
OpenAiResponsesRequestOriginal,\n ResponsesModel as ResponsesModelOriginal,\n ResponsesModelWithLiterals as ResponsesModelWithLiteralsOriginal,\n ResponsesInputItem as ResponsesInputItemOriginal,\n ResponsesInputItemItemOneOf as ResponsesInputItemItemOneOfOriginal,\n ResponsesInputMessage as ResponsesInputMessageOriginal,\n ResponsesMessageRole as ResponsesMessageRoleOriginal,\n ResponsesMessageRoleWithLiterals as ResponsesMessageRoleWithLiteralsOriginal,\n ResponsesInputMessageContent as ResponsesInputMessageContentOriginal,\n ResponsesInputMessageContentContentValueOneOf as ResponsesInputMessageContentContentValueOneOfOriginal,\n ImageInput as ImageInputOriginal,\n FileInput as FileInputOriginal,\n ResponsesOutputMessage as ResponsesOutputMessageOriginal,\n OutputAnnotation as OutputAnnotationOriginal,\n OutputAnnotationAnnotationTypeOneOf as OutputAnnotationAnnotationTypeOneOfOriginal,\n UrlCitation as UrlCitationOriginal,\n OutputContent as OutputContentOriginal,\n ResponsesWebSearchToolCall as ResponsesWebSearchToolCallOriginal,\n Action as ActionOriginal,\n ResponsesFunctionToolCall as ResponsesFunctionToolCallOriginal,\n ResponsesFunctionToolCallOutput as ResponsesFunctionToolCallOutputOriginal,\n ResponsesReasoningOutput as ResponsesReasoningOutputOriginal,\n ResponsesReasoningSummaryContent as ResponsesReasoningSummaryContentOriginal,\n ResponsesReasoningContent as ResponsesReasoningContentOriginal,\n ResponsesCodeInterpreterToolCall as ResponsesCodeInterpreterToolCallOriginal,\n ResponsesCodeInterpreterOutput as ResponsesCodeInterpreterOutputOriginal,\n ResponsesCodeInterpreterOutputOutputTypeOneOf as ResponsesCodeInterpreterOutputOutputTypeOneOfOriginal,\n ResponsesCodeInterpreterLogsOutput as ResponsesCodeInterpreterLogsOutputOriginal,\n ResponsesCodeInterpreterImageOutput as ResponsesCodeInterpreterImageOutputOriginal,\n ResponsesReasoning as ResponsesReasoningOriginal,\n ResponsesTextFormat as ResponsesTextFormatOriginal,\n ResponsesTextFormatFormatOneOf as ResponsesTextFormatFormatOneOfOriginal,\n JsonSchema as JsonSchemaOriginal,\n ResponsesToolChoice as ResponsesToolChoiceOriginal,\n ResponsesTool as ResponsesToolOriginal,\n ResponsesToolToolTypeOneOf as ResponsesToolToolTypeOneOfOriginal,\n ResponsesWebSearch as ResponsesWebSearchOriginal,\n UserLocation as UserLocationOriginal,\n ResponsesFunction as ResponsesFunctionOriginal,\n ResponsesCodeInterpreter as ResponsesCodeInterpreterOriginal,\n ResponsesCodeInterpreterContainer as ResponsesCodeInterpreterContainerOriginal,\n ResponsesCodeInterpreterContainerContainerTypeOneOf as ResponsesCodeInterpreterContainerContainerTypeOneOfOriginal,\n ResponsesCodeInterpreterContainerAuto as ResponsesCodeInterpreterContainerAutoOriginal,\n CreateVideoRequest as CreateVideoRequestOriginal,\n V1VideoModel as V1VideoModelOriginal,\n V1VideoModelWithLiterals as V1VideoModelWithLiteralsOriginal,\n ContentGenerationRequestedEvent as ContentGenerationRequestedEventOriginal,\n UserRequestInfo as UserRequestInfoOriginal,\n ContentGenerationSucceededEvent as ContentGenerationSucceededEventOriginal,\n GenerateContentModelResponse as GenerateContentModelResponseOriginal,\n GenerateContentModelResponseResponseOneOf as GenerateContentModelResponseResponseOneOfOriginal,\n GeneratedContent as GeneratedContentOriginal,\n TextContent as TextContentOriginal,\n MediaContent as MediaContentOriginal,\n ThinkingTextContent as ThinkingTextContentOriginal,\n ToolUseContent as ToolUseContentOriginal,\n V1TokenUsage as V1TokenUsageOriginal,\n ResponseMetadata 
as ResponseMetadataOriginal,\n OpenaiproxyV1CreateChatCompletionResponse as OpenaiproxyV1CreateChatCompletionResponseOriginal,\n CreateChatCompletionResponsePromptTokenDetails as CreateChatCompletionResponsePromptTokenDetailsOriginal,\n CreateChatCompletionResponseCompletionTokenDetails as CreateChatCompletionResponseCompletionTokenDetailsOriginal,\n OpenaiproxyV1CreateChatCompletionResponseChoice as OpenaiproxyV1CreateChatCompletionResponseChoiceOriginal,\n OpenaiproxyV1CreateChatCompletionResponseTokenUsage as OpenaiproxyV1CreateChatCompletionResponseTokenUsageOriginal,\n TextBisonPredictResponse as TextBisonPredictResponseOriginal,\n TextBisonPrediction as TextBisonPredictionOriginal,\n CitationMetadata as CitationMetadataOriginal,\n V1Citation as V1CitationOriginal,\n SafetyAttribute as SafetyAttributeOriginal,\n Metadata as MetadataOriginal,\n TokenMetadata as TokenMetadataOriginal,\n TokenCount as TokenCountOriginal,\n ChatBisonPredictResponse as ChatBisonPredictResponseOriginal,\n ChatBisonPrediction as ChatBisonPredictionOriginal,\n CreateChatCompletionResponse as CreateChatCompletionResponseOriginal,\n PromptTokenDetails as PromptTokenDetailsOriginal,\n CompletionTokenDetails as CompletionTokenDetailsOriginal,\n CreateChatCompletionResponseChoice as CreateChatCompletionResponseChoiceOriginal,\n CreateChatCompletionResponseTokenUsage as CreateChatCompletionResponseTokenUsageOriginal,\n GenerateContentResponse as GenerateContentResponseOriginal,\n Candidate as CandidateOriginal,\n CandidateContent as CandidateContentOriginal,\n CandidateContentPart as CandidateContentPartOriginal,\n FinishReason as FinishReasonOriginal,\n FinishReasonWithLiterals as FinishReasonWithLiteralsOriginal,\n SafetyRating as SafetyRatingOriginal,\n HarmProbability as HarmProbabilityOriginal,\n HarmProbabilityWithLiterals as HarmProbabilityWithLiteralsOriginal,\n CandidateCitationMetadata as CandidateCitationMetadataOriginal,\n PublicationDate as PublicationDateOriginal,\n CandidateCitationMetadataCitation as CandidateCitationMetadataCitationOriginal,\n GroundingMetadata as GroundingMetadataOriginal,\n SearchEntryPoint as SearchEntryPointOriginal,\n GroundingChunk as GroundingChunkOriginal,\n GroundingChunkChunkTypeOneOf as GroundingChunkChunkTypeOneOfOriginal,\n Web as WebOriginal,\n RetrievedContext as RetrievedContextOriginal,\n GroundingSupport as GroundingSupportOriginal,\n Segment as SegmentOriginal,\n RetrievalMetadata as RetrievalMetadataOriginal,\n UsageMetadata as UsageMetadataOriginal,\n ModalityTokenCount as ModalityTokenCountOriginal,\n InvokeAnthropicClaudeModelResponse as InvokeAnthropicClaudeModelResponseOriginal,\n ResponseTypeType as ResponseTypeTypeOriginal,\n ResponseTypeTypeWithLiterals as ResponseTypeTypeWithLiteralsOriginal,\n Usage as UsageOriginal,\n V1InvokeAnthropicClaudeModelResponse as V1InvokeAnthropicClaudeModelResponseOriginal,\n GoogleproxyV1ResponseTypeType as GoogleproxyV1ResponseTypeTypeOriginal,\n GoogleproxyV1ResponseTypeTypeWithLiterals as GoogleproxyV1ResponseTypeTypeWithLiteralsOriginal,\n GoogleproxyV1Usage as GoogleproxyV1UsageOriginal,\n InvokeAnthropicModelResponse as InvokeAnthropicModelResponseOriginal,\n V1ResponseTypeType as V1ResponseTypeTypeOriginal,\n V1ResponseTypeTypeWithLiterals as V1ResponseTypeTypeWithLiteralsOriginal,\n V1Usage as V1UsageOriginal,\n UsageCacheCreation as UsageCacheCreationOriginal,\n UsageServerToolUse as UsageServerToolUseOriginal,\n Container as ContainerOriginal,\n InvokeLlamaModelResponse as InvokeLlamaModelResponseOriginal,\n 
InvokeConverseResponse as InvokeConverseResponseOriginal,\n Output as OutputOriginal,\n InvokeConverseResponseTokenUsage as InvokeConverseResponseTokenUsageOriginal,\n Metrics as MetricsOriginal,\n InvokeMlPlatformLlamaModelResponse as InvokeMlPlatformLlamaModelResponseOriginal,\n InvokeChatCompletionResponse as InvokeChatCompletionResponseOriginal,\n InvokeChatCompletionResponseChoice as InvokeChatCompletionResponseChoiceOriginal,\n PerplexityImageDescriptor as PerplexityImageDescriptorOriginal,\n InvokeChatCompletionResponseUsage as InvokeChatCompletionResponseUsageOriginal,\n CreateImageResponse as CreateImageResponseOriginal,\n V1ImageObject as V1ImageObjectOriginal,\n V1TextToImageResponse as V1TextToImageResponseOriginal,\n ImageObject as ImageObjectOriginal,\n GenerateCoreResponse as GenerateCoreResponseOriginal,\n GenerateStableDiffusionResponse as GenerateStableDiffusionResponseOriginal,\n GenerateAnImageResponse as GenerateAnImageResponseOriginal,\n ResultObject as ResultObjectOriginal,\n CreatePredictionResponse as CreatePredictionResponseOriginal,\n CreatePredictionResponseTokenUsage as CreatePredictionResponseTokenUsageOriginal,\n EditImageWithPromptResponse as EditImageWithPromptResponseOriginal,\n TextToImageResponse as TextToImageResponseOriginal,\n TextToImageTaskResult as TextToImageTaskResultOriginal,\n GenerateImageResponse as GenerateImageResponseOriginal,\n Prediction as PredictionOriginal,\n SafetyAttributes as SafetyAttributesOriginal,\n GenerateVideoResponse as GenerateVideoResponseOriginal,\n GeneratedVideo as GeneratedVideoOriginal,\n GenerateImageMlPlatformResponse as GenerateImageMlPlatformResponseOriginal,\n CreateImageOpenAiResponse as CreateImageOpenAiResponseOriginal,\n ImageUsage as ImageUsageOriginal,\n OpenAiImageTokenDetails as OpenAiImageTokenDetailsOriginal,\n EditImageOpenAiResponse as EditImageOpenAiResponseOriginal,\n V1CreateChatCompletionResponse as V1CreateChatCompletionResponseOriginal,\n V1CreateChatCompletionResponseChoice as V1CreateChatCompletionResponseChoiceOriginal,\n V1CreateChatCompletionResponseTokenUsage as V1CreateChatCompletionResponseTokenUsageOriginal,\n InvokeMlPlatformOpenAIChatCompletionRawResponse as InvokeMlPlatformOpenAIChatCompletionRawResponseOriginal,\n Choice as ChoiceOriginal,\n TokenUsage as TokenUsageOriginal,\n VideoInferenceResponse as VideoInferenceResponseOriginal,\n VideoInferenceTaskResult as VideoInferenceTaskResultOriginal,\n V1OpenAiResponsesResponse as V1OpenAiResponsesResponseOriginal,\n OpenAiResponsesResponseIncompleteDetails as OpenAiResponsesResponseIncompleteDetailsOriginal,\n V1ResponsesOutput as V1ResponsesOutputOriginal,\n V1ResponsesOutputOutputOneOf as V1ResponsesOutputOutputOneOfOriginal,\n V1ResponsesTokenUsage as V1ResponsesTokenUsageOriginal,\n V1ResponsesInputTokensDetails as V1ResponsesInputTokensDetailsOriginal,\n V1ResponsesOutputTokensDetails as V1ResponsesOutputTokensDetailsOriginal,\n OpenAiResponsesResponse as OpenAiResponsesResponseOriginal,\n IncompleteDetails as IncompleteDetailsOriginal,\n ResponsesOutput as ResponsesOutputOriginal,\n ResponsesOutputOutputOneOf as ResponsesOutputOutputOneOfOriginal,\n ResponsesTokenUsage as ResponsesTokenUsageOriginal,\n ResponsesInputTokensDetails as ResponsesInputTokensDetailsOriginal,\n ResponsesOutputTokensDetails as ResponsesOutputTokensDetailsOriginal,\n CreateVideoResponse as CreateVideoResponseOriginal,\n VideoJob as VideoJobOriginal,\n ErrorInfo as ErrorInfoOriginal,\n ContentGenerationFailedEvent as 
ContentGenerationFailedEventOriginal,\n GenerateTextByPromptRequest as GenerateTextByPromptRequestOriginal,\n FallbackProperties as FallbackPropertiesOriginal,\n DynamicRequestConfig as DynamicRequestConfigOriginal,\n GatewayToolDefinition as GatewayToolDefinitionOriginal,\n GatewayToolDefinitionToolOneOf as GatewayToolDefinitionToolOneOfOriginal,\n GatewayToolDefinitionCustomTool as GatewayToolDefinitionCustomToolOriginal,\n BuiltInTool as BuiltInToolOriginal,\n GatewayMessageDefinition as GatewayMessageDefinitionOriginal,\n GatewayMessageDefinitionRole as GatewayMessageDefinitionRoleOriginal,\n GatewayMessageDefinitionRoleWithLiterals as GatewayMessageDefinitionRoleWithLiteralsOriginal,\n GatewayContentBlock as GatewayContentBlockOriginal,\n GatewayContentBlockTypeOneOf as GatewayContentBlockTypeOneOfOriginal,\n ToolResultContent as ToolResultContentOriginal,\n GenerateTextByPromptResponse as GenerateTextByPromptResponseOriginal,\n ModelResponse as ModelResponseOriginal,\n ModelResponseResponseOneOf as ModelResponseResponseOneOfOriginal,\n GenerationRequestedEvent as GenerationRequestedEventOriginal,\n TextGenerationSucceededEvent as TextGenerationSucceededEventOriginal,\n TextGenerationFailedEvent as TextGenerationFailedEventOriginal,\n GeneratedTextChunk as GeneratedTextChunkOriginal,\n GeneratedTextChunkModelChunkOneOf as GeneratedTextChunkModelChunkOneOfOriginal,\n ChatCompletionChunk as ChatCompletionChunkOriginal,\n ChunkDelta as ChunkDeltaOriginal,\n ChunkChoice as ChunkChoiceOriginal,\n V1ChatCompletionChunk as V1ChatCompletionChunkOriginal,\n ChunkChoiceChunkDelta as ChunkChoiceChunkDeltaOriginal,\n ChatCompletionChunkChunkChoice as ChatCompletionChunkChunkChoiceOriginal,\n GoogleproxyV1AnthropicStreamChunk as GoogleproxyV1AnthropicStreamChunkOriginal,\n GoogleproxyV1AnthropicStreamChunkContentOneOf as GoogleproxyV1AnthropicStreamChunkContentOneOfOriginal,\n GoogleproxyV1ContentBlockDelta as GoogleproxyV1ContentBlockDeltaOriginal,\n GoogleproxyV1ContentBlockDeltaDeltaOneOf as GoogleproxyV1ContentBlockDeltaDeltaOneOfOriginal,\n V1AnthropicStreamChunkMessageDelta as V1AnthropicStreamChunkMessageDeltaOriginal,\n AnthropicStreamChunk as AnthropicStreamChunkOriginal,\n AnthropicStreamChunkContentOneOf as AnthropicStreamChunkContentOneOfOriginal,\n ContentBlockDelta as ContentBlockDeltaOriginal,\n ContentBlockDeltaDeltaOneOf as ContentBlockDeltaDeltaOneOfOriginal,\n MessageDelta as MessageDeltaOriginal,\n V1AnthropicStreamChunk as V1AnthropicStreamChunkOriginal,\n V1AnthropicStreamChunkContentOneOf as V1AnthropicStreamChunkContentOneOfOriginal,\n V1ContentBlockDelta as V1ContentBlockDeltaOriginal,\n V1ContentBlockDeltaDeltaOneOf as V1ContentBlockDeltaDeltaOneOfOriginal,\n AnthropicStreamChunkMessageDelta as AnthropicStreamChunkMessageDeltaOriginal,\n GenerateTextByPromptObjectRequest as GenerateTextByPromptObjectRequestOriginal,\n GenerateTextByPromptObjectResponse as GenerateTextByPromptObjectResponseOriginal,\n GenerateEmbeddingsRequest as GenerateEmbeddingsRequestOriginal,\n GenerateEmbeddingsRequestEmbeddingRequestOneOf as GenerateEmbeddingsRequestEmbeddingRequestOneOfOriginal,\n V1CreateEmbeddingsRequest as V1CreateEmbeddingsRequestOriginal,\n OpenaiproxyV1EmbeddingModel as OpenaiproxyV1EmbeddingModelOriginal,\n OpenaiproxyV1EmbeddingModelWithLiterals as OpenaiproxyV1EmbeddingModelWithLiteralsOriginal,\n V1EmbeddingEncodingFormat as V1EmbeddingEncodingFormatOriginal,\n V1EmbeddingEncodingFormatWithLiterals as V1EmbeddingEncodingFormatWithLiteralsOriginal,\n 
CreateEmbeddingsRequest as CreateEmbeddingsRequestOriginal,\n EmbeddingModel as EmbeddingModelOriginal,\n EmbeddingModelWithLiterals as EmbeddingModelWithLiteralsOriginal,\n EmbeddingEncodingFormat as EmbeddingEncodingFormatOriginal,\n EmbeddingEncodingFormatWithLiterals as EmbeddingEncodingFormatWithLiteralsOriginal,\n GetEmbeddingRequest as GetEmbeddingRequestOriginal,\n V1EmbeddingModel as V1EmbeddingModelOriginal,\n V1EmbeddingModelWithLiterals as V1EmbeddingModelWithLiteralsOriginal,\n TextEmbeddingInstance as TextEmbeddingInstanceOriginal,\n TaskType as TaskTypeOriginal,\n TaskTypeWithLiterals as TaskTypeWithLiteralsOriginal,\n TextEmbeddingParameters as TextEmbeddingParametersOriginal,\n GenerateEmbeddingsResponse as GenerateEmbeddingsResponseOriginal,\n GenerateEmbeddingsResponseEmbeddingResponseOneOf as GenerateEmbeddingsResponseEmbeddingResponseOneOfOriginal,\n V1CreateEmbeddingsResponse as V1CreateEmbeddingsResponseOriginal,\n V1EmbeddingInfo as V1EmbeddingInfoOriginal,\n V1EmbeddingInfoEmbeddingResultOneOf as V1EmbeddingInfoEmbeddingResultOneOfOriginal,\n V1FloatEmbedding as V1FloatEmbeddingOriginal,\n CreateEmbeddingsResponseEmbeddingUsage as CreateEmbeddingsResponseEmbeddingUsageOriginal,\n CreateEmbeddingsResponse as CreateEmbeddingsResponseOriginal,\n EmbeddingInfo as EmbeddingInfoOriginal,\n EmbeddingInfoEmbeddingResultOneOf as EmbeddingInfoEmbeddingResultOneOfOriginal,\n FloatEmbedding as FloatEmbeddingOriginal,\n EmbeddingUsage as EmbeddingUsageOriginal,\n GetEmbeddingResponse as GetEmbeddingResponseOriginal,\n EmbeddingPrediction as EmbeddingPredictionOriginal,\n EmbeddingInstance as EmbeddingInstanceOriginal,\n Statistics as StatisticsOriginal,\n GenerateTextByProjectRequest as GenerateTextByProjectRequestOriginal,\n GenerateTextByProjectResponse as GenerateTextByProjectResponseOriginal,\n GenerateModerationRequest as GenerateModerationRequestOriginal,\n GenerateModerationRequestModerationRequestOneOf as GenerateModerationRequestModerationRequestOneOfOriginal,\n CreateModerationRequest as CreateModerationRequestOriginal,\n ImageUrlInput as ImageUrlInputOriginal,\n MultiModalInput as MultiModalInputOriginal,\n MultiModalInputContentValueOneOf as MultiModalInputContentValueOneOfOriginal,\n GenerateModerationResponse as GenerateModerationResponseOriginal,\n GenerateModerationResponseModerationResponseOneOf as GenerateModerationResponseModerationResponseOneOfOriginal,\n CreateModerationResponse as CreateModerationResponseOriginal,\n ModerationResult as ModerationResultOriginal,\n GenerateImageByProjectRequest as GenerateImageByProjectRequestOriginal,\n GenerateImageByProjectResponse as GenerateImageByProjectResponseOriginal,\n ImageModelResponse as ImageModelResponseOriginal,\n ImageModelResponseResponseOneOf as ImageModelResponseResponseOneOfOriginal,\n ImageGenerationRequestedEvent as ImageGenerationRequestedEventOriginal,\n ImageGenerationSucceededEvent as ImageGenerationSucceededEventOriginal,\n ImageGenerationFailedEvent as ImageGenerationFailedEventOriginal,\n GenerateImageByPromptRequest as GenerateImageByPromptRequestOriginal,\n GenerateImageByPromptResponse as GenerateImageByPromptResponseOriginal,\n GenerateImageByPromptObjectRequest as GenerateImageByPromptObjectRequestOriginal,\n GenerateImageByPromptObjectResponse as GenerateImageByPromptObjectResponseOriginal,\n GenerateContentByPromptRequest as GenerateContentByPromptRequestOriginal,\n AsyncGenerationConfig as AsyncGenerationConfigOriginal,\n SpiGenerationConfig as SpiGenerationConfigOriginal,\n 
GenerateContentByPromptResponse as GenerateContentByPromptResponseOriginal,\n GenerateContentByProjectRequest as GenerateContentByProjectRequestOriginal,\n GenerateContentByProjectResponse as GenerateContentByProjectResponseOriginal,\n GenerateContentByPromptObjectRequest as GenerateContentByPromptObjectRequestOriginal,\n GenerateContentByPromptObjectResponse as GenerateContentByPromptObjectResponseOriginal,\n GenerateTranscriptionRequest as GenerateTranscriptionRequestOriginal,\n GenerateTranscriptionRequestTranscriptionRequestOneOf as GenerateTranscriptionRequestTranscriptionRequestOneOfOriginal,\n CreateTranscriptionRequest as CreateTranscriptionRequestOriginal,\n TranscriptionModel as TranscriptionModelOriginal,\n TranscriptionModelWithLiterals as TranscriptionModelWithLiteralsOriginal,\n CreateTranscriptionRequestResponseFormat as CreateTranscriptionRequestResponseFormatOriginal,\n CreateTranscriptionRequestResponseFormatWithLiterals as CreateTranscriptionRequestResponseFormatWithLiteralsOriginal,\n TimestampGranularities as TimestampGranularitiesOriginal,\n TimestampGranularity as TimestampGranularityOriginal,\n TimestampGranularityWithLiterals as TimestampGranularityWithLiteralsOriginal,\n FileContent as FileContentOriginal,\n GenerateTranscriptionResponse as GenerateTranscriptionResponseOriginal,\n GenerateTranscriptionResponseTranscriptionResponseOneOf as GenerateTranscriptionResponseTranscriptionResponseOneOfOriginal,\n CreateTranscriptionResponse as CreateTranscriptionResponseOriginal,\n Word as WordOriginal,\n V1Segment as V1SegmentOriginal,\n GenerateAudioRequest as GenerateAudioRequestOriginal,\n GenerateAudioRequestAudioRequestOneOf as GenerateAudioRequestAudioRequestOneOfOriginal,\n CreateSpeechRequest as CreateSpeechRequestOriginal,\n SpeechModel as SpeechModelOriginal,\n SpeechModelWithLiterals as SpeechModelWithLiteralsOriginal,\n TextToSpeechRequest as TextToSpeechRequestOriginal,\n ElevenLabsTextToSpeechModel as ElevenLabsTextToSpeechModelOriginal,\n ElevenLabsTextToSpeechModelWithLiterals as ElevenLabsTextToSpeechModelWithLiteralsOriginal,\n VoiceSettings as VoiceSettingsOriginal,\n PronunciationDictionaryLocator as PronunciationDictionaryLocatorOriginal,\n GenerateAudioResponse as GenerateAudioResponseOriginal,\n GenerateAudioResponseAudioResponseOneOf as GenerateAudioResponseAudioResponseOneOfOriginal,\n CreateSpeechResponse as CreateSpeechResponseOriginal,\n GeneratedAudioChunk as GeneratedAudioChunkOriginal,\n GeneratedAudioChunkAudioChunkOneOf as GeneratedAudioChunkAudioChunkOneOfOriginal,\n SpeechChunk as SpeechChunkOriginal,\n TextToSpeechChunk as TextToSpeechChunkOriginal,\n AlignmentInfoInChunk as AlignmentInfoInChunkOriginal,\n PublishPromptRequest as PublishPromptRequestOriginal,\n PublishPromptResponse as PublishPromptResponseOriginal,\n GetPromptRequest as GetPromptRequestOriginal,\n GetPromptResponse as GetPromptResponseOriginal,\n PublishProjectRequest as PublishProjectRequestOriginal,\n Project as ProjectOriginal,\n ExperimentalPromptConfig as ExperimentalPromptConfigOriginal,\n PublishProjectResponse as PublishProjectResponseOriginal,\n ProjectConfigChangedDomainEvent as ProjectConfigChangedDomainEventOriginal,\n GetProjectRequest as GetProjectRequestOriginal,\n GetProjectResponse as GetProjectResponseOriginal,\n GetStatusRequest as GetStatusRequestOriginal,\n EntityType as EntityTypeOriginal,\n EntityTypeWithLiterals as EntityTypeWithLiteralsOriginal,\n GetStatusResponse as GetStatusResponseOriginal,\n OutageStatus as OutageStatusOriginal,\n 
OutageStatusWithLiterals as OutageStatusWithLiteralsOriginal,\n GetApplicationUsageRequest as GetApplicationUsageRequestOriginal,\n GetApplicationUsageResponse as GetApplicationUsageResponseOriginal,\n ApplicationBudgetInfo as ApplicationBudgetInfoOriginal,\n UserPerApplicationBudgetInfo as UserPerApplicationBudgetInfoOriginal,\n Wix_ai_gatewayV1EditImageRequest as Wix_ai_gatewayV1EditImageRequestOriginal,\n Wix_ai_gatewayV1EditImageRequestRequestOneOf as Wix_ai_gatewayV1EditImageRequestRequestOneOfOriginal,\n RemoveBackgroundRequest as RemoveBackgroundRequestOriginal,\n ImageEditingRequest as ImageEditingRequestOriginal,\n Guidance as GuidanceOriginal,\n ImageEditingModel as ImageEditingModelOriginal,\n ImageEditingModelWithLiterals as ImageEditingModelWithLiteralsOriginal,\n Background as BackgroundOriginal,\n Expand as ExpandOriginal,\n Export as ExportOriginal,\n Lighting as LightingOriginal,\n Margin as MarginOriginal,\n Padding as PaddingOriginal,\n Segmentation as SegmentationOriginal,\n Shadow as ShadowOriginal,\n TextRemoval as TextRemovalOriginal,\n V1EditImageRequest as V1EditImageRequestOriginal,\n V1EditImageModel as V1EditImageModelOriginal,\n V1EditImageModelWithLiterals as V1EditImageModelWithLiteralsOriginal,\n EditImageRequest as EditImageRequestOriginal,\n EditImageModel as EditImageModelOriginal,\n EditImageModelWithLiterals as EditImageModelWithLiteralsOriginal,\n Recraft_proxyV1EditImageRequest as Recraft_proxyV1EditImageRequestOriginal,\n EditAction as EditActionOriginal,\n EditActionWithLiterals as EditActionWithLiteralsOriginal,\n Wix_ai_gatewayV1EditImageResponse as Wix_ai_gatewayV1EditImageResponseOriginal,\n Wix_ai_gatewayV1EditImageResponseResponseOneOf as Wix_ai_gatewayV1EditImageResponseResponseOneOfOriginal,\n RemoveBackgroundResponse as RemoveBackgroundResponseOriginal,\n ImageEditingResponse as ImageEditingResponseOriginal,\n V1EditImageResponse as V1EditImageResponseOriginal,\n EditImageResponse as EditImageResponseOriginal,\n EditImageInput as EditImageInputOriginal,\n PredictionMetrics as PredictionMetricsOriginal,\n PredictionUrls as PredictionUrlsOriginal,\n Recraft_proxyV1EditImageResponse as Recraft_proxyV1EditImageResponseOriginal,\n PollImageGenerationResultRequest as PollImageGenerationResultRequestOriginal,\n PollImageGenerationResultRequestRequestOneOf as PollImageGenerationResultRequestRequestOneOfOriginal,\n V1GetResultRequest as V1GetResultRequestOriginal,\n GetResultRequest as GetResultRequestOriginal,\n GetTaskResultRequest as GetTaskResultRequestOriginal,\n GetVideoResultRequest as GetVideoResultRequestOriginal,\n PollImageGenerationResultResponse as PollImageGenerationResultResponseOriginal,\n PollImageGenerationResultResponseResponseOneOf as PollImageGenerationResultResponseResponseOneOfOriginal,\n V1GetResultResponse as V1GetResultResponseOriginal,\n GetResultResponse as GetResultResponseOriginal,\n GetTaskResultResponse as GetTaskResultResponseOriginal,\n GetTaskResultResponseResponseOneOf as GetTaskResultResponseResponseOneOfOriginal,\n GetVideoResultResponse as GetVideoResultResponseOriginal,\n DomainEvent as DomainEventOriginal,\n DomainEventBodyOneOf as DomainEventBodyOneOfOriginal,\n EntityCreatedEvent as EntityCreatedEventOriginal,\n RestoreInfo as RestoreInfoOriginal,\n EntityUpdatedEvent as EntityUpdatedEventOriginal,\n EntityDeletedEvent as EntityDeletedEventOriginal,\n ActionEvent as ActionEventOriginal,\n MessageEnvelope as MessageEnvelopeOriginal,\n IdentificationData as IdentificationDataOriginal,\n 
IdentificationDataIdOneOf as IdentificationDataIdOneOfOriginal,\n WebhookIdentityType as WebhookIdentityTypeOriginal,\n WebhookIdentityTypeWithLiterals as WebhookIdentityTypeWithLiteralsOriginal,\n} from './ds-wix-ai-gateway-v1-prompt-generators.types.js';\n"],"mappings":";AAAA,SAAS,yBAAyB;AAClC,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,0CAA0C;AACnD,SAAS,sBAAsB;AAC/B,SAAS,kBAAkB;AAI3B,SAAS,0CACP,MACA;AACA,QAAM,mBAAmB;AAAA,IACvB,oBAAoB;AAAA,MAClB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,YAAY;AAAA,MACV;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,cAAc;AAAA,MACZ;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,yBAAyB;AAAA,MACvB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,wBAAwB;AAAA,MACtB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,mBAAmB;AAAA,MACjB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AAEA,SAAO,WAAW,OAAO,OAAO,MAAM,EAAE,iBAAiB,CAAC,CAAC;AAC7D;AAEA,IAAM,eAAe;AAOd,SAAS,qBACd,SAC4B;AAC5B,WAAS,uBAAuB,EAAE,KAAK,GAAQ;AAC7C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA
,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,6BACd,SAC4B;AAC5B,WAAS,+BAA+B,EAAE,KAAK,GAAQ;AACrD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,2BACd,SAC4B;AAC5B,WAAS,6BAA6B,EAAE,KAAK,GAAQ;AACnD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,
CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;
AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,kBAAkB,SAA6C;AAC7E,WAAS,oBAAoB,EAAE,KAAK,GAAQ;AAC1C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YA
CR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,mBACd,SAC4B;AAC5B,WAAS,qBAAqB,EAAE,KAAK,GAAQ;AAC3C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,uBACd,SAC4B;AAC5B,WAAS,yBAAyB,EAAE,KAAK,GAAQ;AAC/C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YAC
R;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AA
AA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,4BACd,SAC4B;AAC5B,WAAS,8BAA8B,EAAE,KAAK,GAAQ;AACpD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AA
AA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,wBACd,SAC4B;AAC5B,WAAS,0BAA0B,EAAE,KAAK,GAAQ;AAChD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA
,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,yBACd,SAC4B;AAC5B,WAAS,2BAA2B,EAAE,KAAK,GAAQ;AACjD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;A
AAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,
YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO,CAAC,EAAE,MAAM,yCAAyC,CAAC;AAAA,MAC5D;AAAA,MACA;AAAA,QACE,aAAa;AAAA,QACb,OAAO,CAAC,EAAE,MAAM,mDAAmD,CAAC;AAAA,MACtE;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,uCAAuC;AAAA,YAC/C,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,wCAAwC;AAAA,YAChD,EAAE,MAAM,6CAA6C;AAAA,YACrD,EAAE,MAAM,2CAA2C;AAAA,UACrD;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,oDAAoD;AAAA,UAC9D;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,cAAc,SAA6C;AACzE,WAAS,gBAAgB,EAAE,KAAK,GAAQ;AACtC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aA
Aa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,4BAA4B;AAAA,YACpC,EAAE,MAAM,oCAAoC;AAAA,UAC9C;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,cAAc,SAA6C;AACzE,WAAS,gBAAgB,EAAE,KAAK,GAAQ;AACtC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,SAAS,IAAI;AAAA,MACvC,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,yCAAyC;AAAA,YACjD,EAAE,MAAM,oDAAoD;AAAA,YAC5D,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qCAAqC;AAAA,YAC7C,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,2CAA2C;AAAA,YACnD,EAAE,MAAM,iDAAiD;
AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,4CAA4C;AAAA,YACpD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,yCAAyC;AAAA,YACjD,EAAE,MAAM,oDAAoD;AAAA,YAC5D,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qCAAqC;AAAA,YAC7C,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,uCAAuC;AAAA,YAC/C,EAAE,MAAM,gCAAgC;AAAA,YACxC,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,8CAA8C;AAAA,YACtD,EAAE,MAAM,8CAA8C;AAAA,YACtD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,UACzD;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,eAAe,SAA6C;AAC1E,WAAS,iBAAiB,EAAE,KAAK,GAAQ;AACvC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,WAAW,SAA6C;AACtE,WAAS,aAAa,EAAE,KAAK,GAAQ;AACnC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,oBACd,SAC4B;AAC5B,WAAS,sBAAsB,EAAE,KAAK,GAAQ;AAC5C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,6CAA6C;AAAA,UACrD,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0CAA0C;AAAA,QACpD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,+CAA+C;AAAA,UACzD;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,0BACd,SAC4B;AAC5B,WAAS,4BAA4B,E
AAE,KAAK,GAAQ;AAClD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;;;ACr4FO,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,WAAQ;AACR,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,yBAAsB;AACtB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,mCAAgC;AAzCtB,SAAAA;AAAA,GAAA;AAgKL,IAAK,gDAAL,kBAAKC,mDAAL;AACL,EAAAA,+CAAA,aAAU;AACV,EAAAA,+CAAA,UAAO;AACP,EAAAA,+CAAA,eAAY;AACZ,EAAAA,+CAAA,YAAS;AACT,EAAAA,+CAAA,cAAW;AACX,EAAAA,+CAAA,UAAO;AAKP,EAAAA,+CAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAuLL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AAwEL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA0KL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,mBAAgB;AAChB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AAEpB,EAAAA,SAAA,4BAAyB;AACzB,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,mCAAgC;AAdtB,SAAAA;AAAA,GAAA;AAyGL,IAAK,mCAAL,kBAAKC,sCAAL;AACL,EAAAA,kCAAA,aAAU;AACV,EAAAA,kCAAA,UAAO;AACP,EAAAA,kCAAA,eAAY;AACZ,EAAAA,kCAAA,YAAS;AACT,EAAAA,kCAAA,cAAW;AACX,EAAAA,kCAAA,UAAO;AAKP,EAAAA,kCAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAoHL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,sBAAmB;AAfT,SAAAA;AAAA,GAAA;AAqDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,WAAQ;AAHE,SAAAA;AAAA,GAAA;AA+FL,IAAK,WAAL,kBAAKC,cAAL;AAEL,EAAAA,UAAA,0BAAuB;AAEvB,EAAAA,UAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAoBL,IAAK,UAAL,kBAAKC,aAAL;AAEL,EAAAA,SAAA,yBAAsB;AAEtB,EAAAA,SAAA,gBAAa;AAEb,EAAAA,SAAA,oBAAiB;AAEjB,EAAAA,SAAA,+BAA4B;AARlB,SAAAA;AAAA,GAAA;AA4CL,IAAK,uBAAL,kBAAKC,0BAAL;AAEL,EAAAA,sBAAA,kCAA+B;AAE/B,EAAAA,sBAAA,0BAAuB;AAEvB,EAAAA,sBAAA,6BAA0B;AAE1B,EAAAA,sBAAA,2BAAwB;AARd,SAAAA;AAAA,GAAA;AAsDL,IAAK,6BAAL,kBAAKC,gCAAL;AAEL,EAAAA,4BAAA,sBAAmB;AAEnB,EAAAA,4BAAA,kBAAe;AAJL,SAAAA;AAAA,GAAA;AAoBL,IAAK,cAAL,kBAAKC,iBAAL;AAEL,EAAAA
,aAAA,6BAA0B;AAE1B,EAAAA,aAAA,yBAAsB;AAJZ,SAAAA;AAAA,GAAA;AAgEL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,sBAAmB;AACnB,EAAAA,cAAA,qCAAkC;AAClC,EAAAA,cAAA,+BAA4B;AAC5B,EAAAA,cAAA,8BAA2B;AAC3B,EAAAA,cAAA,qCAAkC;AALxB,SAAAA;AAAA,GAAA;AAiBL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,uBAAoB;AACpB,EAAAA,WAAA,gBAAa;AACb,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,qBAAkB;AALR,SAAAA;AAAA,GAAA;AAkJL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,sBAAmB;AAEnB,EAAAA,UAAA,UAAO;AAEP,EAAAA,UAAA,WAAQ;AAER,EAAAA,UAAA,WAAQ;AAPE,SAAAA;AAAA,GAAA;AAyCL,IAAK,mBAAL,kBAAKC,sBAAL;AAEL,EAAAA,kBAAA,mCAAgC;AAEhC,EAAAA,kBAAA,eAAY;AAEZ,EAAAA,kBAAA,iBAAc;AAEd,EAAAA,kBAAA,gBAAa;AARH,SAAAA;AAAA,GAAA;AAoCL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AAEV,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,SAAM;AAEN,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,eAAY;AAfF,SAAAA;AAAA,GAAA;AAwIL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAQL,IAAK,QAAL,kBAAKC,WAAL;AACL,EAAAA,OAAA,aAAU;AAEV,EAAAA,OAAA,yBAAsB;AAEtB,EAAAA,OAAA,wBAAqB;AAErB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAEvB,EAAAA,OAAA,2BAAwB;AACxB,EAAAA,OAAA,yBAAsB;AACtB,EAAAA,OAAA,uBAAoB;AAEpB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAnBb,SAAAA;AAAA,GAAA;AA+CL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,UAAO;AACP,EAAAA,MAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAkFL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,aAAU;AAEV,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AAEZ,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AAsJL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,aAAU;AACV,EAAAA,gBAAA,UAAO;AACP,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,aAAU;AACV,EAAAA,eAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAwHL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAWL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,wBAAqB;AACrB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,0BAAuB;AAXb,SAAAA;AAAA,GAAA;AAuCL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,aAAU;AACV,EAAAA,mBAAA,UAAO;AACP,EAAAA,mBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAuFL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,aAAU;AAEV,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA4JL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,aAAU;AACV,EAAAA,6BAAA,UAAO;AACP,EAAAA,6BAAA,SAAM;AACN,EAAAA,6BAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,6BAAL,kBAAKC,gCAAL;AACL,EAAAA,4BAAA,aAAU;AACV,EAAAA,4BAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAqGL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,wBAAqB;AACrB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,0BAAuB;AAVb,SAAAA;AAAA,GAAA;AAqCL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAqHL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoLL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,aAAU;AAEV,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AAEZ,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA4tBL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,SAAM;AACN,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkDL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,
SAAM;AAFI,SAAAA;AAAA,GAAA;AAwDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AAEtB,EAAAA,YAAA,6BAA0B;AAE1B,EAAAA,YAAA,8BAA2B;AAE3B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,gCAA6B;AAE7B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,+BAA4B;AAblB,SAAAA;AAAA,GAAA;AAwDL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,4BAAyB;AAEzB,EAAAA,eAAA,0BAAuB;AAEvB,EAAAA,eAAA,gBAAa;AALH,SAAAA;AAAA,GAAA;AA2OL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oCAAiC;AACjC,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAaL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AACxB,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,QAAK;AAHK,SAAAA;AAAA,GAAA;AAaL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,wBAAqB;AACrB,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AANP,SAAAA;AAAA,GAAA;AAmBL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,WAAQ;AACR,EAAAA,YAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AAgDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,uCAAoC;AAEpC,EAAAA,YAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAsBL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,sCAAmC;AACnC,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AAuBL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,yBAAsB;AACtB,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,0BAAuB;AACvB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,YAAS;AACT,EAAAA,SAAA,WAAQ;AAXE,SAAAA;AAAA,GAAA;AA6BL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,8BAA2B;AAC3B,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,WAAQ;AACR,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,gBAAa;AACb,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,uBAAoB;AACpB,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAwFL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,4CAAyC;AACzC,EAAAA,gBAAA,uBAAoB;AAFV,SAAAA;AAAA,GAAA;AAWL,IAAK,iCAAL,kBAAKC,oCAAL;AACL,EAAAA,gCAAA,8BAA2B;AAC3B,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,WAAQ;AACR,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,gBAAa;AACb,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,uBAAoB;AACpB,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AA0FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,mBAAgB;AAChB,EAAAA,gBAAA,oBAAiB;AAHP,SAAAA;AAAA,GAAA;AAaL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,wCAAqC;AAErC,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,qBAAkB;AAElB,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,iBAAc;AAEd,EAAAA,2BAAA,uBAAoB;AAEpB,EAAAA,2BAAA,kBAAe;AAbL,SAAAA;AAAA,GAAA;AA2BL,IAAK,6CAAL,kBAAKC,gDAAL;AACL,EAAAA,4CAAA,+BAA4B;AAC5B,EAAAA,4CAAA,UAAO;AACP,EAAAA,4CAAA,SAAM;AAHI,SAAAA;AAAA,GAAA;AAyHL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,iCAA8B;AAC9B,EAAAA,sBAAA,wBAAqB;AACrB,EAAAA,sBAAA,gBAAa;AACb,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,qBAAkB;AANR,SAAAA;AAAA,GAAA;AA8DL,IAAK,wBAAL,kBAAKC,2BAAL;AAEL,EAAAA,uBAAA,qCAAkC;AAElC,EAAAA,uBAAA,gBAAa;AAEb,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,eAAY;AAEZ,EAAAA,uBAAA,+BAA4B;AAE5B,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,2BAAwB;AAExB,EAAAA,uBAAA,wBAAqB;AAhBX,SAAAA;AAAA,GAAA;AAsOL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,6BAA0B;AAC1B,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,aAAU;AACV,EAAAA,WAAA,
sBAAmB;AACnB,EAAAA,WAAA,2BAAwB;AACxB,EAAAA,WAAA,iCAA8B;AAC9B,EAAAA,WAAA,qBAAkB;AAClB,EAAAA,WAAA,0BAAuB;AACvB,EAAAA,WAAA,SAAM;AACN,EAAAA,WAAA,qBAAkB;AAVR,SAAAA;AAAA,GAAA;AA6CL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,gCAA6B;AAC7B,EAAAA,cAAA,UAAO;AACP,EAAAA,cAAA,SAAM;AACN,EAAAA,cAAA,WAAQ;AACR,EAAAA,cAAA,aAAU;AALA,SAAAA;AAAA,GAAA;AAiML,IAAK,kCAAL,kBAAKC,qCAAL;AACL,EAAAA,iCAAA,mDAAgD;AAChD,EAAAA,iCAAA,aAAU;AACV,EAAAA,iCAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAaL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,8BAA2B;AAC3B,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,gBAAa;AACb,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAmKL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,kBAAe;AAEf,EAAAA,yBAAA,uBAAoB;AAEpB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,mBAAgB;AA7BN,SAAAA;AAAA,GAAA;AA0LL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,8BAA2B;AAC3B,EAAAA,iBAAA,WAAQ;AACR,EAAAA,iBAAA,eAAY;AACZ,EAAAA,iBAAA,qBAAkB;AAClB,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,yBAAsB;AANZ,SAAAA;AAAA,GAAA;AAgCL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAyDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,mCAAgC;AANtB,SAAAA;AAAA,GAAA;AA2FL,IAAK,+BAAL,kBAAKC,kCAAL;AAEL,EAAAA,8BAAA,qCAAkC;AAElC,EAAAA,8BAAA,gBAAa;AAJH,SAAAA;AAAA,GAAA;AA+IL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,kCAA+B;AAC/B,EAAAA,kBAAA,kBAAe;AACf,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,qBAAkB;AAClB,EAAAA,kBAAA,qBAAkB;AAClB,EAAAA,kBAAA,mBAAgB;AAPN,SAAAA;AAAA,GAAA;AA+GL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,6BAA0B;AAC1B,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,+BAA4B;AAJlB,SAAAA;AAAA,GAAA;AAmJL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,mCAAgC;AAKhC,EAAAA,qBAAA,yCAAsC;AAKtC,EAAAA,qBAAA,6CAA0C;AAXhC,SAAAA;AAAA,GAAA;AA+CL,IAAK,qCAAL,kBAAKC,wCAAL;AACL,EAAAA,oCAAA,aAAU;AACV,EAAAA,oCAAA,UAAO;AACP,EAAAA,oCAAA,eAAY;AACZ,EAAAA,oCAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA2IL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA8IL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,SAAM;AAEN,EAAAA,cAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AA8BL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,2BAAwB;AACxB,EAAAA,YAAA,mBAAgB;AALN,SAAAA;AAAA,GAAA;AA2FL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,gCAA6B;AAC7B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,iCAA8B;AAC9B,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,yBAAsB;AACtB,EAAAA,kBAAA,yBAAsB;AACtB,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,wBAAqB;AAfX,SAAAA;AAAA,GAAA;AA0FL,IAAK,4CAAL,kBAAKC,+CAAL;AACL,EAAAA,2CAAA,sBAAmB;AACnB,EAAAA,2CAAA,UAAO;AACP,EAAAA,2CAAA,YAAS;AACT,EAAAA,2CAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAopB
L,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,gCAA6B;AAC7B,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,wBAAqB;AALX,SAAAA;AAAA,GAAA;AAsEL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,YAAS;AACT,EAAAA,sBAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAomBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,yBAAsB;AACtB,EAAAA,cAAA,YAAS;AACT,EAAAA,cAAA,gBAAa;AAHH,SAAAA;AAAA,GAAA;AA8pBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,iBAAc;AAEd,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,gBAAa;AAKb,EAAAA,cAAA,YAAS;AAET,EAAAA,cAAA,gBAAa;AAEb,EAAAA,cAAA,WAAQ;AAER,EAAAA,cAAA,cAAW;AAEX,EAAAA,cAAA,eAAY;AAEZ,EAAAA,cAAA,wBAAqB;AAErB,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,6BAA0B;AAE1B,EAAAA,cAAA,kBAAe;AAEf,EAAAA,cAAA,0BAAuB;AAEvB,EAAAA,cAAA,yBAAsB;AAhCZ,SAAAA;AAAA,GAAA;AA4EL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,gBAAa;AACb,EAAAA,iBAAA,SAAM;AACN,EAAAA,iBAAA,YAAS;AACT,EAAAA,iBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkRL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAyEL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAsEL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAipCL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AACZ,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AANF,SAAAA;AAAA,GAAA;AA+tBL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,6BAA0B;AAC1B,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,4BAAyB;AAJf,SAAAA;AAAA,GAAA;AAeL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,6BAA0B;AAE1B,EAAAA,2BAAA,WAAQ;AACR,EAAAA,2BAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAwCL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,4BAAyB;AACzB,EAAAA,gBAAA,4BAAyB;AAJf,SAAAA;AAAA,GAAA;AAeL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,6BAA0B;AAC1B,EAAAA,yBAAA,WAAQ;AACR,EAAAA,yBAAA,YAAS;AAHC,SAAAA;AAAA,GAAA;AA6BL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,6BAA0B;AAC1B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,wBAAqB;AAErB,EAAAA,kBAAA,0BAAuB;AACvB,EAAAA,kBAAA,0BAAuB;AANb,SAAAA;AAAA,GAAA;AA2CL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,uBAAoB;AACpB,EAAAA,UAAA,qBAAkB;AAClB,EAAAA,UAAA,wBAAqB;AACrB,EAAAA,UAAA,yBAAsB;AACtB,EAAAA,UAAA,oBAAiB;AACjB,EAAAA,UAAA,gBAAa;AACb,EAAAA,UAAA,wBAAqB;AACrB,EAAAA,UAAA,uBAAoB;AACpB,EAAAA,UAAA,0BAAuB;AATb,SAAAA;AAAA,GAAA;AA0vBL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,iCAA8B;AAC9B,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAWL,IAAK,2CAAL,kBAAKC,8CAAL;AACL,EAAAA,0CAAA,6BAA0B;AAC1B,EAAAA,0CAAA,UAAO;AACP,EAAAA,0CAAA,UAAO;AACP,EAAAA,0CAAA,SAAM;AACN,EAAAA,0CAAA,kBAAe;AACf,EAAAA,0CAAA,SAAM;AANI,SAAAA;AAAA,GAAA;AA2BL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,mCAAgC;AAChC,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AAmJL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAsFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,8CAA2C;AAC3C,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,uBAAoB;AACpB,EAAAA,6BAAA,qBAAkB;AAJR,SAAAA;AAAA,GAAA;AA4PL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,YAAS;AACT,EAAAA,YAAA,aAAU;AACV,EAAAA,YAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAsBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oBAAiB;AACjB,EAAAA,cAAA,aAAU;AACV,EAAAA,cAAA,YAAS;AAHC,SAAAA;AAAA,GAAA;AAkPL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,qCAAkC;AAClC,EAAAA,mBAAA,sCAAmC;AACnC,EAAAA,mBAAA,sCAAmC;AAHzB,SAAAA;AAAA,GAAA;AAsQL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,sCAAmC;AACnC,EAAAA,kBAAA,WAAQ;AAFE,
SAAAA;AAAA,GAAA;AA+BL,IAAK,iBAAL,kBAAKC,oBAAL;AAEL,EAAAA,gBAAA,8BAA2B;AAE3B,EAAAA,gBAAA,iBAAc;AAEd,EAAAA,gBAAA,uBAAoB;AANV,SAAAA;AAAA,GAAA;AAoCL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,eAAY;AACZ,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,mBAAgB;AAChB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,kBAAe;AANL,SAAAA;AAAA,GAAA;AAqeL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,aAAU;AACV,EAAAA,qBAAA,uBAAoB;AACpB,EAAAA,qBAAA,YAAS;AACT,EAAAA,qBAAA,cAAW;AACX,EAAAA,qBAAA,SAAM;AALI,SAAAA;AAAA,GAAA;;;ACt0aL,SAASC,wBAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,qBAAqB,OAAO;AAElE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,gCAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,6BAA6B,OAAO;AAE1E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,8BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,2BAA2B,OAAO;AAExE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,sCAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC;AAAA,IAClC;AAAA,EACF;AAEF,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,qBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,kBAAkB,OAAO;AAE/D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iCAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,8BAA8B,OAAO;AAE3E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,sBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,mBAAmB,OAAO;AAEhE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,0BAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,uBAAuB,OAAO;AAEpE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;
AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,+BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,4BAA4B,OAAO;AAEzE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,2BAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,wBAAwB,OAAO;AAErE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,4BAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,yBAAyB,OAAO;AAEtE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iCAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,8BAA8B,OAAO;AAE3E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,cAAc,OAAO;AAE3D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iBAOd;AACA,QAAM,UAAU,EAAE,QAAQ,EAAE,IAAI,YAAY,EAAE;AAE9C,QAAM,oBACgC,cAAc,OAAO;AAE3D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,kBAOd;AACA,QAAM,UAAU,EAAE,SAAS,EAAE,IAAI,aAAa,EAAE;AAEhD,QAAM,oBACgC,eAAe,OAAO;AAE5D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,cAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,WAAW,OAAO;AAExD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA
,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,uBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,oBAAoB,OAAO;AAEjE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,6BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,0BAA0B,OAAO;AAEvE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;","names":["payload","OpenaiproxyV1Model","OpenaiproxyV1ChatCompletionMessageMessageRole","TextBisonModel","ChatBisonModel","V1Model","ChatCompletionMessageMessageRole","GoogleproxyV1Model","ContentRole","Language","Outcome","MediaResolutionLevel","DynamicRetrievalConfigMode","Environment","HarmCategory","Threshold","Modality","PersonGeneration","Mode","Type","Model","Role","MediaType","ToolChoiceType","McpServerType","V1CacheControlType","ClaudeModel","V1MessageRoleRole","V1ImageMediaTypeMediaType","GoogleproxyV1ToolChoiceType","GoogleproxyV1McpServerType","AnthropicModel","MessageRoleRole","CacheControlType","ImageMediaTypeMediaType","V1ToolChoiceType","V1McpServerType","LlamaModel","ConverseModel","V1ImageModel","ImageQuality","ImageSize","ImageStyle","ImageModel","ClipGuidancePreset","Sampler","TextToImageRequestStylePreset","ImageCoreModel","GenerateCoreRequestStylePreset","GenerationMode","ImageStableDiffusionModel","GenerateStableDiffusionRequestOutputFormat","GenerateAnImageModel","CreatePredictionModel","TaskInput","ResponseType","EditImageWithPromptRequestModel","StylePreset","TextToImageRequestModel","PerplexityModel","PerplexityMessageMessageRole","ImagenModel","GenerateImageMlPlatformModel","OpenAiImageModel","VideoGenModel","ChatCompletionModel","V1ChatCompletionMessageMessageRole","MessageRole","OutputFormat","VideoModel","V1ResponsesModel","ResponsesInputMessageResponsesMessageRole","ResponsesModel","ResponsesMessageRole","V1VideoModel","FinishReason","HarmProbability","ResponseTypeType","GoogleproxyV1ResponseTypeType","V1ResponseTypeType","GatewayMessageDefinitionRole","OpenaiproxyV1EmbeddingModel","V1EmbeddingEncodingFormat","EmbeddingModel","EmbeddingEncodingFormat","V1EmbeddingModel","TaskType","TranscriptionModel","CreateTranscriptionRequestResponseFormat","TimestampGranularity","SpeechModel","ElevenLabsTextToSpeechModel","EntityType","OutageStatus","ImageEditingModel","V1EditImageModel","EditImageModel","EditAction","WebhookIdentityType","generateTextByPrompt","generateTextByPromptStreamed","generateTextByPromptObject","generateTextByPromptObjectStreamed","generateEmbedding","generateTextByProject","generateTextByProjectStreamed","generateMo
deration","generateImageByProject","generateImageByPrompt","generateImageByPromptObject","generateContentByPrompt","generateContentByProject","generateContentByPromptObject","generateTranscription","generateAudio","generateAudioStreamed","publishPrompt","getPrompt","publishProject","getProject","getStatus","getApplicationUsage","editImage","pollImageGenerationResult"]}
+ {"version":3,"sources":["../../src/ds-wix-ai-gateway-v1-prompt-generators.http.ts","../../src/ds-wix-ai-gateway-v1-prompt-generators.types.ts","../../src/ds-wix-ai-gateway-v1-prompt-generators.meta.ts"],"sourcesContent":["import { toURLSearchParams } from '@wix/sdk-runtime/rest-modules';\nimport { transformSDKFloatToRESTFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformRESTFloatToSDKFloat } from '@wix/sdk-runtime/transformations/float';\nimport { transformSDKBytesToRESTBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformRESTBytesToSDKBytes } from '@wix/sdk-runtime/transformations/bytes';\nimport { transformRESTDurationToSDKDuration } from '@wix/sdk-runtime/transformations/duration';\nimport { transformPaths } from '@wix/sdk-runtime/transformations/transform-paths';\nimport { resolveUrl } from '@wix/sdk-runtime/rest-modules';\nimport { ResolveUrlOpts } from '@wix/sdk-runtime/rest-modules';\nimport { RequestOptionsFactory } from '@wix/sdk-types';\n\nfunction resolveWixDsWixAiGatewayV1WixAiGatewayUrl(\n opts: Omit<ResolveUrlOpts, 'domainToMappings'>\n) {\n const domainToMappings = {\n 'bo._base_domain_': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'wixbo.ai': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'wix-bo.com': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'api._api_base_domain_': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n ],\n 'manage._base_domain_': [\n {\n srcPath: '/_api/wix-ai-gateway',\n destPath: '',\n },\n {\n srcPath: '/wix-ai-gateway-envoy',\n destPath: '',\n },\n ],\n 'www.wixapis.com': [\n {\n srcPath: '/wix-ai-gateway',\n destPath: '',\n },\n ],\n };\n\n return resolveUrl(Object.assign(opts, { domainToMappings }));\n}\n\nconst PACKAGE_NAME = '@wix/auto_sdk_ai-gateway_generators';\n\n/**\n * Generate text according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPrompt(), and will be removed on 2026-03-31.\n */\nexport function generateTextByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n 
path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n 
},\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPrompt;\n}\n\n/**\n * Generate text according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptStreamed({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-streamed/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 
'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptStreamed;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPromptObject(), and will be removed on 2026-03-31.\n */\nexport function generateTextByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n 
path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n 
path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 
'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByPromptObject;\n}\n\n/**\n * Generate text according to Prompt object configuration.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByPromptObjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByPromptObjectStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 
'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByPromptObjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-prompt-object-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n 
return metadata;\n }\n\n return __generateTextByPromptObjectStreamed;\n}\n\n/** Generate an embedding using the provided request. */\nexport function generateEmbedding(payload: object): RequestOptionsFactory<any> {\n function __generateEmbedding({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateEmbedding',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-embedding',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'openAiEmbeddingsResponse.data.floatEmbedding.embedding',\n isRepeated: true,\n },\n {\n path: 'azureEmbeddingsResponse.data.floatEmbedding.embedding',\n isRepeated: true,\n },\n {\n path: 'googleEmbeddingsResponse.predictions.embeddings.embedding',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateEmbedding;\n}\n\n/**\n * Generate text according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByProject(), and will be removed on 2026-03-31.\n */\nexport function generateTextByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 
'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 
'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByProject;\n}\n\n/**\n * Generate text according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate text request configuration, such as what vendor, what model and what parameters to use.\n * The response is streamed back in chunks.\n */\nexport function generateTextByProjectStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTextByProjectStreamed({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTextByProjectStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-by-project-streamed/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.safetyRatings.severityScore',\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'googleGeminiStreamChunk.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTextByProjectStreamed;\n}\n\n/** Generate moderation output from specified moderation model provider. 
*/\nexport function generateModeration(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateModeration({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateModeration',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-moderation',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __generateModeration;\n}\n\n/**\n * Generate image according to Project id, that was previously published in the service. Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate image request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByProject(), and will be removed on 2026-03-31.\n */\nexport function generateImageByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n 
path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByProject;\n}\n\n/**\n * Generate image according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate image 
request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPrompt(), and will be removed on 2026-03-31.\n */\nexport function generateImageByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByPrompt;\n}\n\n/**\n * Generate image according to Prompt object configuration.\n * Prompt object is used for all generate image request configuration, such as what vendor, what model and what parameters to use.\n * @deprecated It has been replaced with GenerateContentByPromptObject(), and will be removed on 2026-03-31.\n */\nexport function generateImageByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateImageByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { 
path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 
'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateImageByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-image-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateImageByPromptObject;\n}\n\n/**\n * Generate different content such as text, image, and video according to Prompt id, that was previously published in the service.\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPrompt(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt/{promptId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n 
transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 
'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPrompt;\n}\n\n/**\n * Generate different content such as text, image, and video according to Project id, that was previously published in the service. 
Project's default prompt will be used to perform the request.\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByProject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-project/{projectId}',\n data: payload,\n host,\n }),\n data: payload,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 
'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 
'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByProject;\n}\n\n/**\n * Generate different content such as text, image, and video according to Prompt object configuration\n * Prompt object is used for all generate content request configuration, such as what vendor, what model and what parameters to use.\n */\nexport function generateContentByPromptObject(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateContentByPromptObject({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 
'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateContentByPromptObject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-content-by-prompt-object',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'response.googleTextBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleChatBisonResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.probabilityScore',\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.safetyRatings.severityScore',\n },\n {\n path: 'response.googleGenerateImageResponse.predictions.safetyAttributes.scores',\n isRepeated: true,\n },\n { path: 'response.openAiResponsesResponse.temperature' },\n { path: 'response.azureOpenAiResponsesResponse.temperature' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.openAiChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.openAiChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleTextBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.temperature',\n },\n {\n path: 'materializedPrompt.googleChatBisonRequest.parameters.topP',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.temperature',\n },\n { path: 'materializedPrompt.azureChatCompletionRequest.topP' },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.azureChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 
'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'materializedPrompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'materializedPrompt.anthropicClaudeRequest.temperature' },\n { path: 'materializedPrompt.anthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.googleAnthropicClaudeRequest.temperature',\n },\n { path: 'materializedPrompt.googleAnthropicClaudeRequest.topP' },\n {\n path: 'materializedPrompt.invokeAnthropicModelRequest.temperature',\n },\n { path: 'materializedPrompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n {\n path: 'materializedPrompt.amazonConverseRequest.inferenceConfig.topP',\n },\n {\n path: 'materializedPrompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n {\n path: 'materializedPrompt.stabilityAiStableDiffusionRequest.strength',\n },\n {\n path: 'materializedPrompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'materializedPrompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n {\n path: 'materializedPrompt.stabilityAiEditWithPromptRequest.creativity',\n },\n { path: 'materializedPrompt.runwareTextToImageRequest.strength' },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'materializedPrompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformOpenAiRawRequest.topP' },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.mlPlatformOpenAiRawRequest.frequencyPenalty',\n },\n { path: 'materializedPrompt.openAiResponsesRequest.temperature' },\n { path: 'materializedPrompt.openAiResponsesRequest.topP' },\n {\n path: 'materializedPrompt.azureOpenAiResponsesRequest.temperature',\n },\n { path: 'materializedPrompt.azureOpenAiResponsesRequest.topP' },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.groundingSupports.confidenceScores',\n isRepeated: true,\n },\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.retrievalMetadata.googleSearchDynamicRetrievalScore',\n },\n { path: 
'materializedPrompt.llamaModelRequest.temperature' },\n { path: 'materializedPrompt.llamaModelRequest.topP' },\n { path: 'materializedPrompt.runwareTextToImageRequest.cfgScale' },\n {\n path: 'materializedPrompt.runwareTextToImageRequest.loraModels.weight',\n },\n {\n path: 'materializedPrompt.mlPlatformLlamaModelRequest.temperature',\n },\n { path: 'materializedPrompt.mlPlatformLlamaModelRequest.topP' },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.temperature',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topP',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.topK',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'materializedPrompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n {\n path: 'materializedPrompt.runwareVideoInferenceRequest.cfgScale',\n },\n ],\n },\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n {\n path: 'response.googleGeminiGenerateContentResponse.candidates.groundingMetadata.searchEntryPoint.sdkBlob',\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateContentByPromptObject;\n}\n\n/** Transcribe input audio using the specified model. */\nexport function generateTranscription(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateTranscription({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [{ path: 'openAiTranscriptionRequest.temperature' }],\n },\n {\n transformFn: transformSDKBytesToRESTBytes,\n paths: [{ path: 'openAiTranscriptionRequest.fileContent.fileBytes' }],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateTranscription',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-transcription',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTDurationToSDKDuration,\n paths: [\n { path: 'openAiTranscriptionResponse.duration' },\n { path: 'openAiTranscriptionResponse.words.start' },\n { path: 'openAiTranscriptionResponse.words.end' },\n { path: 'openAiTranscriptionResponse.segments.start' },\n { path: 'openAiTranscriptionResponse.segments.end' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'openAiTranscriptionResponse.segments.temperature' },\n { path: 'openAiTranscriptionResponse.segments.avgLogprob' },\n { path: 'openAiTranscriptionResponse.segments.compressionRatio' },\n { path: 'openAiTranscriptionResponse.segments.noSpeechProb' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateTranscription;\n}\n\n/** Generate audio from text using the specified model. 
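A hypothetical usage sketch follows; only the 'speed' transform path is confirmed by this module, while the 'input' field and the host value are illustrative assumptions.\n * @example\n * // Hypothetical: build the POST /v1/generate-audio request descriptor\n * const metadata = generateAudio({ openAiCreateSpeechRequest: { input: 'Hello world', speed: 1.0 } })({ host: 'www.wixapis.com' });\n 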
*/\nexport function generateAudio(payload: object): RequestOptionsFactory<any> {\n function __generateAudio({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateAudio',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-audio',\n data: serializedData,\n host,\n }),\n data: serializedData,\n };\n\n return metadata;\n }\n\n return __generateAudio;\n}\n\n/** Generate audio from text using the specified model. The response is streamed back in chunks. */\nexport function generateAudioStreamed(\n payload: object\n): RequestOptionsFactory<any> {\n function __generateAudioStreamed({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'openAiCreateSpeechRequest.speed' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.style' },\n { path: 'elevenlabsTextToSpeechRequest.voiceSettings.stability' },\n {\n path: 'elevenlabsTextToSpeechRequest.voiceSettings.similarityBoost',\n },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GenerateAudioStreamed',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/generate-audio-streamed',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTBytesToSDKBytes,\n paths: [\n { path: 'openAiSpeechChunk.content' },\n { path: 'elevenlabsSpeechChunk.audioBase64' },\n ],\n },\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n {\n path: 'elevenlabsSpeechChunk.alignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.alignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterStartTimesSeconds',\n isRepeated: true,\n },\n {\n path: 'elevenlabsSpeechChunk.normalizedAlignment.characterEndTimesSeconds',\n isRepeated: true,\n },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __generateAudioStreamed;\n}\n\n/**\n * Publish the Prompt object to the service's storage. 
Enables request to GenerateTextByPrompt rpc using published Prompt's id.\n * Once published, a different Prompt configuration cannot be published with the same id, so the published Prompt is considered immutable.\n */\nexport function publishPrompt(payload: object): RequestOptionsFactory<any> {\n function __publishPrompt({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.temperature' },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n { path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight' },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n { path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg' },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg' },\n { path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n { path: 'prompt.googleCreateChatCompletionRequest.presencePenalty' },\n { path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 
'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n { path: 'prompt.perplexityChatCompletionRequest.presencePenalty' },\n { path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PublishPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/prompt/{prompt.id}',\n data: serializedData,\n host,\n }),\n data: serializedData,\n };\n\n return metadata;\n }\n\n return __publishPrompt;\n}\n\n/**\n * Retrieve the information about Prompt from service's storage.\n * If provided, the Prompt object's templated parameters will be expanded using values from the provided params.\n * An error will occur if the Prompt object's templated parameters are insufficient.\n */\nexport function getPrompt(payload: object): RequestOptionsFactory<any> {\n function __getPrompt({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetPrompt',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/prompt/{promptId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload, true),\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'prompt.openAiChatCompletionRequest.temperature' },\n { path: 'prompt.openAiChatCompletionRequest.topP' },\n { path: 'prompt.openAiChatCompletionRequest.presencePenalty' },\n { path: 'prompt.openAiChatCompletionRequest.frequencyPenalty' },\n { path: 'prompt.googleTextBisonRequest.parameters.temperature' },\n { path: 'prompt.googleTextBisonRequest.parameters.topP' },\n { path: 'prompt.googleChatBisonRequest.parameters.temperature' },\n { path: 'prompt.googleChatBisonRequest.parameters.topP' },\n { path: 'prompt.azureChatCompletionRequest.temperature' },\n { path: 'prompt.azureChatCompletionRequest.topP' },\n { path: 'prompt.azureChatCompletionRequest.presencePenalty' },\n { path: 'prompt.azureChatCompletionRequest.frequencyPenalty' },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.temperature',\n },\n {\n path: 'prompt.googleGeminiGenerateContentRequest.generationConfig.topP',\n },\n { path: 'prompt.anthropicClaudeRequest.temperature' },\n { path: 'prompt.anthropicClaudeRequest.topP' },\n { path: 
'prompt.googleAnthropicClaudeRequest.temperature' },\n { path: 'prompt.googleAnthropicClaudeRequest.topP' },\n { path: 'prompt.invokeAnthropicModelRequest.temperature' },\n { path: 'prompt.invokeAnthropicModelRequest.topP' },\n {\n path: 'prompt.amazonConverseRequest.inferenceConfig.temperature',\n },\n { path: 'prompt.amazonConverseRequest.inferenceConfig.topP' },\n {\n path: 'prompt.stabilityAiTextToImageRequest.textPrompts.weight',\n },\n { path: 'prompt.stabilityAiStableDiffusionRequest.strength' },\n {\n path: 'prompt.blackForestLabsGenerateImageRequest.imagePromptStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.trueCfg',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxPulid.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.loraStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.guidanceScale',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.controlStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.fluxDevControlnet.imageToImageStrength',\n },\n {\n path: 'prompt.replicateCreatePredictionRequest.prunaaiZImageTurbo.guidanceScale',\n },\n { path: 'prompt.stabilityAiEditWithPromptRequest.creativity' },\n { path: 'prompt.runwareTextToImageRequest.strength' },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.trueCfg',\n },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.idWeight',\n },\n {\n path: 'prompt.mlPlatformGenerateImageRequest.fluxPulid.guidanceScale',\n },\n { path: 'prompt.googleCreateChatCompletionRequest.temperature' },\n { path: 'prompt.googleCreateChatCompletionRequest.topP' },\n {\n path: 'prompt.googleCreateChatCompletionRequest.presencePenalty',\n },\n {\n path: 'prompt.googleCreateChatCompletionRequest.frequencyPenalty',\n },\n { path: 'prompt.mlPlatformOpenAiRawRequest.temperature' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.topP' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.presencePenalty' },\n { path: 'prompt.mlPlatformOpenAiRawRequest.frequencyPenalty' },\n { path: 'prompt.openAiResponsesRequest.temperature' },\n { path: 'prompt.openAiResponsesRequest.topP' },\n { path: 'prompt.azureOpenAiResponsesRequest.temperature' },\n { path: 'prompt.azureOpenAiResponsesRequest.topP' },\n { path: 'prompt.llamaModelRequest.temperature' },\n { path: 'prompt.llamaModelRequest.topP' },\n { path: 'prompt.runwareTextToImageRequest.cfgScale' },\n { path: 'prompt.runwareTextToImageRequest.loraModels.weight' },\n { path: 'prompt.mlPlatformLlamaModelRequest.temperature' },\n { path: 'prompt.mlPlatformLlamaModelRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.temperature' },\n { path: 'prompt.perplexityChatCompletionRequest.topP' },\n { path: 'prompt.perplexityChatCompletionRequest.topK' },\n {\n path: 'prompt.perplexityChatCompletionRequest.presencePenalty',\n },\n {\n path: 'prompt.perplexityChatCompletionRequest.frequencyPenalty',\n },\n { path: 'prompt.runwareVideoInferenceRequest.cfgScale' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __getPrompt;\n}\n\n/**\n * Publish the Project object to the service's storage. 
Enables request to GenerateTextByProject rpc using published Project's id.\n * Different Project configuration can be published with the same id, overwriting the previous Project configuration.\n */\nexport function publishProject(payload: object): RequestOptionsFactory<any> {\n function __publishProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PublishProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/project/{project.id}',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __publishProject;\n}\n\n/** Retrieve the information about Project from service's storage. */\nexport function getProject(payload: object): RequestOptionsFactory<any> {\n function __getProject({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetProject',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/project/{projectId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getProject;\n}\n\n/** Retrieve status by entity id and type. */\nexport function getStatus(payload: object): RequestOptionsFactory<any> {\n function __getStatus({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetStatus',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/status/{entityId}',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getStatus;\n}\n\n/** Gets info about application's overall and per-user budget and current usage. 
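A hypothetical wiring sketch (the empty payload and host value are illustrative assumptions):\n * @example\n * // Hypothetical: resolve the GET /v1/application-usage request descriptor\n * const metadata = getApplicationUsage({})({ host: 'www.wixapis.com' });\n * // metadata.url targets /v1/application-usage and metadata.params carries the query string\n 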
*/\nexport function getApplicationUsage(\n payload: object\n): RequestOptionsFactory<any> {\n function __getApplicationUsage({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'GET' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.GetApplicationUsage',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/application-usage',\n data: payload,\n host,\n }),\n params: toURLSearchParams(payload),\n };\n\n return metadata;\n }\n\n return __getApplicationUsage;\n}\n\n/** Image Editing APIs. */\nexport function editImage(payload: object): RequestOptionsFactory<any> {\n function __editImage({ host }: any) {\n const serializedData = transformPaths(payload, [\n {\n transformFn: transformSDKFloatToRESTFloat,\n paths: [\n { path: 'photoroomImageEditingRequest.background.guidance.scale' },\n { path: 'photoroomImageEditingRequest.margin.general' },\n { path: 'photoroomImageEditingRequest.margin.bottom' },\n { path: 'photoroomImageEditingRequest.margin.left' },\n { path: 'photoroomImageEditingRequest.margin.right' },\n { path: 'photoroomImageEditingRequest.margin.top' },\n ],\n },\n ]);\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn: 'wix.ds.wix_ai_gateway.v1.WixAiGateway.EditImage',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/edit-image',\n data: serializedData,\n host,\n }),\n data: serializedData,\n transformResponse: (payload: any) =>\n transformPaths(payload, [\n {\n transformFn: transformRESTFloatToSDKFloat,\n paths: [\n { path: 'photoroomRemoveBackgroundResponse.xUncertaintyScore' },\n { path: 'photoroomImageEditingResponse.xUncertaintyScore' },\n { path: 'replicateEditImageResponse.metrics.predictTime' },\n { path: 'replicateEditImageResponse.metrics.totalTime' },\n ],\n },\n ]),\n };\n\n return metadata;\n }\n\n return __editImage;\n}\n\n/** Poll image generation result by id. */\nexport function pollImageGenerationResult(\n payload: object\n): RequestOptionsFactory<any> {\n function __pollImageGenerationResult({ host }: any) {\n const metadata = {\n entityFqdn: 'wix.ds.wix_ai_gateway.v1.prompt',\n method: 'POST' as any,\n methodFqn:\n 'wix.ds.wix_ai_gateway.v1.WixAiGateway.PollImageGenerationResult',\n packageName: PACKAGE_NAME,\n migrationOptions: {\n optInTransformResponse: true,\n },\n url: resolveWixDsWixAiGatewayV1WixAiGatewayUrl({\n protoPath: '/v1/poll-image-generation-result',\n data: payload,\n host,\n }),\n data: payload,\n };\n\n return metadata;\n }\n\n return __pollImageGenerationResult;\n}\n","import type { GoogleProtoDuration } from '@wix/metro-runtime/ambassador';\n\nexport interface Prompt extends PromptModelRequestOneOf {\n /** OpenAI chat completion request. */\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. 
*/\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** Invoke Amazon Converse API request. */\n amazonConverseRequest?: InvokeConverseRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation request. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit request. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n /**\n * Prompt id.\n * @format GUID\n */\n id?: string | null;\n /**\n * Names of template parameters that will be checked and substituted during GenerateText requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedParameterNames?: string[];\n /** FallbackPromptConfig object that describes an optional second Prompt that can be invoked in case the main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n /**\n * Names of dynamic properties that will be checked and substituted during requests.\n * @maxLength 1000\n * @maxSize 100\n */\n templatedDynamicPropertiesNames?: string[];\n}\n\n/** @oneof */\nexport interface PromptModelRequestOneOf {\n /** OpenAI chat completion request. 
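Exactly one variant of this oneof should be set per Prompt; a hypothetical sketch follows (model, role, and values are illustrative, taken from the literal unions below):\n * @example\n * // Hypothetical: populate only the OpenAI chat completion variant\n * const oneof: PromptModelRequestOneOf = { openAiChatCompletionRequest: { model: 'GPT_4O_2024_08_06', messages: [{ role: 'USER', content: 'Say hi' }], temperature: 0.2 } };\n 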
*/\n openAiChatCompletionRequest?: OpenaiproxyV1CreateChatCompletionRequest;\n /** Google bison text completion request. */\n googleTextBisonRequest?: TextBisonPredictRequest;\n /** Google bison chat completion request. */\n googleChatBisonRequest?: ChatBisonPredictRequest;\n /** Azure OpenAI chat completion request. */\n azureChatCompletionRequest?: CreateChatCompletionRequest;\n /** Google Gemini generate content request. */\n googleGeminiGenerateContentRequest?: GenerateContentRequest;\n /** Anthropic Claude via Amazon Bedrock generate content request. */\n anthropicClaudeRequest?: InvokeAnthropicClaudeModelRequest;\n /** Anthropic Claude via Google Vertex request. */\n googleAnthropicClaudeRequest?: V1InvokeAnthropicClaudeModelRequest;\n /** Native Anthropic API proxy generate content request. */\n invokeAnthropicModelRequest?: InvokeAnthropicModelRequest;\n /** Llama via Amazon Bedrock text completion request. */\n llamaModelRequest?: InvokeLlamaModelRequest;\n /** Invoke Amazon Converse API request. */\n amazonConverseRequest?: InvokeConverseRequest;\n /** OpenAI generate image request (Image Generation). */\n openAiCreateImageRequest?: CreateImageRequest;\n /** Stability AI text to image request (Image Generation). */\n stabilityAiTextToImageRequest?: V1TextToImageRequest;\n /** Stability AI generate core request (Image Generation). */\n stabilityAiGenerateCoreRequest?: GenerateCoreRequest;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 request. */\n stabilityAiStableDiffusionRequest?: GenerateStableDiffusionRequest;\n /** Black Forest Labs - Flux Generate an Image request. */\n blackForestLabsGenerateImageRequest?: GenerateAnImageRequest;\n /** Replicate AI - Create Prediction request. */\n replicateCreatePredictionRequest?: CreatePredictionRequest;\n /** Stability AI - Edit with Prompt request. */\n stabilityAiEditWithPromptRequest?: EditImageWithPromptRequest;\n /** Runware AI - Flux TextToImage request */\n runwareTextToImageRequest?: TextToImageRequest;\n /** ML Platform Llama model prediction request */\n mlPlatformLlamaModelRequest?: InvokeMlPlatformLlamaModelRequest;\n /** Perplexity chat completion request */\n perplexityChatCompletionRequest?: InvokeChatCompletionRequest;\n /** Google AI - generate image request */\n googleGenerateImageRequest?: GenerateImageRequest;\n /** ML platform - generate image request */\n mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;\n /** OpenAI image creation request. */\n openAiCreateOpenAiImageRequest?: CreateImageOpenAiRequest;\n /** OpenAI image edit request. */\n openAiEditOpenAiImageRequest?: EditImageOpenAiRequest;\n /** Google AI - generate video request */\n googleGenerateVideoRequest?: GenerateVideoRequest;\n /** Google AI - create chat completion request */\n googleCreateChatCompletionRequest?: V1CreateChatCompletionRequest;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawRequest?: InvokeMlPlatformOpenAIChatCompletionRawRequest;\n /** Runware Video inference request */\n runwareVideoInferenceRequest?: VideoInferenceRequest;\n /** Open AI Responses API request */\n openAiResponsesRequest?: V1OpenAiResponsesRequest;\n /** Open AI Responses API request via Azure */\n azureOpenAiResponsesRequest?: OpenAiResponsesRequest;\n /** OpenAI video generation request */\n openAiCreateVideoRequest?: CreateVideoRequest;\n}\n\nexport interface FallbackPromptConfig {\n /**\n * Id of the fallback Prompt. 
This Prompt will be used for text generation in case the invocation of the original Prompt fails.\n * @format GUID\n */\n fallbackPromptId?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequest\n extends OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\": \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between responding to the end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: OpenaiproxyV1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: CreateChatCompletionRequestFunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
*/\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type\": \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: V1CreateChatCompletionRequestTool[];\n /** If present, describes the fine-tuning model that will be called instead of the generic one. */\n fineTuningSpec?: V1FineTuningSpec;\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting the type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: OpenaiproxyV1CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * o1 models only\n * @maxLength 100\n */\n reasoningEffort?: string | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses.\n * Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\": \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between responding to the end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface CreateChatCompletionRequestFunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). 
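A hypothetical signature with strict mode on (the name and schema are illustrative):\n * @example\n * // Hypothetical: a function signature the model must follow exactly\n * const fn: CreateChatCompletionRequestFunctionSignature = { name: 'lookup_order', description: 'Fetch an order by id', parameters: { type: 'object', properties: { orderId: { type: 'string' } }, required: ['orderId'] }, strict: true };\n 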
*/\n strict?: boolean | null;\n}\n\nexport enum OpenaiproxyV1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_0301 = 'GPT_3_5_TURBO_0301',\n GPT_4 = 'GPT_4',\n GPT_4_0314 = 'GPT_4_0314',\n GPT_4_32K = 'GPT_4_32K',\n GPT_4_32K_0314 = 'GPT_4_32K_0314',\n GPT_3_5_TURBO_0613 = 'GPT_3_5_TURBO_0613',\n GPT_3_5_TURBO_16K = 'GPT_3_5_TURBO_16K',\n GPT_3_5_TURBO_16K_0613 = 'GPT_3_5_TURBO_16K_0613',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_4_32K_0613 = 'GPT_4_32K_0613',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_1106_PREVIEW = 'GPT_4_1106_PREVIEW',\n GPT_4_VISION_PREVIEW = 'GPT_4_VISION_PREVIEW',\n GPT_4_TURBO_PREVIEW = 'GPT_4_TURBO_PREVIEW',\n GPT_4_0125_PREVIEW = 'GPT_4_0125_PREVIEW',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4_TURBO_2024_04_09 = 'GPT_4_TURBO_2024_04_09',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4O_2024_08_06 = 'GPT_4O_2024_08_06',\n O1_PREVIEW = 'O1_PREVIEW',\n O1_PREVIEW_2024_09_12 = 'O1_PREVIEW_2024_09_12',\n O1_MINI = 'O1_MINI',\n O1_MINI_2024_09_12 = 'O1_MINI_2024_09_12',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O1_2024_12_17 = 'O1_2024_12_17',\n O3_MINI_2025_01_31 = 'O3_MINI_2025_01_31',\n GPT_4_OLD = 'GPT_4_OLD',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n O3_2025_04_16 = 'O3_2025_04_16',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_EXP = 'GPT_EXP',\n GPT_EXP_2 = 'GPT_EXP_2',\n GPT_5_2025_08_07 = 'GPT_5_2025_08_07',\n GPT_5_MINI_2025_08_07 = 'GPT_5_MINI_2025_08_07',\n GPT_5_NANO_2025_08_07 = 'GPT_5_NANO_2025_08_07',\n GPT_5_2_2025_12_11_COMPLETION = 'GPT_5_2_2025_12_11_COMPLETION',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ModelWithLiterals =\n | OpenaiproxyV1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_0301'\n | 'GPT_4'\n | 'GPT_4_0314'\n | 'GPT_4_32K'\n | 'GPT_4_32K_0314'\n | 'GPT_3_5_TURBO_0613'\n | 'GPT_3_5_TURBO_16K'\n | 'GPT_3_5_TURBO_16K_0613'\n | 'GPT_4_0613'\n | 'GPT_4_32K_0613'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_1106_PREVIEW'\n | 'GPT_4_VISION_PREVIEW'\n | 'GPT_4_TURBO_PREVIEW'\n | 'GPT_4_0125_PREVIEW'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4_TURBO_2024_04_09'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4O_2024_08_06'\n | 'O1_PREVIEW'\n | 'O1_PREVIEW_2024_09_12'\n | 'O1_MINI'\n | 'O1_MINI_2024_09_12'\n | 'GPT_4O_2024_11_20'\n | 'O1_2024_12_17'\n | 'O3_MINI_2025_01_31'\n | 'GPT_4_OLD'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'O3_2025_04_16'\n | 'O4_MINI_2025_04_16'\n | 'GPT_EXP'\n | 'GPT_EXP_2'\n | 'GPT_5_2025_08_07'\n | 'GPT_5_MINI_2025_08_07'\n | 'GPT_5_NANO_2025_08_07'\n | 'GPT_5_2_2025_12_11_COMPLETION';\n\nexport interface OpenaiproxyV1ChatCompletionMessage {\n /** The role of the message author. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: ChatCompletionMessageFunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: OpenaiproxyV1ChatCompletionMessageContentPart[];\n}\n\nexport interface ChatCompletionMessageFunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum OpenaiproxyV1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals =\n | OpenaiproxyV1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ChatCompletionMessageToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: ChatCompletionMessageFunctionWithArgs;\n}\n\nexport interface OpenaiproxyV1ChatCompletionMessageContentPart\n extends OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: OpenaiproxyV1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface V1CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. */\n function?: CreateChatCompletionRequestFunctionSignature;\n}\n\nexport interface V1FineTuningSpec {\n /**\n * Organization field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:<my-org>:custom_suffix:id\n * @maxLength 100\n */\n org?: string | null;\n /**\n * Suffix field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:<custom_suffix>:id\n * @maxLength 100\n */\n suffix?: string | null;\n /**\n * Id field from in the returned fine-tuned model name\n * Example: ft:gpt-3.5-turbo:my-org:custom_suffix:<id>\n * @maxLength 100\n */\n id?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface TextBisonPredictRequest {\n /**\n * TextInstance objects containing input prompts.\n * @maxSize 100\n */\n instances?: TextInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: TextBisonModelWithLiterals;\n}\n\nexport interface TextInstance {\n /**\n * Text input to generate model response. Prompts can include preamble, questions, suggestions, instructions, or examples.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface PredictParameters {\n /**\n * The temperature is used for sampling during response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection. Lower temperatures are good for prompts that\n * require a less open-ended or creative response, while higher temperatures can lead to more diverse or creative results.\n * A temperature of 0 means that the highest probability tokens are always selected. In this case, responses for a\n * given prompt are mostly deterministic, but a small amount of variation is still possible.\n * For most use cases, try starting with a temperature of 0.2. If the model returns a response that's too generic,\n * too short, or the model gives a fallback response, try increasing the temperature.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters. 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for longer responses.\n * @min 1\n * @max 2048\n */\n maxOutputTokens?: number | null;\n /**\n * Top-K changes how the model selects tokens for output. 
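With OpenaiproxyV1ChatCompletionMessage and its content parts now fully defined, a vision-style message can be sketched as below. The image URL is hypothetical (it must be a valid wix-mp URL per the field docs), at most 5 parts are allowed, and the import assumes the types are re-exported from the package root:

import type { OpenaiproxyV1ChatCompletionMessage } from '@wix/auto_sdk_ai-gateway_generators';

const visionMessage: OpenaiproxyV1ChatCompletionMessage = {
  role: 'USER',
  // When contentParts is set, the plain `content` field is ignored (see above).
  contentParts: [
    { type: 'text', text: 'What is shown in this image?' },
    {
      type: 'image_url',
      imageUrl: {
        url: 'https://static.wixstatic.com/media/example.jpg', // hypothetical wix-mp URL
        detail: 'low', // low | high | auto; trades image fidelity against token cost
      },
    },
  ],
};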
A top-K of 1 means the next selected token is the most probable\n * among all tokens in the model's vocabulary (also called greedy decoding), while a top-K of 3 means that the next\n * token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled. Then tokens are further\n * filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-K is 40.\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output. Tokens are selected from the most (see top-K) to least\n * probable until the sum of their probabilities equals the top-P value. For example, if tokens A, B, and C have a\n * probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5, then the model will select either A or B as the next\n * token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses. The default top-P is 0.95.\n * @max 1\n */\n topP?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in\n * the response. If a string appears multiple times in the response, then the response truncates where it's first\n * encountered. The strings are case-sensitive.\n * @maxSize 100\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * The number of response variations to return.\n * @min 1\n * @max 8\n */\n candidateCount?: number | null;\n}\n\nexport enum TextBisonModel {\n UNKNOWN_TEXT_BISON_MODEL = 'UNKNOWN_TEXT_BISON_MODEL',\n TEXT_BISON = 'TEXT_BISON',\n TEXT_BISON_001 = 'TEXT_BISON_001',\n TEXT_BISON_32K = 'TEXT_BISON_32K',\n TEXT_BISON_002 = 'TEXT_BISON_002',\n TEXT_BISON_32K_002 = 'TEXT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type TextBisonModelWithLiterals =\n | TextBisonModel\n | 'UNKNOWN_TEXT_BISON_MODEL'\n | 'TEXT_BISON'\n | 'TEXT_BISON_001'\n | 'TEXT_BISON_32K'\n | 'TEXT_BISON_002'\n | 'TEXT_BISON_32K_002';\n\nexport interface ChatBisonPredictRequest {\n /**\n * ChatInstance objects containing inputs.\n * @maxSize 100\n */\n instances?: ChatInstance[];\n /** Model parameters. */\n parameters?: PredictParameters;\n /** Model to be invoked. */\n model?: ChatBisonModelWithLiterals;\n}\n\nexport interface ChatInstance {\n /**\n * Optional. Context shapes how the model responds throughout the conversation. For example, you can use context\n * to specify words the model can or cannot use, topics to focus on or avoid, or the response format or style.\n * @maxLength 100000\n */\n context?: string | null;\n /**\n * Optional. Examples for the model to learn how to respond to the conversation.\n * @maxSize 1000\n */\n examples?: Example[];\n /**\n * Required. Conversation history provided to the model in a structured alternate-author form. Messages appear in\n * chronological order: oldest first, newest last. When the history of messages causes the input to exceed the\n * maximum length, the oldest messages are removed until the entire prompt is within the allowed limit.\n * @maxSize 1000\n */\n messages?: ChatMessage[];\n}\n\nexport interface Example {\n /** An example of an input Message from the user. */\n input?: ChatMessage;\n /** An example of what the model should output given the input. 
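PredictParameters ties temperature, top-K, and top-P together: top-K narrows the candidate pool, top-P then cuts it by cumulative probability, and temperature picks within what remains. A minimal text-bison sketch, assuming root-level type exports; the prompt is illustrative:

import type { TextBisonPredictRequest } from '@wix/auto_sdk_ai-gateway_generators';

const predictRequest: TextBisonPredictRequest = {
  model: 'TEXT_BISON_002',
  instances: [{ prompt: 'Write a two-line product description for a ceramic mug.' }],
  parameters: {
    temperature: 0.2,        // start low; raise it if responses are too generic (per the docs above)
    maxOutputTokens: 256,    // ~100 tokens per 60-80 words
    topK: 40,                // sample from the 40 most probable tokens...
    topP: 0.95,              // ...then filter by cumulative probability
    stopSequences: ['\n\n'], // truncate at the first blank line
  },
};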
*/\n output?: ChatMessage;\n}\n\nexport interface ChatMessage {\n /**\n * Author tag for the turn.\n * @maxLength 100000\n */\n author?: string | null;\n /**\n * Text content of the chat message.\n * @maxLength 100000\n */\n content?: string;\n}\n\nexport enum ChatBisonModel {\n UNKNOWN_CHAT_BISON_MODEL = 'UNKNOWN_CHAT_BISON_MODEL',\n CHAT_BISON = 'CHAT_BISON',\n CHAT_BISON_001 = 'CHAT_BISON_001',\n CHAT_BISON_32K = 'CHAT_BISON_32K',\n CHAT_BISON_002 = 'CHAT_BISON_002',\n CHAT_BISON_32K_002 = 'CHAT_BISON_32K_002',\n}\n\n/** @enumType */\nexport type ChatBisonModelWithLiterals =\n | ChatBisonModel\n | 'UNKNOWN_CHAT_BISON_MODEL'\n | 'CHAT_BISON'\n | 'CHAT_BISON_001'\n | 'CHAT_BISON_32K'\n | 'CHAT_BISON_002'\n | 'CHAT_BISON_32K_002';\n\nexport interface CreateChatCompletionRequest\n extends CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n /** ID of the model to use. */\n model?: V1ModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: V1ChatCompletionMessage[];\n /**\n * A list of functions the model may generate JSON inputs for.\n * @maxSize 100\n * @deprecated\n * @replacedBy tools\n */\n functions?: FunctionSignature[];\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
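A ChatInstance layers context, few-shot examples, and an alternating-author history, with the oldest messages dropped first when the prompt exceeds the limit. A minimal chat-bison sketch under the same root-export assumption; the author tags and wording are illustrative:

import type { ChatBisonPredictRequest } from '@wix/auto_sdk_ai-gateway_generators';

const chatRequest: ChatBisonPredictRequest = {
  model: 'CHAT_BISON_002',
  instances: [
    {
      context: 'You are a terse support agent for a hosting product.',
      examples: [
        {
          input: { author: 'user', content: 'My site is down.' },
          output: { author: 'model', content: 'Checking status now. Can you share the domain?' },
        },
      ],
      // Oldest first, newest last; oldest turns are trimmed if the limit is exceeded.
      messages: [{ author: 'user', content: 'How do I renew my domain?' }],
    },
  ],
};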
*/\n n?: number | null;\n /**\n * Stream: Up to 4 sequences where the API will stop generating further tokens.\n * @maxSize 4\n * @maxLength 100\n */\n stop?: string[];\n /**\n * The maximum number of tokens allowed for the generated answer.\n * By default, the number of tokens the model can return will be (4096 - prompt tokens).\n */\n maxTokens?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * Modify the likelihood of specified tokens appearing in the completion.\n * Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.\n * Mathematically, the bias is added to the logits generated by the model prior to sampling.\n * The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n * values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n */\n logitBias?: Record<string, number>;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n /**\n * This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that\n * repeated requests with the same \"seed\" and parameters should return the same result. Determinism is not guaranteed,\n * and you should refer to the \"system_fingerprint\" response parameter to monitor changes in the backend.\n */\n seed?: string | null;\n /**\n * Controls which (if any) function is called by the model.\n * \"none\" means the model will not call a function and instead generates a message.\n * \"auto\" means the model can pick between generating a message or calling a function.\n * Specifying a particular function via {\"type: \"function\", \"function\": {\"name\": \"my_function\"}} forces the model to call that function.\n *\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10000\n */\n toolChoice?: string | null;\n /**\n * A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.\n * @maxSize 1000\n */\n tools?: CreateChatCompletionRequestTool[];\n /**\n * An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.\n * Setting to type to \"json_object\" enables JSON mode, which guarantees the message the model generates is valid JSON.\n * Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.\n * Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,\n * resulting in a long-running and seemingly \"stuck\" request. 
Also note that the message content may be partially cut off if finish_reason=\"length\",\n * which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.\n */\n responseFormat?: CreateChatCompletionRequestResponseFormat;\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n */\n maxCompletionTokens?: number | null;\n /** Whether to enable parallel function calling during tool use. */\n parallelToolCalls?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreateChatCompletionRequestFunctionCallOneOf {\n /** Specifying a particular function via {\"name\":\\ \"my_function\"} forces the model to call that function. */\n forceCallFunctionCallConfig?: Record<string, any> | null;\n /**\n * \"none\" means the model does not call a function, and responds to the end-user.\n * \"auto\" means the model can pick between an end-user or calling a function.\n * \"none\" is the default when no functions are present. \"auto\" is the default if functions are present.\n * @maxLength 10\n */\n defaultFunctionCallConfig?: string | null;\n}\n\nexport interface FunctionSignature {\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the functions accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. open-ai structured outputs). */\n strict?: boolean | null;\n}\n\nexport enum V1Model {\n UNKNOWN = 'UNKNOWN',\n GPT_3_5_TURBO = 'GPT_3_5_TURBO',\n GPT_3_5_TURBO_1106 = 'GPT_3_5_TURBO_1106',\n GPT_4_0613 = 'GPT_4_0613',\n GPT_3_5_TURBO_0125 = 'GPT_3_5_TURBO_0125',\n GPT_4O_2024_05_13 = 'GPT_4O_2024_05_13',\n /** New models for Migration */\n GPT_4O_MINI_2024_07_18 = 'GPT_4O_MINI_2024_07_18',\n GPT_4_1_MINI_2025_04_14 = 'GPT_4_1_MINI_2025_04_14',\n GPT_4_1_NANO_2025_04_14 = 'GPT_4_1_NANO_2025_04_14',\n GPT_4_1_2025_04_14 = 'GPT_4_1_2025_04_14',\n GPT_4O_2024_11_20 = 'GPT_4O_2024_11_20',\n O4_MINI_2025_04_16 = 'O4_MINI_2025_04_16',\n GPT_5_2_2025_12_11_COMPLETION = 'GPT_5_2_2025_12_11_COMPLETION',\n}\n\n/** @enumType */\nexport type V1ModelWithLiterals =\n | V1Model\n | 'UNKNOWN'\n | 'GPT_3_5_TURBO'\n | 'GPT_3_5_TURBO_1106'\n | 'GPT_4_0613'\n | 'GPT_3_5_TURBO_0125'\n | 'GPT_4O_2024_05_13'\n | 'GPT_4O_MINI_2024_07_18'\n | 'GPT_4_1_MINI_2025_04_14'\n | 'GPT_4_1_NANO_2025_04_14'\n | 'GPT_4_1_2025_04_14'\n | 'GPT_4O_2024_11_20'\n | 'O4_MINI_2025_04_16'\n | 'GPT_5_2_2025_12_11_COMPLETION';\n\nexport interface V1ChatCompletionMessage {\n /** The role of the message author. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The contents of the message. content is required for all messages, and may be null for assistant messages with function calls.\n * @maxLength 1000000000\n */\n content?: string | null;\n /**\n * The name of the author of this message. name is required if role is function, and it should be the name of\n * the function whose response is in the content. 
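The JSON-mode caveat above is worth making concrete: enabling responseFormat alone is not enough, the prompt itself must also ask for JSON. A minimal sketch of CreateChatCompletionRequest, assuming root-level type exports; model choice and prompts are illustrative:

import type { CreateChatCompletionRequest } from '@wix/auto_sdk_ai-gateway_generators';

const jsonRequest: CreateChatCompletionRequest = {
  model: 'GPT_4O_2024_11_20',
  messages: [
    // Without an explicit JSON instruction, JSON mode may emit whitespace
    // until the token limit is hit (see the responseFormat docs above).
    { role: 'SYSTEM', content: 'Respond with a single JSON object only.' },
    { role: 'USER', content: 'List three primary colors as {"colors": [...]}.' },
  ],
  responseFormat: { type: 'json_object' },
  temperature: 0.2, // tune temperature or topP, not both (per the docs above)
};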
May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.\n * @minLength 1\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The name and arguments of a function that should be called, as generated by the model.\n * @deprecated\n * @replacedBy tool_calls\n */\n functionCall?: FunctionWithArgs;\n /**\n * The tool calls generated by the model, such as function calls.\n * @maxSize 1000\n */\n toolCalls?: ToolCall[];\n /**\n * Tool call that this message is responding to.\n * @maxLength 100\n */\n toolCallId?: string | null;\n /**\n * An array of content parts with a defined type,each can be of type text or image_url when passing in images.\n * If defined, content field will be ignored.\n * You can pass multiple images by adding multiple image_url content parts.\n * Image input is only supported when using the gpt-4-visual-preview model.\n * @maxSize 5\n */\n contentParts?: ChatCompletionMessageContentPart[];\n}\n\nexport interface FunctionWithArgs {\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The arguments to call the function with, as generated by the model in JSON format.\n * Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by\n * your function schema. Validate the arguments in your code before calling your function.\n * @maxLength 1000000\n */\n arguments?: string | null;\n}\n\nexport interface ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * By controlling the detail parameter, which has three options, low, high, or auto,\n * you have control over how the model processes the image and generates its textual understanding.\n * more info and cost calculation : https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n FUNCTION = 'FUNCTION',\n TOOL = 'TOOL',\n /**\n * Developer-provided instructions that the model should follow, regardless of messages sent by the user.\n * With o1 models and newer, developer messages replace the previous system messages.\n */\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ChatCompletionMessageMessageRoleWithLiterals =\n | ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'FUNCTION'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface ToolCall {\n /**\n * The ID of the tool call.\n * @maxLength 100\n */\n id?: string;\n /**\n * The type of the tool. Currently, only function is supported.\n * @maxLength 100\n */\n type?: string;\n /** The function that the model called. */\n function?: FunctionWithArgs;\n}\n\nexport interface ChatCompletionMessageContentPart\n extends ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. 
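The tool-calling round trip implied by toolCalls and toolCallId looks like the sketch below: the model returns an assistant turn with tool calls, you execute the tool (validating the model-generated JSON arguments first, as FunctionWithArgs warns), and reply with role TOOL keyed by the call id. Ids and function names are hypothetical:

import type { V1ChatCompletionMessage } from '@wix/auto_sdk_ai-gateway_generators';

// Assistant turn as returned by the model (id is hypothetical).
const assistantTurn: V1ChatCompletionMessage = {
  role: 'ASSISTANT',
  content: null, // content may be null on assistant messages with tool calls
  toolCalls: [
    {
      id: 'call_abc123',
      type: 'function',
      function: { name: 'get_weather', arguments: '{"city":"Berlin"}' },
    },
  ],
};

// Your reply: run the tool, then send its result under the matching toolCallId.
const toolTurn: V1ChatCompletionMessage = {
  role: 'TOOL',
  toolCallId: 'call_abc123',
  content: '{"tempC": 18, "conditions": "cloudy"}',
};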
Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface CreateChatCompletionRequestTool {\n /**\n * The type of the tool. Currently, only \"function\" is supported.\n * @maxLength 100\n */\n type?: string;\n /** Function definition object. */\n function?: FunctionSignature;\n}\n\nexport interface CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface GenerateContentRequest {\n /** ID of the model to use. */\n model?: GoogleproxyV1ModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @maxSize 1000\n */\n contents?: Content[];\n /** The system instruction to the model. */\n systemInstruction?: SystemInstruction;\n /**\n * A list of Tools the model may use to generate the next response.\n * @maxSize 1000\n */\n tools?: GoogleproxyV1Tool[];\n /**\n * Per request settings for blocking unsafe content. Enforced on GenerateContentResponse.candidates.\n * @maxSize 100\n */\n safetySettings?: SafetySetting[];\n /** The generation configuration for the response. */\n generationConfig?: GenerationConfig;\n /** Tool configuration for any Tool specified in the request. */\n toolConfig?: V1ToolConfig;\n /** If present, describes the fine-tuning model that will be called instead of generic one. */\n fineTuningSpec?: FineTuningSpec;\n}\n\nexport enum GoogleproxyV1Model {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n GEMINI_1_0_PRO = 'GEMINI_1_0_PRO',\n GEMINI_1_0_PRO_VISION = 'GEMINI_1_0_PRO_VISION',\n GEMINI_1_5_PRO = 'GEMINI_1_5_PRO',\n GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH',\n GEMINI_2_0_FLASH = 'GEMINI_2_0_FLASH',\n GEMINI_2_0_FLASH_LITE = 'GEMINI_2_0_FLASH_LITE',\n GEMINI_2_5_PRO = 'GEMINI_2_5_PRO',\n GEMINI_2_5_FLASH = 'GEMINI_2_5_FLASH',\n GEMINI_2_5_FLASH_LITE = 'GEMINI_2_5_FLASH_LITE',\n GEMINI_2_5_FLASH_IMAGE = 'GEMINI_2_5_FLASH_IMAGE',\n GEMINI_2_5_COMPUTER_USE = 'GEMINI_2_5_COMPUTER_USE',\n GEMINI_3_0_PRO = 'GEMINI_3_0_PRO',\n GEMINI_3_0_PRO_IMAGE = 'GEMINI_3_0_PRO_IMAGE',\n GEMINI_3_0_FLASH = 'GEMINI_3_0_FLASH',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ModelWithLiterals =\n | GoogleproxyV1Model\n | 'UNKNOWN_MODEL'\n | 'GEMINI_1_0_PRO'\n | 'GEMINI_1_0_PRO_VISION'\n | 'GEMINI_1_5_PRO'\n | 'GEMINI_1_5_FLASH'\n | 'GEMINI_2_0_FLASH'\n | 'GEMINI_2_0_FLASH_LITE'\n | 'GEMINI_2_5_PRO'\n | 'GEMINI_2_5_FLASH'\n | 'GEMINI_2_5_FLASH_LITE'\n | 'GEMINI_2_5_FLASH_IMAGE'\n | 'GEMINI_2_5_COMPUTER_USE'\n | 'GEMINI_3_0_PRO'\n | 'GEMINI_3_0_PRO_IMAGE'\n | 'GEMINI_3_0_FLASH';\n\nexport interface Content {\n /**\n * The role in a conversation associated with the content.\n * Specifying a role is required even in single turn use cases. Acceptable values include the following:\n * USER: Specifies content that's sent by you. MODEL: Specifies the model's response.\n */\n role?: ContentRoleWithLiterals;\n /**\n * Ordered parts that make up the input. Parts may have different MIME types.\n * For gemini-1.0-pro, only the text field is valid. 
The token limit is 32k.\n * For gemini-1.0-pro-vision, you may specify either text only, text and up to 16 images, or text and 1 video. The token limit is 16k.\n * @maxSize 1000\n */\n parts?: V1ContentPart[];\n}\n\nexport enum ContentRole {\n UNKNOWN_CONTENT_ROLE = 'UNKNOWN_CONTENT_ROLE',\n USER = 'USER',\n MODEL = 'MODEL',\n}\n\n/** @enumType */\nexport type ContentRoleWithLiterals =\n | ContentRole\n | 'UNKNOWN_CONTENT_ROLE'\n | 'USER'\n | 'MODEL';\n\nexport interface V1ContentPart {\n /**\n * Union field data can be only one of the following:\n * The text instructions or chat dialogue to include in the prompt.\n * @maxLength 1000000000\n */\n text?: string | null;\n /** data field not supported for gemini-1.0-pro */\n contentData?: ContentData;\n /** A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. */\n functionCall?: FunctionCall;\n /**\n * The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the\n * function is used as context to the model. This should contain the result of aFunctionCall made based on model prediction.\n */\n functionResponse?: FunctionResponse;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /** Optional. Media resolution level for the input media. */\n mediaResolution?: MediaResolution;\n /** Thought flag indicates that the content part is a thought. */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ContentData {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * The MIME type of the content data. supported types are image/jpeg, image/png.\n * @maxLength 100\n */\n mimeType?: string | null;\n}\n\nexport interface FunctionCall {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional. The function parameters and values in JSON object format. */\n args?: Record<string, any> | null;\n}\n\nexport interface FunctionResponse {\n /**\n * Required. The name of the function to call. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 63.\n * @maxLength 64\n */\n name?: string;\n /** Required. The function response in JSON object format. */\n response?: Record<string, any> | null;\n}\n\nexport interface ExecutableCode {\n /** Required. Programming language of the code. */\n language?: LanguageWithLiterals;\n /**\n * Required. The code to be executed.\n * @maxLength 100000\n */\n code?: string;\n}\n\nexport enum Language {\n /** Unspecified language. This value should not be used. 
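FunctionCall and FunctionResponse form the two halves of a Gemini function-calling turn: the model emits a functionCall part, and you feed the structured result back as a functionResponse part in the next USER content. A minimal sketch with hypothetical names and values, assuming root-level type exports:

import type { Content } from '@wix/auto_sdk_ai-gateway_generators';

// Model turn: a predicted FunctionCall part.
const modelTurn: Content = {
  role: 'MODEL',
  parts: [{ functionCall: { name: 'lookup_order', args: { orderId: '42' } } }],
};

// Your turn: return the tool output as a FunctionResponse part for context.
const userTurn: Content = {
  role: 'USER',
  parts: [
    { functionResponse: { name: 'lookup_order', response: { status: 'shipped' } } },
  ],
};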
*/\n LANGUAGE_UNSPECIFIED = 'LANGUAGE_UNSPECIFIED',\n /** Python >= 3.10, with numpy and simpy available. */\n PYTHON = 'PYTHON',\n}\n\n/** @enumType */\nexport type LanguageWithLiterals = Language | 'LANGUAGE_UNSPECIFIED' | 'PYTHON';\n\nexport interface V1CodeExecutionResult {\n /** Required. Outcome of the code execution. */\n outcome?: OutcomeWithLiterals;\n /**\n * Optional. Contains stdout when code execution is successful, stderr or other description otherwise.\n * @maxLength 100000\n */\n output?: string | null;\n}\n\nexport enum Outcome {\n /** Unspecified status. This value should not be used. */\n OUTCOME_UNSPECIFIED = 'OUTCOME_UNSPECIFIED',\n /** Code execution completed successfully. */\n OUTCOME_OK = 'OUTCOME_OK',\n /** Code execution finished but with a failure. stderr should contain the reason. */\n OUTCOME_FAILED = 'OUTCOME_FAILED',\n /** Code execution ran for too long, and was cancelled. There may or may not be a partial output present. */\n OUTCOME_DEADLINE_EXCEEDED = 'OUTCOME_DEADLINE_EXCEEDED',\n}\n\n/** @enumType */\nexport type OutcomeWithLiterals =\n | Outcome\n | 'OUTCOME_UNSPECIFIED'\n | 'OUTCOME_OK'\n | 'OUTCOME_FAILED'\n | 'OUTCOME_DEADLINE_EXCEEDED';\n\n/**\n * Raw media bytes.\n * Text should not be sent as raw bytes, use the 'text' field.\n */\nexport interface Blob {\n /**\n * The IANA standard MIME type of the source data.\n * Examples: - image/png - image/jpeg\n * If an unsupported MIME type is provided, an error will be returned.\n * For a complete list of supported types, see https://ai.google.dev/gemini-api/docs/file-prompting-strategies#supported_file_formats.\n * @maxLength 100\n */\n mimeType?: string;\n /**\n * Represents raw bytes for media formats. Will be fetched from the passed URL in request, and uploaded to WixMP URL in response.\n * @format WEB_URL\n */\n data?: string;\n}\n\nexport interface MediaResolution {\n /** Media resolution level */\n level?: MediaResolutionLevelWithLiterals;\n}\n\nexport enum MediaResolutionLevel {\n /** Media resolution has not been set. */\n MEDIA_RESOLUTION_UNSPECIFIED = 'MEDIA_RESOLUTION_UNSPECIFIED',\n /** Media resolution set to low (64 tokens). */\n MEDIA_RESOLUTION_LOW = 'MEDIA_RESOLUTION_LOW',\n /** Media resolution set to medium (256 tokens). */\n MEDIA_RESOLUTION_MEDIUM = 'MEDIA_RESOLUTION_MEDIUM',\n /** Media resolution set to high (zoomed reframing with 256 tokens). */\n MEDIA_RESOLUTION_HIGH = 'MEDIA_RESOLUTION_HIGH',\n}\n\n/** @enumType */\nexport type MediaResolutionLevelWithLiterals =\n | MediaResolutionLevel\n | 'MEDIA_RESOLUTION_UNSPECIFIED'\n | 'MEDIA_RESOLUTION_LOW'\n | 'MEDIA_RESOLUTION_MEDIUM'\n | 'MEDIA_RESOLUTION_HIGH';\n\nexport interface SystemInstruction {\n /**\n * The role field of systemInstruction is ignored and doesn't affect the performance of the model.\n * @maxLength 20\n */\n role?: string | null;\n /**\n * Instructions for the model to steer it toward better performance.\n * The text strings count toward the token limit.\n * @maxSize 10\n */\n parts?: V1ContentPart[];\n}\n\nexport interface GoogleproxyV1Tool {\n /**\n * One or more function declarations\n * More information about the function declarations :\n * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling\n * @maxSize 1000\n */\n functionDeclarations?: FunctionDeclaration[];\n /** Optional. Retrieval tool that is powered by Google search. */\n googleSearchRetrieval?: GoogleSearchRetrieval;\n /** Optional. Enables the model to execute code as part of generation. 
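Putting GenerateContentRequest, SystemInstruction, and ContentData together gives a multimodal request like the sketch below. The URL is hypothetical (it must be a wix mp or wix static URL), contentData is not supported on gemini-1.0-pro, and the systemInstruction role is ignored per its docs:

import type { GenerateContentRequest } from '@wix/auto_sdk_ai-gateway_generators';

const generateRequest: GenerateContentRequest = {
  model: 'GEMINI_2_5_FLASH',
  systemInstruction: { parts: [{ text: 'Answer in one sentence.' }] },
  contents: [
    {
      role: 'USER',
      parts: [
        { text: 'Describe this image.' },
        {
          contentData: {
            url: 'https://static.wixstatic.com/media/example.png', // hypothetical wix static URL
            mimeType: 'image/png', // supported types: image/jpeg, image/png
          },
        },
      ],
    },
  ],
};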
*/\n codeExecution?: CodeExecution;\n /** Optional. GoogleSearch tool type. Tool to support Google Search in Model. Powered by Google. */\n googleSearch?: GoogleSearch;\n /**\n * Optional. Tool to support the model interacting directly with the computer.\n * If enabled, it automatically populates computer-use specific Function Declarations.\n */\n computerUse?: ComputerUse;\n}\n\nexport enum DynamicRetrievalConfigMode {\n /** Always trigger retrieval. */\n MODE_UNSPECIFIED = 'MODE_UNSPECIFIED',\n /** Run retrieval only when system decides it is necessary. */\n MODE_DYNAMIC = 'MODE_DYNAMIC',\n}\n\n/** @enumType */\nexport type DynamicRetrievalConfigModeWithLiterals =\n | DynamicRetrievalConfigMode\n | 'MODE_UNSPECIFIED'\n | 'MODE_DYNAMIC';\n\nexport interface DynamicRetrievalConfig {\n /** The mode of the predictor to be used in dynamic retrieval. */\n mode?: DynamicRetrievalConfigModeWithLiterals;\n /** The threshold to be used in dynamic retrieval. If not set, a system default value is used. */\n dynamicThreshold?: string | null;\n}\n\nexport enum Environment {\n /** Defaults to browser. */\n ENVIRONMENT_UNSPECIFIED = 'ENVIRONMENT_UNSPECIFIED',\n /** Operates in a web browser. */\n ENVIRONMENT_BROWSER = 'ENVIRONMENT_BROWSER',\n}\n\n/** @enumType */\nexport type EnvironmentWithLiterals =\n | Environment\n | 'ENVIRONMENT_UNSPECIFIED'\n | 'ENVIRONMENT_BROWSER';\n\nexport interface FunctionDeclaration {\n /**\n * The name of the function to call. Must start with a letter or an underscore.\n * Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description and purpose of the function. The model uses this to decide how and whether to call the function.\n * For the best results, we recommend that you include a description.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * The parameters of this function in a format that's compatible with the OpenAPI\n * https://spec.opensoapis.org/oas/v3.0.3#schema\n */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GoogleSearchRetrieval {\n /** Specifies the dynamic retrieval configuration for the given source. */\n dynamicRetrievalConfig?: DynamicRetrievalConfig;\n}\n\nexport interface CodeExecution {}\n\nexport interface GoogleSearch {}\n\nexport interface ComputerUse {\n /** Required. The environment being operated. */\n environment?: EnvironmentWithLiterals;\n /**\n * Optional. By default, predefined functions are included in the final model call.\n * Some of them can be explicitly excluded from being automatically included.\n * This can serve two purposes:\n * 1. Using a more restricted / different action space.\n * 2. Improving the definitions / instructions of predefined functions.\n * @maxSize 100\n * @maxLength 1000\n */\n excludedPredefinedFunctions?: string[];\n}\n\nexport interface SafetySetting {\n /** The safety category to configure a threshold for. */\n category?: HarmCategoryWithLiterals;\n /** The threshold for blocking responses that could belong to the specified safety category based on probability. 
*/\n threshold?: ThresholdWithLiterals;\n}\n\nexport enum HarmCategory {\n UNKNOWN_CATEGORY = 'UNKNOWN_CATEGORY',\n HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',\n HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',\n HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',\n}\n\n/** @enumType */\nexport type HarmCategoryWithLiterals =\n | HarmCategory\n | 'UNKNOWN_CATEGORY'\n | 'HARM_CATEGORY_SEXUALLY_EXPLICIT'\n | 'HARM_CATEGORY_HATE_SPEECH'\n | 'HARM_CATEGORY_HARASSMENT'\n | 'HARM_CATEGORY_DANGEROUS_CONTENT';\n\nexport enum Threshold {\n UNKNOWN_THRESHOLD = 'UNKNOWN_THRESHOLD',\n BLOCK_NONE = 'BLOCK_NONE',\n BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',\n BLOCK_MED_AND_ABOVE = 'BLOCK_MED_AND_ABOVE',\n BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',\n}\n\n/** @enumType */\nexport type ThresholdWithLiterals =\n | Threshold\n | 'UNKNOWN_THRESHOLD'\n | 'BLOCK_NONE'\n | 'BLOCK_LOW_AND_ABOVE'\n | 'BLOCK_MED_AND_ABOVE'\n | 'BLOCK_ONLY_HIGH';\n\nexport interface GenerationConfig {\n /**\n * The temperature is used for sampling during the response generation, which occurs when topP and topK are applied.\n * Temperature controls the degree of randomness in token selection.\n * Lower temperatures are good for prompts that require a more deterministic and less open-ended or creative response,\n * while higher temperatures can lead to more diverse or creative results. A temperature of 0 is deterministic:\n * the highest probability response is always selected.\n * Range: 0.0 - 1.0, Default for gemini-1.0-pro: 0.9, Default for gemini-1.0-pro-vision: 0.4\n * @max 1\n */\n temperature?: number | null;\n /**\n * Maximum number of tokens that can be generated in the response. A token is approximately four characters.\n * 100 tokens correspond to roughly 60-80 words.\n * Specify a lower value for shorter responses and a higher value for potentially longer responses.\n * Range for gemini-1.0-pro: 1-8192 (default: 8192),\n * Range for gemini-1.0-pro-vision: 1-2048 (default: 2048)\n * Range for gemini-2.5-pro: 1-65536\n * @min 1\n * @max 65536\n */\n maxOutputTokens?: string | null;\n /**\n * Top-K changes how the model selects tokens for output.\n * A top-K of 1 means the next selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding),\n * while a top-K of 3 means that the next token is selected from among the three most probable tokens by using temperature.\n * For each token selection step, the top-K tokens with the highest probabilities are sampled.\n * Then tokens are further filtered based on top-P with the final token selected using temperature sampling.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default for gemini-1.0-pro-vision: 32, Default for gemini-1.0-pro: none\n * @min 1\n * @max 40\n */\n topK?: number | null;\n /**\n * Top-P changes how the model selects tokens for output.\n * Tokens are selected from the most (see top-K) to least probable until the sum of their probabilities equals the top-P value.\n * For example, if tokens A, B, and C have a probability of 0.3, 0.2, and 0.1 and the top-P value is 0.5,\n * then the model will select either A or B as the next token by using temperature and excludes C as a candidate.\n * Specify a lower value for less random responses and a higher value for more random responses.\n * Default: 1.0\n * @max 1\n */\n topP?: number | null;\n /**\n * The number of response 
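Safety settings pair one category with one blocking threshold each and are enforced on the response candidates. A minimal sketch using the enum string literals defined above; the chosen thresholds are illustrative:

import type { SafetySetting } from '@wix/auto_sdk_ai-gateway_generators';

const safetySettings: SafetySetting[] = [
  { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_LOW_AND_ABOVE' },
  { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'BLOCK_MED_AND_ABOVE' },
  { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_ONLY_HIGH' },
];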
variations to return.This value must be 1.\n * @min 1\n * @max 1\n */\n candidateCount?: number | null;\n /**\n * Specifies a list of strings that tells the model to stop generating text if one of the strings is encountered in the response.\n * If a string appears multiple times in the response, then the response truncates where it's first encountered. The strings are case-sensitive.\n * For example, if the following is the returned response when stopSequences isn't specified:\n * public static string reverse(string myString)\n * Then the returned response with stopSequences set to [\"Str\",\"reverse\"] is:\n * public static string\n * Maximum 5 items in the list.\n * @maxSize 5\n * @maxLength 1000\n */\n stopSequences?: string[] | null;\n /**\n * Available for gemini-1.5-pro\n * The output format of the generated candidate text.\n * Supported MIME types: text/plain: (default) Text output. application/json: JSON response in the candidates.\n * text/x.enum: For classification tasks, output an enum value as defined in the response schema.\n * How to control the output format: https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/control-generated-output\n * @maxLength 50\n */\n responseMimeType?: string | null;\n /**\n * Available for gemini-1.5-pro.\n * The schema that generated candidate text must follow. For more information, see Control generated output.\n * You must specify the responseType or responseMimeType field to use this parameter.\n * Link for examples : https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema\n */\n responseSchema?: Record<string, any> | null;\n /**\n * Optional. Output schema of the generated response. This is an alternative to responseSchema that accepts JSON Schema.\n * If set, responseSchema must be omitted, but responseMimeType is required.\n * While the full JSON Schema may be sent, not all features are supported.\n * more information about supported features and examples can be found here:\n * https://ai.google.dev/api/generate-content#FIELDS.response_json_schema\n */\n responseJsonSchema?: Record<string, any> | null;\n /** Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking */\n thinkingConfig?: GenerationThinkingConfig;\n /**\n * Optional. The requested modalities of the response.\n * Represents the set of modalities that the model can return, and should be expected in the response.\n * This is an exact match to the modalities of the response.\n * A model may have multiple combinations of supported modalities.\n * If the requested modalities do not match any of the supported combinations, an error will be returned.\n * An empty list is equivalent to requesting only TEXT.\n * Currently supported as experimental feature for gemini-2.0-flash only.\n * @maxSize 5\n */\n responseModalities?: ModalityWithLiterals[];\n /**\n * Optional. 
Configuration for image generation.\n * This message allows you to control various aspects of image generation, such as the output format, aspect ratio, and whether the model can generate images of people.\n */\n imageConfig?: ImageConfig;\n /**\n * The media_resolution parameter controls how the Gemini API processes media inputs like images, videos,\n * and PDF documents by determining the maximum number of tokens allocated for media inputs,\n * allowing you to balance response quality against latency and cost.\n */\n mediaResolution?: MediaResolutionLevelWithLiterals;\n}\n\nexport interface GenerationThinkingConfig {\n /** Indicates whether to include thoughts in the response. If true, thoughts are returned only when available. */\n includeThoughts?: boolean | null;\n /** The number of thoughts tokens that the model should generate. */\n thinkingBudget?: string | null;\n /**\n * Thinking level parameter offering 2 states:\n * Low: Minimizes latency and cost. Best for simple instruction following or chat.\n * High: Maximizes reasoning depth. Default. Dynamic thinking.\n * The model may take significantly longer to reach a first token,\n * but the output will be more thoroughly vetted.\n * Note: You cannot use both thinking_level and the legacy thinking_budget parameter in the same request. Doing so will return a 400 error\n * @maxLength 20\n */\n thinkingLevel?: string | null;\n}\n\nexport enum Modality {\n UNKNOWN_MODALITY = 'UNKNOWN_MODALITY',\n /** Indicates the model should return text. */\n TEXT = 'TEXT',\n /** Indicates the model should return images. */\n IMAGE = 'IMAGE',\n /** Indicates the model should return audio. */\n AUDIO = 'AUDIO',\n}\n\n/** @enumType */\nexport type ModalityWithLiterals =\n | Modality\n | 'UNKNOWN_MODALITY'\n | 'TEXT'\n | 'IMAGE'\n | 'AUDIO';\n\nexport interface ImageConfig {\n /** Optional. The image output format for generated images. */\n imageOutputOptions?: ImageOutputOptions;\n /**\n * Optional. The desired aspect ratio for the generated images. The following aspect ratios are supported:\n * \"1:1\" \"2:3\", \"3:2\" \"3:4\", \"4:3\" \"4:5\", \"5:4\" \"9:16\", \"16:9\" \"21:9\"\n * @maxLength 10\n */\n aspectRatio?: string | null;\n /** Optional. Controls whether the model can generate people. */\n personGeneration?: PersonGenerationWithLiterals;\n}\n\nexport interface ImageOutputOptions {\n /**\n * Optional. The image format that the output should be saved as.\n * @maxLength 100\n */\n mimeType?: string | null;\n /** Optional. The compression quality of the output image. */\n compressionQuality?: string | null;\n}\n\nexport enum PersonGeneration {\n /** The default behavior is unspecified. The model will decide whether to generate images of people. */\n PERSON_GENERATION_UNSPECIFIED = 'PERSON_GENERATION_UNSPECIFIED',\n /** Allows the model to generate images of people, including adults and children. */\n ALLOW_ALL = 'ALLOW_ALL',\n /** Allows the model to generate images of adults, but not children. */\n ALLOW_ADULT = 'ALLOW_ADULT',\n /** Prevents the model from generating images of people. */\n ALLOW_NONE = 'ALLOW_NONE',\n}\n\n/** @enumType */\nexport type PersonGenerationWithLiterals =\n | PersonGeneration\n | 'PERSON_GENERATION_UNSPECIFIED'\n | 'ALLOW_ALL'\n | 'ALLOW_ADULT'\n | 'ALLOW_NONE';\n\nexport interface V1ToolConfig {\n /** Function calling config. */\n functionCallingConfig?: FunctionCallingConfig;\n}\n\nexport interface FunctionCallingConfig {\n /** Specifies the mode in which function calling should execute. 
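A GenerationConfig combining structured output with thinking controls might look like the sketch below. Note two details grounded in the field docs above: maxOutputTokens and thinkingBudget surface as strings (int64 fields in this SDK), and thinkingLevel is mutually exclusive with the legacy thinkingBudget. The schema shape is a hypothetical example:

import type { GenerationConfig } from '@wix/auto_sdk_ai-gateway_generators';

const generationConfig: GenerationConfig = {
  temperature: 0.4,
  maxOutputTokens: '2048', // int64 field, passed as a string
  // Structured output: a schema requires responseMimeType to be set as well.
  responseMimeType: 'application/json',
  responseSchema: {
    type: 'OBJECT', // hypothetical schema; see the Vertex Schema reference linked above
    properties: { title: { type: 'STRING' } },
    required: ['title'],
  },
  // thinkingLevel and the legacy thinkingBudget cannot be combined;
  // sending both returns a 400 (per the comment above).
  thinkingConfig: { includeThoughts: false, thinkingLevel: 'low' },
};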
If unspecified, the default value will be set to AUTO. */\n mode?: ModeWithLiterals;\n /**\n * A set of function names that, when provided, limits the functions the model will call.\n * This should only be set when the Mode is ANY or VALIDATED. Function names should match [FunctionDeclaration.name]. When set, model will predict a function call from only allowed function names.\n * @maxLength 64\n * @maxSize 100\n */\n allowedFunctionNames?: string[];\n}\n\nexport enum Mode {\n UNKNOWN = 'UNKNOWN',\n /** Default model behavior, model decides to predict either a function call or a natural language response. */\n AUTO = 'AUTO',\n /**\n * Model is constrained to always predicting a function call only. If \"allowedFunctionNames\" are set, the predicted function call will be limited to any one of \"allowedFunctionNames\",\n * else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n ANY = 'ANY',\n /** Model will not predict any function call. Model behavior is same as when not passing any function declarations. */\n NONE = 'NONE',\n /**\n * Model decides to predict either a function call or a natural language response, but will validate function calls with constrained decoding. If \"allowedFunctionNames\" are set, the predicted function call will be\n * limited to any one of \"allowedFunctionNames\", else the predicted function call will be any one of the provided \"functionDeclarations\".\n */\n VALIDATED = 'VALIDATED',\n}\n\n/** @enumType */\nexport type ModeWithLiterals =\n | Mode\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'NONE'\n | 'VALIDATED';\n\nexport interface FineTuningSpec {\n /**\n * Endpoint ID of the fine-tuning model to use.\n * @maxLength 100\n */\n id?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
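To force a function call and restrict which function may be chosen, combine mode ANY (or VALIDATED) with allowedFunctionNames, which must match the declared FunctionDeclaration names. A minimal sketch with a hypothetical function name:

import type { V1ToolConfig } from '@wix/auto_sdk_ai-gateway_generators';

const toolConfig: V1ToolConfig = {
  functionCallingConfig: {
    mode: 'ANY', // model must predict a function call
    allowedFunctionNames: ['lookup_order'], // only valid when mode is ANY or VALIDATED
  },
};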
*/\n model?: ModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: Tool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
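A basic Claude invocation per the request interface above favors the structured systemPrompt over the deprecated system string and keeps temperature low for analytical tasks. A minimal sketch, assuming root-level type exports; prompts are illustrative:

import type { InvokeAnthropicClaudeModelRequest } from '@wix/auto_sdk_ai-gateway_generators';

const claudeRequest: InvokeAnthropicClaudeModelRequest = {
  model: 'CLAUDE_4_5_SONNET_1_0',
  // Prefer systemPrompt over the deprecated `system` string field (see above).
  systemPrompt: [{ text: 'You are a concise assistant.' }],
  messages: [
    {
      role: 'USER',
      content: [{ textContent: { text: 'Name three TypeScript utility types.' } }],
    },
  ],
  maxTokens: 1024,  // defaults to 1024 if omitted (per the docs above)
  temperature: 0.2, // closer to 0 for analytical work, closer to 1 for creative work
};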
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: TypeWithLiterals;\n}\n\nexport enum Type {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type TypeWithLiterals = Type | 'UNKNOWN' | 'EPHEMERAL';\n\nexport enum Model {\n UNKNOWN = 'UNKNOWN',\n /** anthropic.claude-3-sonnet-20240229-v1:0 */\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n /** anthropic.claude-3-haiku-20240307-v1:0 */\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n /** anthropic.claude-3-5-sonnet-20240620-v1:0 */\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n /** anthropic.claude-3-5-sonnet-20241022-v2:0 */\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n /** us.anthropic.claude-3-5-haiku-20241022-v1:0 */\n CLAUDE_3_5_HAIKU_1_0 = 'CLAUDE_3_5_HAIKU_1_0',\n /** us.anthropic.claude-3-7-sonnet-20250219-v1:0 */\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n /** us.anthropic.claude-sonnet-4-5-20250929-v1:0 */\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n /** us.anthropic.claude-haiku-4-5-20251001-v1:0 */\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ModelWithLiterals =\n | Model\n | 'UNKNOWN'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_5_HAIKU_1_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: RoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: ContentBlock[];\n}\n\nexport enum Role {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type RoleWithLiterals = Role | 'UNKNOWN' | 'USER' | 'ASSISTANT';\n\nexport interface ContentBlock extends ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\n/** @oneof */\nexport interface ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image image = 2; // Image content. */\n imageUrl?: ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. 
*/\n toolUse?: ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: RedactedThinking;\n}\n\nexport interface Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: MediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport enum MediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type MediaTypeWithLiterals =\n | MediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n}\n\nexport interface SimpleContentBlock extends SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\n/** @oneof */\nexport interface SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: ImageUrl;\n}\n\nexport interface Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface Tool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n}\n\nexport enum ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type ToolChoiceTypeWithLiterals =\n | ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: ToolConfiguration;\n}\n\nexport enum McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type McpServerTypeWithLiterals = McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface ToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelRequest {\n /** The unique identifier of the model to invoke to run inference. 
*/\n model?: ClaudeModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: V1AnthropicClaudeMessage[];\n /**\n * System prompt.\n * @maxLength 1000000000\n * @deprecated System prompt.\n * @replacedBy system_prompt\n * @targetRemovalDate 2025-10-01\n */\n system?: string | null;\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: GoogleproxyV1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: InvokeAnthropicClaudeModelRequestTool[];\n /**\n * How the model should use the provided tools. The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: GoogleproxyV1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: GoogleproxyV1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: GoogleproxyV1McpServer[];\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport interface GoogleproxyV1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. 
*/\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface GoogleproxyV1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: V1CacheControlTypeWithLiterals;\n}\n\nexport enum V1CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type V1CacheControlTypeWithLiterals =\n | V1CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\nexport enum ClaudeModel {\n UNKNOWN_CLAUDE_MODEL = 'UNKNOWN_CLAUDE_MODEL',\n CLAUDE_3_SONNET_1_0 = 'CLAUDE_3_SONNET_1_0',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_OPUS_1_0 = 'CLAUDE_3_OPUS_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type ClaudeModelWithLiterals =\n | ClaudeModel\n | 'UNKNOWN_CLAUDE_MODEL'\n | 'CLAUDE_3_SONNET_1_0'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_OPUS_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface V1AnthropicClaudeMessage {\n /** The role of the message author. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum V1MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type V1MessageRoleRoleWithLiterals =\n | V1MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface GoogleproxyV1ContentBlock\n extends GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockTypeOneOf {\n /**\n * @maxLength 1000000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content. */\n imageUrl?: GoogleproxyV1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. 
*/\n toolUse?: GoogleproxyV1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: GoogleproxyV1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: GoogleproxyV1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1Text {\n /**\n * Text content.\n * @maxLength 1000000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: V1ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport enum V1ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type V1ImageMediaTypeMediaTypeWithLiterals =\n | V1ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface GoogleproxyV1ToolUse {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface GoogleproxyV1ToolResult {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: V1SimpleContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n}\n\nexport interface V1SimpleContentBlock extends V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\n/** @oneof */\nexport interface V1SimpleContentBlockTypeOneOf {\n /**\n * @maxLength 1000000\n * @deprecated\n * @replacedBy text_content\n * @targetRemovalDate 2025-10-01\n */\n text?: string;\n /** Text content. */\n textContent?: GoogleproxyV1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. 
*/\n imageUrl?: GoogleproxyV1ImageUrl;\n}\n\nexport interface GoogleproxyV1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface GoogleproxyV1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\nexport interface InvokeAnthropicClaudeModelRequestTool {\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 1000000000\n */\n description?: string | null;\n /**\n * Tool's name\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: GoogleproxyV1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: GoogleproxyV1CacheControl;\n /**\n * Tool type for Claude built-in tools\n * @maxLength 100000\n */\n type?: string | null;\n /** Maximum uses of a tool allowed to the model. Currently used only by `web_search` */\n maxUses?: number | null;\n}\n\nexport interface GoogleproxyV1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: GoogleproxyV1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum GoogleproxyV1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ToolChoiceTypeWithLiterals =\n | GoogleproxyV1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL';\n\nexport interface GoogleproxyV1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. 
Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface GoogleproxyV1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: GoogleproxyV1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: V1McpServerToolConfiguration;\n}\n\nexport enum GoogleproxyV1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type GoogleproxyV1McpServerTypeWithLiterals =\n | GoogleproxyV1McpServerType\n | 'UNKNOWN'\n | 'URL';\n\nexport interface V1McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface InvokeAnthropicModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: AnthropicModelWithLiterals;\n /**\n * Each input message content may be either a single string or an array of content blocks.\n * @maxSize 4096\n */\n messages?: AnthropicMessage[];\n /**\n * System prompt.\n * @maxSize 4096\n */\n systemPrompt?: V1Text[];\n /**\n * The maximum number of tokens to generate before stopping.\n * Defaults to 1024.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Custom text sequences that will cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n /**\n * Amount of randomness injected into the response.\n * Defaults to 1.0.\n * Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Only sample from the top K options for each subsequent token.\n * Use top_k to remove long tail low probability responses.\n * @max 500\n */\n topK?: number | null;\n /**\n * In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p.\n * You should alter either temperature or top_p, but not both.\n * @max 1\n */\n topP?: number | null;\n /**\n * Definitions of tools that the model may use.\n * If you include tools in your API request, the model may return tool_use content blocks that represent the model's\n * use of those tools. You can then run those tools using the tool input generated by the model and then optionally\n * return results back to the model using tool_result content blocks.\n * @maxSize 1000\n */\n tools?: V1Tool[];\n /**\n * How the model should use the provided tools. 
The model can use a specific tool, any available tool, or decide by itself.\n * More info: https://docs.anthropic.com/en/docs/build-with-claude/tool-use#forcing-tool-use\n */\n toolChoice?: V1ToolChoice;\n /**\n * Configuration for enabling Claude's extended thinking.\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n */\n thinking?: V1ThinkingConfig;\n /**\n * MCP servers to be utilized in this request\n * @maxSize 100\n */\n mcpServers?: V1McpServer[];\n /**\n * Container identifier for reuse across requests.\n * @maxLength 512\n */\n container?: string | null;\n /** An object describing metadata about the request. */\n metadata?: RequestMetadata;\n /** Desired output format. */\n outputFormat?: Record<string, any> | null;\n}\n\nexport enum AnthropicModel {\n UNKNOWN_ANTHROPIC_MODEL = 'UNKNOWN_ANTHROPIC_MODEL',\n CLAUDE_3_HAIKU_1_0 = 'CLAUDE_3_HAIKU_1_0',\n CLAUDE_3_5_SONNET_1_0 = 'CLAUDE_3_5_SONNET_1_0',\n CLAUDE_3_5_SONNET_2_0 = 'CLAUDE_3_5_SONNET_2_0',\n CLAUDE_3_7_SONNET_1_0 = 'CLAUDE_3_7_SONNET_1_0',\n CLAUDE_4_SONNET_1_0 = 'CLAUDE_4_SONNET_1_0',\n CLAUDE_4_OPUS_1_0 = 'CLAUDE_4_OPUS_1_0',\n CLAUDE_4_1_OPUS_1_0 = 'CLAUDE_4_1_OPUS_1_0',\n CLAUDE_4_5_SONNET_1_0 = 'CLAUDE_4_5_SONNET_1_0',\n CLAUDE_4_5_HAIKU_1_0 = 'CLAUDE_4_5_HAIKU_1_0',\n}\n\n/** @enumType */\nexport type AnthropicModelWithLiterals =\n | AnthropicModel\n | 'UNKNOWN_ANTHROPIC_MODEL'\n | 'CLAUDE_3_HAIKU_1_0'\n | 'CLAUDE_3_5_SONNET_1_0'\n | 'CLAUDE_3_5_SONNET_2_0'\n | 'CLAUDE_3_7_SONNET_1_0'\n | 'CLAUDE_4_SONNET_1_0'\n | 'CLAUDE_4_OPUS_1_0'\n | 'CLAUDE_4_1_OPUS_1_0'\n | 'CLAUDE_4_5_SONNET_1_0'\n | 'CLAUDE_4_5_HAIKU_1_0';\n\nexport interface AnthropicMessage {\n /** The role of the message author. */\n role?: MessageRoleRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum MessageRoleRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type MessageRoleRoleWithLiterals =\n | MessageRoleRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT';\n\n/** Content object used in both request and response */\nexport interface V1ContentBlock extends V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. 
*/\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\n/** @oneof */\nexport interface V1ContentBlockTypeOneOf {\n /** Text content. */\n textContent?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: V1ToolUse;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: V1ToolResult;\n /** Represents Claude’s internal thought process. */\n thinking?: V1Thinking;\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * Assistant requests an MCP tool call; client should execute it on the named MCP server\n * and later reply with mcp_tool_result referencing the same id.\n */\n mcpToolUse?: McpToolUse;\n /** User returns results of an MCP tool call; tool_use_id must equal the McpToolUse.id. Content carries output (text/image) or an error. */\n mcpToolResult?: V1ToolResult;\n /** Assistant announces an Anthropic-run server tool call (e.g., \"web_search\", \"code_execution\"). */\n serverToolUse?: ServerToolUse;\n /** Server tool result for Web Search. */\n webSearchToolResult?: WebSearchToolResult;\n /** Server tool result for Code Execution. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /** User attaches a file for the Code Execution container. */\n containerUpload?: ContainerUpload;\n /** Citable document. For future citations, resend this block in later requests so it remains in context. */\n document?: DocumentContent;\n /** Server tool result for Web Fetch. */\n webFetchToolResult?: WebFetchToolResult;\n}\n\nexport interface V1Text {\n /**\n * Text content.\n * @maxLength 1000000\n */\n text?: string;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n /**\n * Structured citations for this text block.\n * Populated by the model when citations are enabled.\n * @maxSize 256\n */\n citations?: Citation[];\n}\n\nexport interface V1CacheControl {\n /** Currently, “ephemeral” is the only supported cache type */\n type?: CacheControlTypeWithLiterals;\n /**\n * The time-to-live for the cache control breakpoint. 
This may be one of the following values:\n * 5m: 5 minutes (default)\n * 1h: 1 hour\n * @maxLength 50\n */\n ttl?: string | null;\n}\n\nexport enum CacheControlType {\n UNKNOWN = 'UNKNOWN',\n EPHEMERAL = 'EPHEMERAL',\n}\n\n/** @enumType */\nexport type CacheControlTypeWithLiterals =\n | CacheControlType\n | 'UNKNOWN'\n | 'EPHEMERAL';\n\n/** Unified wrapper for all citation kinds (attach to Text.citations). */\nexport interface Citation extends CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\n/** @oneof */\nexport interface CitationTypeOneOf {\n /** Char location */\n charLocation?: CharLocationCitation;\n /** Page location */\n pageLocation?: PageLocationCitation;\n /** Content block location */\n contentBlockLocation?: ContentBlockLocationCitation;\n /** Web search result location */\n webSearchResultLocation?: WebSearchResultLocationCitation;\n /** Search result location */\n searchResultLocation?: SearchResultLocationCitation;\n}\n\nexport interface CharLocationCitation {\n /**\n * Should be \"char_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start character index (inclusive) within the document text. */\n startCharIndex?: number | null;\n /** 0-based end character index (exclusive) within the document text. */\n endCharIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface PageLocationCitation {\n /**\n * Should be \"page_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 1-based start page number (inclusive). */\n startPageNumber?: number | null;\n /** 1-based end page number (exclusive). */\n endPageNumber?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface ContentBlockLocationCitation {\n /**\n * Should be \"content_block_location\"\n * @maxLength 500\n */\n type?: string;\n /** 0-based index into all document blocks in this request turn. */\n documentIndex?: number | null;\n /**\n * Optional copy of the source document’s title; informational only.\n * @maxLength 10000\n */\n documentTitle?: string | null;\n /** 0-based start content-block index (inclusive) within the custom document. */\n startBlockIndex?: number | null;\n /** 0-based end content-block index (exclusive) within the custom document. 
*/\n endBlockIndex?: number | null;\n /**\n * Optional quoted snippet; not counted toward tokens.\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface WebSearchResultLocationCitation {\n /**\n * Should be \"web_search_result_location\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the cited source\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the cited source\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * A reference that must be passed back for multi-turn conversations.\n * @maxLength 1000000\n */\n encryptedIndex?: string | null;\n /**\n * Up to 150 characters of the cited content\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface SearchResultLocationCitation {\n /**\n * Should be \"search_result_location\".\n * @maxLength 500\n */\n type?: string;\n /** Index of the search_result within the current turn (0-based). */\n searchResultIndex?: number | null;\n /** 0-based start block index within that search_result's content. */\n startBlockIndex?: number | null;\n /** 0-based end block index within that search_result's content. */\n endBlockIndex?: number | null;\n /**\n * Source string\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Optional title (same as search_result.title).\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional quoted snippet\n * @maxLength 1000000\n */\n citedText?: string | null;\n}\n\nexport interface V1ImageUrl {\n /**\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /** Media type of the image. */\n mediaType?: ImageMediaTypeMediaTypeWithLiterals;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport enum ImageMediaTypeMediaType {\n UNKNOWN = 'UNKNOWN',\n /** image/jpeg */\n IMAGE_JPEG = 'IMAGE_JPEG',\n /** image/png */\n IMAGE_PNG = 'IMAGE_PNG',\n /** image/webp */\n IMAGE_WEBP = 'IMAGE_WEBP',\n /** image/gif */\n IMAGE_GIF = 'IMAGE_GIF',\n}\n\n/** @enumType */\nexport type ImageMediaTypeMediaTypeWithLiterals =\n | ImageMediaTypeMediaType\n | 'UNKNOWN'\n | 'IMAGE_JPEG'\n | 'IMAGE_PNG'\n | 'IMAGE_WEBP'\n | 'IMAGE_GIF';\n\nexport interface V1ToolUse {\n /**\n * Tool use id\n * @maxLength 512\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string | null;\n /** Tool use input */\n input?: Record<string, any> | null;\n /** Optional: enable tool use caching */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1ToolResult {\n /**\n * Tool use id\n * @maxLength 512\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n isError?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: ToolResultContentBlock[];\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface ToolResultContentBlock\n extends ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\n/** @oneof */\nexport interface ToolResultContentBlockTypeOneOf {\n /** Text content. */\n text?: V1Text;\n /** Image content, represented as URL. 
Will be downloaded and passed on as base64. */\n image?: V1ImageUrl;\n /** Document content block. */\n document?: DocumentContent;\n /** Search result block with snippets/citations. */\n searchResult?: ToolResultSearchResult;\n}\n\nexport interface DocumentContent {\n /**\n * Should be \"document\"\n * @maxLength 500\n */\n type?: string;\n /** Citable payload or reference. */\n source?: DocumentSource;\n /**\n * Optional: Document title\n * Can be passed to the model but not used towards cited content.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Optional: Any document metadata as text or stringified json.\n * Can be passed to the model but not used towards cited content.\n * @maxLength 1000000\n */\n context?: string | null;\n /** Enable citations for this doc */\n citations?: CitationsEnabled;\n /** Optional: Cache the document content */\n cacheControl?: V1CacheControl;\n}\n\nexport interface DocumentSource {\n /**\n * One of: \"text\" | \"base64\" | \"content\" | \"file\" | \"url\".\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Required types \"text\"/\"base64\" (e.g., \"text/plain\", \"application/pdf\").\n * @maxLength 500\n */\n mediaType?: string | null;\n /**\n * For type \"text\": raw text. For \"base64\": bytes as base64.\n * @maxLength 10000000\n */\n data?: string | null;\n /**\n * For type \"file\": Files API id (e.g., \"file_01...\")\n * @maxLength 5000\n */\n fileId?: string | null;\n /**\n * For type \"url\": absolute URL to the document\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * For type \"content\": custom content block; Only text blocks are citable\n * @maxSize 500\n */\n content?: V1ContentBlock[];\n}\n\nexport interface CitationsEnabled {\n /** Whether to enable citations */\n enabled?: boolean | null;\n}\n\nexport interface ToolResultSearchResult {\n /**\n * Should be \"search_result\".\n * @maxLength 500\n */\n type?: string;\n /**\n * Where this result came from (URL or source label).\n * @maxLength 10000\n */\n source?: string | null;\n /**\n * Human-readable title for the result.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * Inline text snippets that summarize/support the result.\n * @maxSize 1000\n */\n content?: V1Text[];\n /**\n * Enable/disable citations for this result's content.\n * Matches Anthropic \"citations\" on search_result blocks.\n */\n citations?: CitationsEnabled;\n}\n\nexport interface V1Thinking {\n /**\n * Cryptographic token which verifies that the thinking block was generated by Claude, and is verified when thinking blocks are passed back to the API.\n * @maxLength 1000000\n */\n signature?: string;\n /**\n * Text content of a Thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n}\n\nexport interface V1RedactedThinking {\n /**\n * Occasionally Claude’s internal reasoning will be flagged by our safety systems. 
When this occurs, we encrypt some or all of the thinking block and return it to you as a redacted_thinking block.\n * These redacted thinking blocks are decrypted when passed back to the API, allowing Claude to continue its response without losing context.\n * @maxLength 1000000\n */\n data?: string;\n}\n\n/**\n * Assistant requests a Model Context Protocol (MCP) tool call.\n * Pair with ToolResult using the same `id`.\n */\nexport interface McpToolUse {\n /**\n * Unique id for this tool call; must match McpToolResult.tool_use_id.\n * @maxLength 512\n */\n id?: string | null;\n /**\n * Tool name as exposed by the MCP server.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Which MCP server to call (must match a server in the request).\n * @maxLength 1000\n */\n serverName?: string | null;\n /** JSON arguments for the tool (object per the tool's schema). */\n input?: Record<string, any> | null;\n}\n\n/**\n * Server-tool invocation announced by the ASSISTANT for Anthropic-run tools\n * (e.g., \"web_search\", \"code_execution\").\n */\nexport interface ServerToolUse {\n /**\n * Should be \"server_tool_use\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n id?: string | null;\n /**\n * The tool name. Available options: \"web_search\" | \"web_fetch\" | \"code_execution\" | \"bash_code_execution\" | \"text_editor_code_execution\"\n * @maxLength 500\n */\n name?: string | null;\n /**\n * Tool-specific parameters object:\n * web_search → { \"query\": \"<string>\" }\n * web_fetch → { \"url\": \"<string>\" }\n * code_execution → { \"code\": \"<python source>\" }\n */\n input?: Record<string, any> | null;\n}\n\n/** Server tool result (web search). Either results[] OR error. */\nexport interface WebSearchToolResult extends WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n /**\n * Should be \"web_search_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebSearchToolResultContentOneOf {\n /** maps to JSON: content: [ ... ] */\n contentResults?: WebSearchResultList;\n /** maps to JSON: content: { ... } */\n contentError?: WebSearchToolResultError;\n}\n\n/** Success payload: the JSON `content` ARRAY of result items. 
*/\nexport interface WebSearchResultList {\n /**\n * Result items\n * @maxSize 1000\n */\n items?: WebSearchResult[];\n}\n\n/**\n * One search result item.\n * Docs (“Search results include”): url, title, page_age, encrypted_content.\n * Each item also has the literal type field.\n */\nexport interface WebSearchResult {\n /**\n * Should be \"web_search_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The URL of the source page.\n * @maxLength 10000\n */\n url?: string | null;\n /**\n * The title of the source page.\n * @maxLength 10000\n */\n title?: string | null;\n /**\n * When the site was last updated (e.g., \"April 30, 2025\").\n * @maxLength 100\n */\n pageAge?: string | null;\n /**\n * Encrypted content that must be passed back in multi-turn conversations for citations.\n * @maxLength 1000000\n */\n encryptedContent?: string | null;\n}\n\n/**\n * Error payload\n * Possible error codes: too_many_requests | invalid_input | max_uses_exceeded | query_too_long | unavailable\n */\nexport interface WebSearchToolResultError {\n /**\n * Should be \"web_search_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface CodeExecutionToolResult\n extends CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n /**\n * Should be \"code_execution_tool_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface CodeExecutionToolResultContentOneOf {\n /** Success */\n contentResult?: CodeExecutionResult;\n /** Error */\n contentError?: CodeExecutionToolResultError;\n}\n\n/** Success payload for code execution. */\nexport interface CodeExecutionResult {\n /**\n * Should be \"code_execution_result\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * Output from successful execution (print, etc.).\n * @maxLength 1000000\n */\n stdout?: string | null;\n /**\n * Error messages emitted by the program.\n * @maxLength 1000000\n */\n stderr?: string | null;\n /** 0 = success, non-zero = failure. */\n returnCode?: number | null;\n /**\n * Optional: Array of produced artifacts.\n * Example item (typical): { \"file_id\": \"file_abc123\", ... }\n * @maxSize 4096\n */\n content?: Record<string, any>[] | null;\n}\n\n/**\n * Error payload (HTTP 200; error lives in the result body).\n * Docs list: unavailable | code_execution_exceeded | container_expired\n */\nexport interface CodeExecutionToolResultError {\n /**\n * Should be \"code_execution_tool_result_error\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * The error code value, e.g. 
\"unavailable\", \"code_execution_exceeded\", \"container_expired\".\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\nexport interface ContainerUpload {\n /**\n * Should be \"container_upload\"\n * @maxLength 500\n */\n type?: string | null;\n /**\n * File identifier returned by the Files API (e.g., \"file_01abc...\").\n * @maxLength 5000\n */\n fileId?: string | null;\n}\n\n/** Web fetch tool result */\nexport interface WebFetchToolResult extends WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n /**\n * Should be \"web_fetch_tool_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * Tool use id\n * @maxLength 500\n */\n toolUseId?: string | null;\n}\n\n/** @oneof */\nexport interface WebFetchToolResultContentOneOf {\n /** Content success */\n contentSuccess?: WebFetchToolResultContentSuccess;\n /** Content error */\n contentError?: WebFetchToolResultContentError;\n}\n\nexport interface WebFetchToolResultContentSuccess {\n /**\n * Should be \"web_fetch_result\"\n * @maxLength 500\n */\n type?: string;\n /**\n * The URL that was fetched\n * @maxLength 10000\n */\n url?: string | null;\n /** A document block containing the fetched content */\n content?: DocumentContent;\n /**\n * Timestamp when the content was retrieved\n * @maxLength 256\n */\n retrievedAt?: string | null;\n}\n\nexport interface WebFetchToolResultContentError {\n /**\n * Should be \"web_fetch_tool_result_error\"\n * @maxLength 500\n */\n type?: string;\n /**\n * These are the possible error codes:\n * - invalid_tool_input: Invalid URL format\n * - url_too_long: URL exceeds maximum length (250 characters)\n * - url_not_allowed: URL blocked by domain filtering rules and model restrictions\n * - url_not_accessible: Failed to fetch content (HTTP error)\n * - too_many_requests: Rate limit exceeded\n * - unsupported_content_type: Content type not supported (only text and PDF)\n * - max_uses_exceeded: Maximum web fetch tool uses exceeded\n * - unavailable: An internal error occurred\n * @maxLength 500\n */\n errorCode?: string | null;\n}\n\n/** Top-level tool wrapper. Exactly one branch is set. 
*/\nexport interface V1Tool extends V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\n/** @oneof */\nexport interface V1ToolKindOneOf {\n /**\n * Client tool.\n * User-defined custom tools that you create and implement\n */\n custom?: CustomTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can interact with computer environments through the computer use tool,\n * which provides screenshot capabilities and mouse/keyboard control for autonomous desktop interaction.\n */\n computerUse?: ComputerUseTool;\n /**\n * Client tool (Anthropic-defined).\n * Claude can use an Anthropic-defined text editor tool to view and modify text files, helping you debug, fix, and improve your code or other text documents.\n * This allows Claude to directly interact with your files, providing hands-on assistance rather than just suggesting changes.\n */\n textEditor?: TextEditorTool;\n /**\n * Client tool (Anthropic-defined).\n * The bash tool enables Claude to execute shell commands in a persistent bash session,\n * allowing system operations, script execution, and command-line automation.\n */\n bash?: BashTool;\n /**\n * Server tool (Anthropic-defined).\n * The web search tool gives Claude direct access to real-time web content,\n * allowing it to answer questions with up-to-date information beyond its knowledge cutoff.\n * Claude automatically cites sources from search results as part of its answer.\n */\n webSearch?: WebSearchTool;\n /**\n * Server tool (Anthropic-defined).\n * The code execution tool allows Claude to execute Python code in a secure, sandboxed environment.\n * Claude can analyze data, create visualizations, perform complex calculations, and process uploaded files directly within the API conversation.\n */\n 
codeExecution?: CodeExecutionTool;\n /**\n * Server tool (Anthropic-defined).\n * The web fetch tool allows Claude to retrieve full content from specified web pages and PDF documents.\n */\n webFetch?: WebFetchTool;\n}\n\nexport interface CustomTool {\n /**\n * The name of the tool. Must match the regex ^[a-zA-Z0-9_-]{1,64}$.\n * @maxLength 1000\n */\n name?: string;\n /**\n * Description of what this tool does.\n * Tool descriptions should be as detailed as possible. The more information that the model has about what the tool\n * is and how to use it, the better it will perform. You can use natural language descriptions to reinforce\n * important aspects of the tool input JSON schema.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * JSON schema for this tool's input.\n * This defines the shape of the input that your tool accepts and that the model will produce.\n */\n inputSchema?: V1InputSchema;\n /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */\n cacheControl?: V1CacheControl;\n}\n\nexport interface V1InputSchema {\n /**\n * Available options: object\n * @maxLength 100\n */\n type?: string | null;\n /** Object that defines JSON schema itself. */\n properties?: Record<string, any> | null;\n /**\n * List of required parameters from JSON schema.\n * @maxSize 4096\n * @maxLength 1000\n */\n required?: string[];\n}\n\nexport interface ComputerUseTool {\n /** Display width in pixels, recommend ≤1280 */\n displayWidthPx?: number;\n /** Display height in pixels, recommend ≤800 */\n displayHeightPx?: number;\n /** Display number for X11 environments */\n displayNumber?: number | null;\n}\n\nexport interface TextEditorTool {\n /** Parameter to control truncation when viewing large files. Available only for text_editor_20250728 and later. */\n maxCharacters?: number | null;\n}\n\nexport interface BashTool {\n /**\n * Name must be \"bash\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebSearchTool {\n /** Optional: Limit the number of searches per request; exceeding -> error \"max_uses_exceeded\". */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only include results from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never include results from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Localize search results */\n userLocation?: WebSearchUserLocation;\n /** Optional: caches the tool definition only (it will not cache the results) */\n cacheControl?: V1CacheControl;\n}\n\nexport interface WebSearchUserLocation {\n /**\n * The type of location (must be \"approximate\")\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The city name\n * @maxLength 500\n */\n city?: string | null;\n /**\n * The region or state\n * @maxLength 500\n */\n region?: string | null;\n /**\n * The country\n * @maxLength 500\n */\n country?: string | null;\n /**\n * The IANA timezone ID, e.g. 
\"America/Los_Angeles\"\n * @maxLength 500\n */\n timezone?: string | null;\n}\n\nexport interface CodeExecutionTool {\n /**\n * Name must be \"code_execution\".\n * @maxLength 500\n */\n name?: string | null;\n}\n\nexport interface WebFetchTool {\n /** Optional: Limit the number of fetches per request */\n maxUses?: number | null;\n /**\n * Note: You can use either allowed_domains or blocked_domains, but not both in the same request.\n * Optional: Only fetch from these domains, e.g. \"trusteddomain.org\"\n * @maxSize 100\n * @maxLength 500\n */\n allowedDomains?: string[];\n /**\n * Optional: Never fetch from these domains, e.g. \"untrustedsource.com\"\n * @maxSize 100\n * @maxLength 500\n */\n blockedDomains?: string[];\n /** Optional: Enable citations for fetched content */\n citations?: CitationsEnabled;\n /** Optional: Maximum content length in tokens */\n maxContentTokens?: number | null;\n}\n\nexport interface V1ToolChoice {\n /**\n * AUTO allows Claude to decide whether to call any provided tools or not. This is the default value.\n * ANY tells Claude that it must use one of the provided tools, but doesn’t force a particular tool.\n * TOOL allows us to force Claude to always use a particular tool.\n */\n type?: V1ToolChoiceTypeWithLiterals;\n /**\n * The name of the tool to use in case Type is TOOL.\n * @maxLength 1000\n */\n name?: string | null;\n /**\n * Whether to disable parallel tool use.\n * Defaults to false.\n * If set to true, the model will output at most one tool use (if Type is AUTO) or exactly one tool use (if Type is ANY or TOOL)\n */\n disableParallelToolUse?: boolean | null;\n}\n\nexport enum V1ToolChoiceType {\n UNKNOWN = 'UNKNOWN',\n AUTO = 'AUTO',\n ANY = 'ANY',\n TOOL = 'TOOL',\n NONE = 'NONE',\n}\n\n/** @enumType */\nexport type V1ToolChoiceTypeWithLiterals =\n | V1ToolChoiceType\n | 'UNKNOWN'\n | 'AUTO'\n | 'ANY'\n | 'TOOL'\n | 'NONE';\n\nexport interface V1ThinkingConfig {\n /**\n * Determines how many tokens Claude can use for its internal reasoning process. Larger budgets can enable more thorough\n * analysis for complex problems, improving response quality.\n * Must be ≥1024 and less than max_tokens.\n * @min 1024\n */\n budgetTokens?: number;\n /**\n * When enabled, responses include thinking content blocks showing Claude's thinking process before the final answer.\n * Requires a minimum budget of 1,024 tokens and counts towards your max_tokens limit.\n * Defaults to \"enabled\" in the mapper if unset to preserve legacy behavior.\n */\n enabled?: boolean | null;\n}\n\nexport interface V1McpServer {\n /**\n * McpServer name\n * @maxLength 1000\n */\n name?: string;\n /** Available options: url */\n type?: V1McpServerTypeWithLiterals;\n /**\n * McpServer url\n * @maxLength 10000\n */\n url?: string | null;\n /** Tool configuration */\n toolConfiguration?: McpServerToolConfiguration;\n}\n\nexport enum V1McpServerType {\n UNKNOWN = 'UNKNOWN',\n URL = 'URL',\n}\n\n/** @enumType */\nexport type V1McpServerTypeWithLiterals = V1McpServerType | 'UNKNOWN' | 'URL';\n\nexport interface McpServerToolConfiguration {\n /**\n * Allowed tools\n * @maxLength 1000\n * @maxSize 100\n */\n allowedTools?: string[];\n /** Enabled */\n enabled?: boolean | null;\n}\n\nexport interface RequestMetadata {\n /**\n * An external identifier for the user who is associated with the request.\n * This should be a uuid, hash value, or other opaque identifier. Anthropic may use this id to help detect abuse. 
Do not include any identifying information such as name, email address, or phone number.\n * Maximum length: 256\n * Examples: \"13803d75-b4b5-4c3e-b2a2-6f21399b021b\"\n * @maxLength 256\n */\n userId?: string | null;\n}\n\nexport interface InvokeLlamaModelRequest {\n /** The unique identifier of the model to invoke to run inference. */\n model?: LlamaModelWithLiterals;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport enum LlamaModel {\n UNKNOWN_LLAMA_MODEL = 'UNKNOWN_LLAMA_MODEL',\n /** meta.llama3-8b-instruct-v1:0 */\n LLAMA_3_8B_INSTRUCT_1_0 = 'LLAMA_3_8B_INSTRUCT_1_0',\n /** meta.llama3-70b-instruct-v1:0 */\n LLAMA_3_70B_INSTRUCT_1_0 = 'LLAMA_3_70B_INSTRUCT_1_0',\n /** meta.llama3-1-8b-instruct-v1:0 */\n LLAMA_3_1_8B_INSTRUCT_1_0 = 'LLAMA_3_1_8B_INSTRUCT_1_0',\n /** meta.llama3-1-70b-instruct-v1:0 */\n LLAMA_3_1_70B_INSTRUCT_1_0 = 'LLAMA_3_1_70B_INSTRUCT_1_0',\n /** meta.llama3-2-1b-instruct-v1:0 */\n LLAMA_3_2_1B_INSTRUCT_1_0 = 'LLAMA_3_2_1B_INSTRUCT_1_0',\n /** meta.llama3-2-3b-instruct-v1:0 */\n LLAMA_3_2_3B_INSTRUCT_1_0 = 'LLAMA_3_2_3B_INSTRUCT_1_0',\n}\n\n/** @enumType */\nexport type LlamaModelWithLiterals =\n | LlamaModel\n | 'UNKNOWN_LLAMA_MODEL'\n | 'LLAMA_3_8B_INSTRUCT_1_0'\n | 'LLAMA_3_70B_INSTRUCT_1_0'\n | 'LLAMA_3_1_8B_INSTRUCT_1_0'\n | 'LLAMA_3_1_70B_INSTRUCT_1_0'\n | 'LLAMA_3_2_1B_INSTRUCT_1_0'\n | 'LLAMA_3_2_3B_INSTRUCT_1_0';\n\nexport interface InvokeConverseRequest {\n /** The foundation model to use for this conversation. */\n model?: ConverseModelWithLiterals;\n /**\n * Conversation history and new input. Processed in the order provided.\n * @maxSize 4096\n */\n messages?: ConverseMessage[];\n /** Parameters controlling text generation behavior. */\n inferenceConfig?: ConverseInferenceConfig;\n /** Tool configuration for function calling. */\n toolConfig?: ToolConfig;\n /** Latency optimization settings. */\n performanceConfig?: ConversePerformanceConfig;\n /**\n * System prompts providing high-level instructions. Processed before conversation messages.\n * @maxSize 100\n */\n system?: SystemContentBlock[];\n /** Model-specific parameters as a JSON object. */\n additionalModelRequestFields?: Record<string, any> | null;\n /**\n * JSON paths to extract from the model's raw response.\n * @maxLength 1000\n * @maxSize 100\n */\n additionalModelResponseFieldPaths?: string[];\n}\n\nexport enum ConverseModel {\n UNKNOWN_CONVERSE_MODEL = 'UNKNOWN_CONVERSE_MODEL',\n /** OPEN AI Models */\n OPEN_AI_GPT_OSS_120B = 'OPEN_AI_GPT_OSS_120B',\n /** MiniMax AI */\n MINIMAX_M2 = 'MINIMAX_M2',\n}\n\n/** @enumType */\nexport type ConverseModelWithLiterals =\n | ConverseModel\n | 'UNKNOWN_CONVERSE_MODEL'\n | 'OPEN_AI_GPT_OSS_120B'\n | 'MINIMAX_M2';\n\nexport interface ConverseMessage {\n /** The role that generated this message (user or assistant). 
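As an illustrative sketch only (assuming the role enum exposes a 'USER' literal; the text is a placeholder, not part of the API): { role: 'USER', content: [{ text: 'Summarize this document' }] } would be a minimal user turn.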
*/\n role?: RoleWithLiterals;\n /**\n * Content blocks that can include text, tool use, and tool results.\n * @maxSize 4096\n */\n content?: ConverseContentBlock[];\n}\n\n/** Converse-specific content block (simplified structure for AWS Bedrock Converse API) */\nexport interface ConverseContentBlock extends ConverseContentBlockContentOneOf {\n /**\n * Plain text content.\n * @maxLength 100000000\n */\n text?: string;\n /** Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response. */\n reasoningContent?: ConverseReasoningContent;\n /** Tool use block representing a function call request. */\n toolUse?: ConverseToolUse;\n /** Tool result block containing the output of a tool execution. */\n toolResult?: ConverseToolResult;\n}\n\n/** @oneof */\nexport interface ConverseContentBlockContentOneOf {\n /**\n * Plain text content.\n * @maxLength 100000000\n */\n text?: string;\n /** Reasoning refers to a Chain of Thought (CoT) that the model generates to enhance the accuracy of its final response. */\n reasoningContent?: ConverseReasoningContent;\n /** Tool use block representing a function call request. */\n toolUse?: ConverseToolUse;\n /** Tool result block containing the output of a tool execution. */\n toolResult?: ConverseToolResult;\n}\n\nexport interface ConverseReasoningContent {\n /** Contains the reasoning that the model used to return the output. */\n reasoningText?: ReasoningText;\n}\n\nexport interface ReasoningText {\n /**\n * The reasoning that the model used to return the output.\n * @maxLength 100000000\n */\n text?: string;\n}\n\n/** Tool use request from the model */\nexport interface ConverseToolUse {\n /**\n * Unique identifier for this tool use.\n * @maxLength 1000\n */\n toolUseId?: string;\n /**\n * Name of the tool being invoked.\n * @maxLength 1000\n */\n name?: string;\n /** Input parameters for the tool as a JSON object. */\n input?: Record<string, any> | null;\n}\n\n/** Tool execution result */\nexport interface ConverseToolResult {\n /**\n * Identifier matching the tool_use_id from the ToolUse request.\n * @maxLength 1000\n */\n toolUseId?: string;\n /**\n * Result content (text only for now).\n * @maxSize 1000\n */\n content?: ConverseToolResultContent[];\n /**\n * Execution status: 'success' or 'error'.\n * @maxLength 100\n */\n status?: string | null;\n}\n\n/** Tool result content (text only for now) */\nexport interface ConverseToolResultContent\n extends ConverseToolResultContentContentOneOf {\n /**\n * A tool result that is text.\n * @maxLength 100000000\n */\n text?: string;\n /** A tool result that is JSON format data. */\n json?: Record<string, any> | null;\n}\n\n/** @oneof */\nexport interface ConverseToolResultContentContentOneOf {\n /**\n * A tool result that is text.\n * @maxLength 100000000\n */\n text?: string;\n /** A tool result that is JSON format data. */\n json?: Record<string, any> | null;\n}\n\n/** Parameters that control the model's text generation behavior. */\nexport interface ConverseInferenceConfig {\n /**\n * Maximum tokens to generate before stopping.\n * @min 1\n */\n maxTokens?: number | null;\n /**\n * Randomness in output. Higher values (closer to 1.0) increase creativity.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Nucleus sampling threshold. 
The model considers tokens whose cumulative probability exceeds this value.\n * @max 1\n */\n topP?: number | null;\n /**\n * Text sequences that cause the model to stop generating.\n * @maxLength 512\n * @maxSize 8191\n */\n stopSequences?: string[];\n}\n\n/** Converse-specific tool configuration */\nexport interface ToolConfig {\n /**\n * Available tools for the model.\n * @maxSize 1000\n */\n tools?: ConverseTool[];\n /** How the model should use tools. */\n toolChoice?: ToolChoice;\n}\n\n/** Tool wrapper with specification */\nexport interface ConverseTool {\n /** Tool specification containing name, description, and input schema. */\n toolSpec?: ToolSpecification;\n}\n\n/** Detailed tool specification */\nexport interface ToolSpecification {\n /**\n * Name of the tool.\n * @maxLength 256\n */\n name?: string;\n /**\n * Description of what the tool does.\n * @maxLength 2048\n */\n description?: string | null;\n /** JSON schema for tool input parameters. */\n inputSchema?: ConverseInputSchema;\n}\n\n/** Input schema wrapper */\nexport interface ConverseInputSchema {\n /** JSON schema as a Struct (wraps the schema in \"json\" field for AWS API). */\n json?: Record<string, any> | null;\n}\n\nexport interface ConversePerformanceConfig {\n /**\n * The desired latency profile. Valid values: 'standard' (default) or 'optimized'.\n * @maxLength 100\n */\n latency?: string | null;\n}\n\nexport interface SystemContentBlock {\n /**\n * Text providing high-level instructions or context for the conversation.\n * @maxLength 100000000\n */\n text?: string | null;\n}\n\nexport interface CreateImageRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: V1ImageModelWithLiterals;\n /** The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.\n */\n quality?: ImageQualityWithLiterals;\n /** The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. */\n size?: ImageSizeWithLiterals;\n /**\n * The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images.\n * Natural causes the model to produce more natural, less hyper-real looking images. 
This param is only supported for dall-e-3.\n */\n style?: ImageStyleWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 100\n */\n user?: string | null;\n}\n\nexport enum V1ImageModel {\n UNKNOWN_IMAGE_GENERATION_MODEL = 'UNKNOWN_IMAGE_GENERATION_MODEL',\n DALL_E_2 = 'DALL_E_2',\n DALL_E_3 = 'DALL_E_3',\n}\n\n/** @enumType */\nexport type V1ImageModelWithLiterals =\n | V1ImageModel\n | 'UNKNOWN_IMAGE_GENERATION_MODEL'\n | 'DALL_E_2'\n | 'DALL_E_3';\n\nexport enum ImageQuality {\n UNKNOWN_IMAGE_QUALITY = 'UNKNOWN_IMAGE_QUALITY',\n STANDARD = 'STANDARD',\n HD = 'HD',\n}\n\n/** @enumType */\nexport type ImageQualityWithLiterals =\n | ImageQuality\n | 'UNKNOWN_IMAGE_QUALITY'\n | 'STANDARD'\n | 'HD';\n\nexport enum ImageSize {\n UNKNOWN_IMAGE_SIZE = 'UNKNOWN_IMAGE_SIZE',\n SIZE_256X256 = 'SIZE_256X256',\n SIZE_512X512 = 'SIZE_512X512',\n SIZE_1024X1024 = 'SIZE_1024X1024',\n SIZE_1792X1024 = 'SIZE_1792X1024',\n SIZE_1024X1792 = 'SIZE_1024X1792',\n}\n\n/** @enumType */\nexport type ImageSizeWithLiterals =\n | ImageSize\n | 'UNKNOWN_IMAGE_SIZE'\n | 'SIZE_256X256'\n | 'SIZE_512X512'\n | 'SIZE_1024X1024'\n | 'SIZE_1792X1024'\n | 'SIZE_1024X1792';\n\nexport enum ImageStyle {\n UNKNOWN_IMAGE_STYLE = 'UNKNOWN_IMAGE_STYLE',\n VIVID = 'VIVID',\n NATURAL = 'NATURAL',\n}\n\n/** @enumType */\nexport type ImageStyleWithLiterals =\n | ImageStyle\n | 'UNKNOWN_IMAGE_STYLE'\n | 'VIVID'\n | 'NATURAL';\n\nexport interface V1TextToImageRequest {\n /** The model to use for generating the image. */\n model?: ImageModelWithLiterals;\n /** Height of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n height?: number | null;\n /** Width of the image to generate, in pixels, in an increment divisible by 64. Default: 512 */\n width?: number | null;\n /**\n * An array of text prompts to use for generation.\n * @minSize 1\n * @maxSize 10\n */\n textPrompts?: TextPrompt[];\n /** How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt). Default: 7 */\n cfgScale?: number | null;\n /**\n * CLIP Guidance is a technique that uses the CLIP neural network to guide the generation of images to be more in line with your included prompt,\n * which often results in improved coherency.\n */\n clipGuidancePreset?: ClipGuidancePresetWithLiterals;\n /** Which sampler to use for the diffusion process. If this value is omitted we'll automatically select an appropriate sampler for you. */\n sampler?: SamplerWithLiterals;\n /** Number of images to generate. Default: 1 */\n samples?: number | null;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Number of diffusion steps to run. Default: 30 */\n steps?: number | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. 
This list of style presets is subject to change.\n */\n stylePreset?: TextToImageRequestStylePresetWithLiterals;\n}\n\nexport enum ImageModel {\n STABILITY_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_IMAGE_MODEL_UNSPECIFIED',\n /** stable-diffusion-xl-1024-v1-0 - Stable Diffusion XL v1.0 */\n SDXL_1_0 = 'SDXL_1_0',\n}\n\n/** @enumType */\nexport type ImageModelWithLiterals =\n | ImageModel\n | 'STABILITY_IMAGE_MODEL_UNSPECIFIED'\n | 'SDXL_1_0';\n\nexport interface TextPrompt {\n /**\n * The text to generate the image from.\n * @maxLength 4000\n */\n text?: string | null;\n /** The weight of the text prompt. */\n weight?: number | null;\n}\n\nexport enum ClipGuidancePreset {\n CLIP_GUIDANCE_PRESET_UNSPECIFIED = 'CLIP_GUIDANCE_PRESET_UNSPECIFIED',\n FAST_BLUE = 'FAST_BLUE',\n FAST_GREEN = 'FAST_GREEN',\n NONE = 'NONE',\n SIMPLE = 'SIMPLE',\n SLOW = 'SLOW',\n SLOWER = 'SLOWER',\n SLOWEST = 'SLOWEST',\n}\n\n/** @enumType */\nexport type ClipGuidancePresetWithLiterals =\n | ClipGuidancePreset\n | 'CLIP_GUIDANCE_PRESET_UNSPECIFIED'\n | 'FAST_BLUE'\n | 'FAST_GREEN'\n | 'NONE'\n | 'SIMPLE'\n | 'SLOW'\n | 'SLOWER'\n | 'SLOWEST';\n\nexport enum Sampler {\n SAMPLER_UNSPECIFIED = 'SAMPLER_UNSPECIFIED',\n DDIM = 'DDIM',\n DDPM = 'DDPM',\n K_DPMPP_2M = 'K_DPMPP_2M',\n K_DPMPP_2S_ANCESTRAL = 'K_DPMPP_2S_ANCESTRAL',\n K_DPM_2 = 'K_DPM_2',\n K_DPM_2_ANCESTRAL = 'K_DPM_2_ANCESTRAL',\n K_EULER = 'K_EULER',\n K_EULER_ANCESTRAL = 'K_EULER_ANCESTRAL',\n K_HEUN = 'K_HEUN',\n K_LMS = 'K_LMS',\n}\n\n/** @enumType */\nexport type SamplerWithLiterals =\n | Sampler\n | 'SAMPLER_UNSPECIFIED'\n | 'DDIM'\n | 'DDPM'\n | 'K_DPMPP_2M'\n | 'K_DPMPP_2S_ANCESTRAL'\n | 'K_DPM_2'\n | 'K_DPM_2_ANCESTRAL'\n | 'K_EULER'\n | 'K_EULER_ANCESTRAL'\n | 'K_HEUN'\n | 'K_LMS';\n\nexport enum TextToImageRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type TextToImageRequestStylePresetWithLiterals =\n | TextToImageRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateCoreRequest {\n /** The model to use for generating the image. 
Will always be STABLE_IMAGE_CORE. */\n model?: ImageCoreModelWithLiterals;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Default: 1:1\n * One of: 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Default: 0\n * A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: GenerateCoreRequestStylePresetWithLiterals;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum ImageCoreModel {\n STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED = 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED',\n STABLE_IMAGE_CORE = 'STABLE_IMAGE_CORE',\n}\n\n/** @enumType */\nexport type ImageCoreModelWithLiterals =\n | ImageCoreModel\n | 'STABILITY_CORE_IMAGE_MODEL_UNSPECIFIED'\n | 'STABLE_IMAGE_CORE';\n\nexport enum GenerateCoreRequestStylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type GenerateCoreRequestStylePresetWithLiterals =\n | GenerateCoreRequestStylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface GenerateStableDiffusionRequest {\n /**\n * The text prompt to generate the image from.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Controls whether this is a text-to-image or image-to-image generation.\n * - TEXT_TO_IMAGE requires only the prompt parameter.\n * - IMAGE_TO_IMAGE requires prompt, image, and strength parameters.\n */\n mode?: 
GenerationModeWithLiterals;\n /**\n * The image to use as the starting point for the generation.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Controls how much influence the image parameter has on the output image.\n * A value of 0 yields an image identical to the input; 1 ignores the input image.\n * This parameter is only valid for IMAGE_TO_IMAGE mode.\n */\n strength?: number | null;\n /**\n * Default: 1:1\n * One of : 16:9 1:1 21:9 2:3 3:2 4:5 5:4 9:16 9:21\n * Controls the aspect ratio of the generated image.\n * This parameter is only valid for TEXT_TO_IMAGE mode.\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /** The model to use for generation. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** A specific value [ 0 .. 4294967294 ] that is used to guide the 'randomness' of the generation. (Omit this parameter or pass 0 to use a random seed.) */\n seed?: string | null;\n /** Dictates the content-type of the generated image. */\n outputFormat?: GenerateStableDiffusionRequestOutputFormatWithLiterals;\n /**\n * Keywords of what you do not wish to see in the output image.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n}\n\nexport enum GenerationMode {\n UNKNOWN_GENERATION_MODE = 'UNKNOWN_GENERATION_MODE',\n TEXT_TO_IMAGE = 'TEXT_TO_IMAGE',\n IMAGE_TO_IMAGE = 'IMAGE_TO_IMAGE',\n}\n\n/** @enumType */\nexport type GenerationModeWithLiterals =\n | GenerationMode\n | 'UNKNOWN_GENERATION_MODE'\n | 'TEXT_TO_IMAGE'\n | 'IMAGE_TO_IMAGE';\n\nexport enum ImageStableDiffusionModel {\n STABLE_DIFFUSION_MODEL_UNSPECIFIED = 'STABLE_DIFFUSION_MODEL_UNSPECIFIED',\n /** sd3-large */\n SD3_LARGE = 'SD3_LARGE',\n /** sd3-large-turbo */\n SD3_LARGE_TURBO = 'SD3_LARGE_TURBO',\n /** sd3-medium */\n SD3_MEDIUM = 'SD3_MEDIUM',\n /** sd3.5-large */\n SD3_5_LARGE = 'SD3_5_LARGE',\n /** sd3.5-large-turbo */\n SD3_5_LARGE_TURBO = 'SD3_5_LARGE_TURBO',\n /** sd3.5-medium */\n SD3_5_MEDIUM = 'SD3_5_MEDIUM',\n}\n\n/** @enumType */\nexport type ImageStableDiffusionModelWithLiterals =\n | ImageStableDiffusionModel\n | 'STABLE_DIFFUSION_MODEL_UNSPECIFIED'\n | 'SD3_LARGE'\n | 'SD3_LARGE_TURBO'\n | 'SD3_MEDIUM'\n | 'SD3_5_LARGE'\n | 'SD3_5_LARGE_TURBO'\n | 'SD3_5_MEDIUM';\n\nexport enum GenerateStableDiffusionRequestOutputFormat {\n OUTPUT_FORMAT_UNSPECIFIED = 'OUTPUT_FORMAT_UNSPECIFIED',\n JPEG = 'JPEG',\n PNG = 'PNG',\n}\n\n/** @enumType */\nexport type GenerateStableDiffusionRequestOutputFormatWithLiterals =\n | GenerateStableDiffusionRequestOutputFormat\n | 'OUTPUT_FORMAT_UNSPECIFIED'\n | 'JPEG'\n | 'PNG';\n\n/** Request to generate an image */\nexport interface GenerateAnImageRequest {\n /** The model to use for generating the image. */\n model?: GenerateAnImageModelWithLiterals;\n /**\n * The prompt to use for image generation.\n * Relevant models : ALL\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Optional seed for reproducibility. If not provided, a random seed will be used.\n * Relevant models : ALL\n */\n seed?: number | null;\n /**\n * Aspect ratio of the image between 21:9 and 9:21\n * default: 16:9\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @maxLength 100\n */\n aspectRatio?: string | null;\n /**\n * Width of the generated image in pixels. Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n width?: number | null;\n /**\n * Height of the generated image in pixels. 
Must be a multiple of 32.\n * Relevant models : FLUX_1_DEV\n * @min 256\n * @max 1440\n */\n height?: number | null;\n /**\n * Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.\n * Relevant models : ALL\n * @max 6\n */\n safetyTolerance?: number | null;\n /**\n * Output format for the generated image. Can be 'jpeg' or 'png'.\n * Relevant models : ALL\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Generate less processed, more natural-looking images\n * Relevant models : FLUX_PRO_1_1_ULTRA\n */\n raw?: boolean | null;\n /**\n * Optional image to remix\n * The URL must be a valid wix mp or wix static URL.\n * Relevant models FLUX_PRO_1_1_ULTRA, FLUX_1_DEV, FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Blend between the prompt and the image prompt\n * Relevant models : FLUX_PRO_1_1_ULTRA\n * @max 1\n */\n imagePromptStrength?: number | null;\n /**\n * Optional image to remix\n * Image to use as control input - relevant models FLUX_PRO_1_DEPTH and FLUX_PRO_1_CANNY\n * @maxLength 100000\n */\n controlImageUrl?: string | null;\n /**\n * Whether to perform upsampling on the prompt\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n */\n promptUpsampling?: boolean | null;\n /**\n * Number of steps for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @min 15\n * @max 50\n */\n steps?: number | null;\n /**\n * Guidance strength for the image generation process\n * Relevant models FLUX_1_DEV, FLUX_PRO_1_DEPTH, FLUX_PRO_1_CANNY, FLUX_PRO_1_FILL\n * @max 100\n */\n guidance?: number | null;\n /**\n * Image Mask\n * A URL representing a mask for the areas you want to modify in the image.\n * The mask should be the same dimensions as the image and in black and white.\n * Black areas (0%) indicate no modification, while white areas (100%) specify areas for inpainting.\n * Optional if you provide an alpha mask in the original image.\n * Validation: The endpoint verifies that the dimensions of the mask match the original image.\n * Relevant models FLUX_PRO_1_FILL\n * @maxLength 100000\n */\n imageMaskUrl?: string | null;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum GenerateAnImageModel {\n GEN_IMAGE_MODEL_UNSPECIFIED = 'GEN_IMAGE_MODEL_UNSPECIFIED',\n FLUX_PRO_1_1_ULTRA = 'FLUX_PRO_1_1_ULTRA',\n FLUX_1_DEV = 'FLUX_1_DEV',\n FLUX_PRO_1_CANNY = 'FLUX_PRO_1_CANNY',\n FLUX_PRO_1_DEPTH = 'FLUX_PRO_1_DEPTH',\n FLUX_PRO_1_FILL = 'FLUX_PRO_1_FILL',\n}\n\n/** @enumType */\nexport type GenerateAnImageModelWithLiterals =\n | GenerateAnImageModel\n | 'GEN_IMAGE_MODEL_UNSPECIFIED'\n | 'FLUX_PRO_1_1_ULTRA'\n | 'FLUX_1_DEV'\n | 'FLUX_PRO_1_CANNY'\n | 'FLUX_PRO_1_DEPTH'\n | 'FLUX_PRO_1_FILL';\n\nexport interface CreatePredictionRequest\n extends CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n /** Input 
for qwen-image-layered */\n qwenImageLayered?: QwenImageLayered;\n /** The model version ID */\n model?: CreatePredictionModelWithLiterals;\n /**\n * skip polling flag - if set to true, the response will be returned immediately without waiting for the image to be generated.\n * user should call GetResult to get the image.\n */\n skipPolling?: boolean | null;\n}\n\n/** @oneof */\nexport interface CreatePredictionRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: FluxPulid;\n /** Input for FLUX_DEV_CONTROLNET */\n fluxDevControlnet?: FluxDevControlnet;\n /** Input for REVE_EDIT */\n reveEdit?: ReveEdit;\n /** Input for Florence 2 */\n lucatacoFlorence2Large?: LucatacoFlorence2Large;\n /** Input for Isaac-0.1 */\n perceptronIsaac01?: PerceptronIsaac01;\n /** Input for z-image-turbo */\n prunaaiZImageTurbo?: PrunaaiZImageTurbo;\n /** Input for qwen-image-layered */\n qwenImageLayered?: QwenImageLayered;\n}\n\nexport enum CreatePredictionModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n /** Flux-dev-controlnet */\n FLUX_DEV_CONTROLNET = 'FLUX_DEV_CONTROLNET',\n /** https://replicate.com/reve/edit. Has a `prompt` field, routed through GenerateContent */\n REVE_EDIT = 'REVE_EDIT',\n /** https://replicate.com/lucataco/florence-2-large */\n LUCATACO_FLORENCE_2_LARGE = 'LUCATACO_FLORENCE_2_LARGE',\n /** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\n PERCEPTRON_ISAAC_01 = 'PERCEPTRON_ISAAC_01',\n /** https://replicate.com/prunaai/z-image-turbo */\n PRUNAAI_Z_IMAGE_TURBO = 'PRUNAAI_Z_IMAGE_TURBO',\n /** https://replicate.com/qwen/qwen-image-layered */\n QWEN_IMAGE_LAYERED = 'QWEN_IMAGE_LAYERED',\n}\n\n/** @enumType */\nexport type CreatePredictionModelWithLiterals =\n | CreatePredictionModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID'\n | 'FLUX_DEV_CONTROLNET'\n | 'REVE_EDIT'\n | 'LUCATACO_FLORENCE_2_LARGE'\n | 'PERCEPTRON_ISAAC_01'\n | 'PRUNAAI_Z_IMAGE_TURBO'\n | 'QWEN_IMAGE_LAYERED';\n\nexport interface FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * Number of images to generate\n * @min 1\n * @max 4\n */\n numOutputs?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. 
Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the quality of the output image for jpg and webp (1-100)\n * @min 1\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n}\n\nexport interface FluxDevControlnet {\n /** Set a seed for reproducibility. Random by default. */\n seed?: number | null;\n /**\n * Number of steps\n * @min 1\n * @max 50\n */\n steps?: number | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Optional LoRA model to use.\n * Give a URL to a HuggingFace .safetensors file, a Replicate .tar file or a CivitAI download link.\n * @maxLength 2000\n */\n loraUrl?: string | null;\n /**\n * Type of control net\n * @maxLength 100\n */\n controlType?: string | null;\n /**\n * Image to use with control net\n * @maxLength 2000\n */\n controlImage?: string | null;\n /**\n * Strength of LoRA model\n * @min -1\n * @max 3\n */\n loraStrength?: number | null;\n /**\n * Format of the output images\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Guidance scale\n * @max 5\n */\n guidanceScale?: number | null;\n /**\n * Quality of the output images, from 0 to 100.\n * @max 100\n */\n outputQuality?: number | null;\n /**\n * Things you do not want to see in your image\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Strength of control net.\n * @max 3\n */\n controlStrength?: number | null;\n /**\n * Preprocessor to use with depth control net\n * @maxLength 100\n */\n depthPreprocessor?: string | null;\n /**\n * Preprocessor to use with soft edge control net\n * @maxLength 100\n */\n softEdgePreprocessor?: string | null;\n /**\n * Strength of image to image control.\n * @max 1\n */\n imageToImageStrength?: number | null;\n /** Return the preprocessed image used to control the generation process. */\n returnPreprocessedImage?: boolean | null;\n}\n\nexport interface ReveEdit {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Edit instructions\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Specific version to use. 
Default: \"latest\"\n * @maxLength 10000\n */\n version?: string | null;\n}\n\n/** https://replicate.com/lucataco/florence-2-large/readme */\nexport interface LucatacoFlorence2Large {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /** Which task to perform */\n taskInput?: TaskInputWithLiterals;\n /**\n * Optional input for some task types\n * @maxLength 10000\n */\n textInput?: string | null;\n}\n\nexport enum TaskInput {\n UNRECOGNIZED_TASK_INPUT = 'UNRECOGNIZED_TASK_INPUT',\n OBJECT_DETECTION = 'OBJECT_DETECTION',\n CAPTION = 'CAPTION',\n DETAILED_CAPTION = 'DETAILED_CAPTION',\n MORE_DETAILED_CAPTION = 'MORE_DETAILED_CAPTION',\n CAPTION_TO_PHRASE_GROUNDING = 'CAPTION_TO_PHRASE_GROUNDING',\n REGION_PROPOSAL = 'REGION_PROPOSAL',\n DENSE_REGION_CAPTION = 'DENSE_REGION_CAPTION',\n OCR = 'OCR',\n OCR_WITH_REGION = 'OCR_WITH_REGION',\n}\n\n/** @enumType */\nexport type TaskInputWithLiterals =\n | TaskInput\n | 'UNRECOGNIZED_TASK_INPUT'\n | 'OBJECT_DETECTION'\n | 'CAPTION'\n | 'DETAILED_CAPTION'\n | 'MORE_DETAILED_CAPTION'\n | 'CAPTION_TO_PHRASE_GROUNDING'\n | 'REGION_PROPOSAL'\n | 'DENSE_REGION_CAPTION'\n | 'OCR'\n | 'OCR_WITH_REGION';\n\n/** https://replicate.com/perceptron-ai-inc/isaac-0.1 */\nexport interface PerceptronIsaac01 {\n /**\n * Image URI\n * @maxLength 10000\n */\n image?: string | null;\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /** Which task to perform */\n response?: ResponseTypeWithLiterals;\n /** Max new tokens */\n maxNewTokens?: string | null;\n}\n\nexport enum ResponseType {\n UNRECOGNIZED_RESPONSE_TYPE = 'UNRECOGNIZED_RESPONSE_TYPE',\n TEXT = 'TEXT',\n BOX = 'BOX',\n POINT = 'POINT',\n POLYGON = 'POLYGON',\n}\n\n/** @enumType */\nexport type ResponseTypeWithLiterals =\n | ResponseType\n | 'UNRECOGNIZED_RESPONSE_TYPE'\n | 'TEXT'\n | 'BOX'\n | 'POINT'\n | 'POLYGON';\n\n/** https://replicate.com/prunaai/z-image-turbo */\nexport interface PrunaaiZImageTurbo {\n /**\n * Prompt\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Height of the generated image\n * @min 64\n * @max 2048\n */\n width?: number | null;\n /**\n * Width of the generated image\n * @min 64\n * @max 2048\n */\n height?: number | null;\n /**\n * Number of inference steps. This actually results in (num_inference_steps - 1) DiT forwards\n * @min 1\n * @max 50\n */\n numInferenceSteps?: number | null;\n /**\n * Guidance scale. Should be 0 for Turbo models\n * @max 20\n */\n guidanceScale?: number | null;\n /** Random seed. Set for reproducible generation */\n seed?: number | null;\n /**\n * Format of the output images\n * @maxLength 5\n */\n outputFormat?: string | null;\n /**\n * Quality when saving the output images, from 0 to 100. 100 is best quality, 0 is lowest quality. Not relevant for .png outputs\n * @max 100\n */\n outputQuality?: number | null;\n}\n\n/** https://replicate.com/qwen/qwen-image-layered */\nexport interface QwenImageLayered {\n /**\n * Image to be converted into a layered image\n * @minLength 1\n * @maxLength 100000\n */\n image?: string;\n /**\n * Number of layers to generate (2-8)\n * @min 2\n * @max 8\n */\n numLayers?: number | null;\n /**\n * Text description of the input image. Use 'auto' for auto captioning\n * @maxLength 100000\n */\n description?: string | null;\n /** Run faster predictions with additional optimizations */\n goFast?: boolean | null;\n /** Random seed. Set for reproducible generation */\n seed?: number | null;\n /**\n * Format of the output images. 
Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Quality when saving the output images, from 0 to 100. Not relevant for .png outputs\n * @max 100\n */\n outputQuality?: number | null;\n /** Disable safety checker for generated images */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface EditImageWithPromptRequest {\n /** The model to use for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * image format jpeg, png, webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * What you wish to see in the output image.\n * A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.\n * To control the weight of a given word use the format (word:weight),\n * where word is the word you'd like to control the weight of and weight is a value between 0 and 1.\n * For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue.\n * Optional for OUTPAINT model , and required for INPAINT model\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * A blurb of text describing what you do not wish to see in the output image.\n * This is an advanced feature.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask\n * The image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support\n * If you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * Relevant only for INPAINT model\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * image mask format jpeg, png, webp\n * Relevant only for INPAINT model\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * Relevant only for INPAINT model\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [ 0 .. 
4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Default: png\n * Enum: jpeg png webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * style_preset 3d-model analog-film anime cinematic comic-book digital-art enhance fantasy-art isometric line-art low-poly modeling-compound neon-punk origami photographic pixel-art tile-texture\n * Pass in a style preset to guide the image model towards a particular style. This list of style presets is subject to change.\n */\n stylePreset?: StylePresetWithLiterals;\n /**\n * The direction to outpaint the image\n * Relevant only for OUTPAINT model\n * At least one of the fields must be set\n */\n outpaintDirection?: OutpaintDirection;\n /**\n * Controls the likelihood of creating additional details not heavily conditioned by the init image [0..1]\n * Relevant only for OUTPAINT model\n * @max 1\n */\n creativity?: number | null;\n}\n\nexport enum EditImageWithPromptRequestModel {\n UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL',\n INPAINT = 'INPAINT',\n OUTPAINT = 'OUTPAINT',\n}\n\n/** @enumType */\nexport type EditImageWithPromptRequestModelWithLiterals =\n | EditImageWithPromptRequestModel\n | 'UNKNOWN_EDIT_IMAGE_WITH__PROMPT_REQUEST_MODEL'\n | 'INPAINT'\n | 'OUTPAINT';\n\nexport enum StylePreset {\n STYLE_PRESET_UNSPECIFIED = 'STYLE_PRESET_UNSPECIFIED',\n ANALOG_FILM = 'ANALOG_FILM',\n ANIME = 'ANIME',\n CINEMATIC = 'CINEMATIC',\n COMIC_BOOK = 'COMIC_BOOK',\n DIGITAL_ART = 'DIGITAL_ART',\n ENHANCE = 'ENHANCE',\n FANTASY_ART = 'FANTASY_ART',\n ISOMETRIC = 'ISOMETRIC',\n LINE_ART = 'LINE_ART',\n LOW_POLY = 'LOW_POLY',\n MODELING_COMPOUND = 'MODELING_COMPOUND',\n NEON_PUNK = 'NEON_PUNK',\n ORIGAMI = 'ORIGAMI',\n PHOTOGRAPHIC = 'PHOTOGRAPHIC',\n PIXEL_ART = 'PIXEL_ART',\n TILE_TEXTURE = 'TILE_TEXTURE',\n MODEL_3D = 'MODEL_3D',\n}\n\n/** @enumType */\nexport type StylePresetWithLiterals =\n | StylePreset\n | 'STYLE_PRESET_UNSPECIFIED'\n | 'ANALOG_FILM'\n | 'ANIME'\n | 'CINEMATIC'\n | 'COMIC_BOOK'\n | 'DIGITAL_ART'\n | 'ENHANCE'\n | 'FANTASY_ART'\n | 'ISOMETRIC'\n | 'LINE_ART'\n | 'LOW_POLY'\n | 'MODELING_COMPOUND'\n | 'NEON_PUNK'\n | 'ORIGAMI'\n | 'PHOTOGRAPHIC'\n | 'PIXEL_ART'\n | 'TILE_TEXTURE'\n | 'MODEL_3D';\n\nexport interface OutpaintDirection {\n /**\n * The number of pixels to outpaint on the left side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n left?: number | null;\n /**\n * The number of pixels to outpaint on the right side of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n right?: number | null;\n /**\n * The number of pixels to outpaint on the top of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n up?: number | null;\n /**\n * The number of pixels to outpaint on the bottom of the image [0..2000]\n * Relevant only for OUTPAINT model\n * @max 2000\n */\n down?: number | null;\n}\n\nexport interface TextToImageRequest {\n /**\n * Specifies the format of the output image. Supported formats are: PNG, JPG and WEBP. Default: JPG.\n * @maxLength 4\n */\n outputFormat?: string | null;\n /**\n * Sets the compression quality of the output image. Higher values preserve more quality but increase file size, lower values reduce file size but decrease quality. 
Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /** This parameter is used to enable or disable the NSFW check. */\n checkNsfw?: boolean | null;\n /**\n * A positive prompt is a text instruction to guide the model on generating the image. It is usually a sentence or a paragraph that provides positive guidance for the task. This parameter is essential to shape the desired results.\n * For example, if the positive prompt is \"dragon drinking coffee\", the model will generate an image of a dragon drinking coffee. The more detailed the prompt, the more accurate the results.\n * The length of the prompt must be between 2 and 3000 characters.\n * @maxLength 1000000\n */\n positivePrompt?: string;\n /**\n * Used to define the height dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, e.g. 128...512, 576, 640...2048.\n */\n height?: number;\n /**\n * Used to define the width dimension of the generated image. Certain models perform better with specific dimensions.\n * The value must be divisible by 64, e.g. 128...512, 576, 640...2048.\n */\n width?: number;\n /**\n * A list of reference image URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /** Model to invoke. */\n model?: TextToImageRequestModelWithLiterals;\n /**\n * Model id as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The number of steps is the number of iterations the model will perform to generate the image. Default: 28.\n * @min 1\n * @max 100\n */\n steps?: number | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /**\n * Guidance scale represents how closely the images will resemble the prompt or how much freedom the AI model has. Higher values are closer to the prompt. Low values may reduce the quality of the results. Default: 7.\n * @max 30\n */\n cfgScale?: number | null;\n /** The number of images to generate from the specified prompt. */\n numberResults?: number | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n /**\n * Used to determine the influence of the seedImage image in the generated output. A lower value results in more influence from the original image, while a higher value allows more creative deviation.\n * @max 1\n */\n strength?: number | null;\n /**\n * An array of LoRA models to be applied during the image generation process.\n * @maxSize 10\n */\n loraModels?: LoraModelSelect[];\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /** Inputs for the image generation process. 
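As an illustrative sketch only (the URLs are placeholders, not real endpoints): { seedImage: 'https://example.com/seed.png', maskImage: 'https://example.com/mask.png' } would point the generator at publicly accessible seed and mask images for inpainting.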
*/\n inputs?: Inputs;\n}\n\nexport enum TextToImageRequestModel {\n UNKNOWN_MODEL = 'UNKNOWN_MODEL',\n /** runware:101@1 */\n FLUX_1_DEV = 'FLUX_1_DEV',\n /** runware:100@1 */\n FLUX_1_SCHNELL = 'FLUX_1_SCHNELL',\n /** bfl:4@1 */\n FLUX_1_KONTEXT_MAX = 'FLUX_1_KONTEXT_MAX',\n /** bfl:3@1 */\n FLUX_1_KONTEXT_PRO = 'FLUX_1_KONTEXT_PRO',\n /** runware:108@20 */\n QWEN_IMAGE_EDIT = 'QWEN_IMAGE_EDIT',\n /** ideogram:4@1 */\n IDEOGRAM_3_0 = 'IDEOGRAM_3_0',\n /** ideogram:4@3 */\n IDEOGRAM_3_0_EDIT = 'IDEOGRAM_3_0_EDIT',\n /** bfl:2@2 */\n FLUX_1_1_PRO_ULTRA = 'FLUX_1_1_PRO_ULTRA',\n /** bfl:1@2 */\n FLUX_1_FILL_PRO = 'FLUX_1_FILL_PRO',\n /** bytedance:5@0 */\n SEEDREAM_4 = 'SEEDREAM_4',\n /** runware:102@1 */\n FLUX_DEV_FILL = 'FLUX_DEV_FILL',\n /** bfl:1@5 */\n FLUX_DEPTH_PRO = 'FLUX_DEPTH_PRO',\n /** bfl:1@4 */\n FLUX_CANNY_PRO = 'FLUX_CANNY_PRO',\n /** Should be used together with model_id field from allowed models list */\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type TextToImageRequestModelWithLiterals =\n | TextToImageRequestModel\n | 'UNKNOWN_MODEL'\n | 'FLUX_1_DEV'\n | 'FLUX_1_SCHNELL'\n | 'FLUX_1_KONTEXT_MAX'\n | 'FLUX_1_KONTEXT_PRO'\n | 'QWEN_IMAGE_EDIT'\n | 'IDEOGRAM_3_0'\n | 'IDEOGRAM_3_0_EDIT'\n | 'FLUX_1_1_PRO_ULTRA'\n | 'FLUX_1_FILL_PRO'\n | 'SEEDREAM_4'\n | 'FLUX_DEV_FILL'\n | 'FLUX_DEPTH_PRO'\n | 'FLUX_CANNY_PRO'\n | 'FROM_MODEL_ID';\n\nexport interface LoraModelSelect {\n /**\n * The unique identifier of the LoRA model, typically in the format \"wix:<id>@<version>\".\n * @minLength 1\n * @maxLength 255\n */\n model?: string | null;\n /**\n * The weight or influence of the LoRA model during the generation process.\n * A higher value indicates a stronger influence of the LoRA model on the output.\n * @min -4\n * @max 4\n */\n weight?: number | null;\n}\n\nexport interface Inputs {\n /**\n * A list of reference image URLs to be used for the image generation process.\n * These images serve as visual references for the model.\n * @maxSize 10\n * @maxLength 10000\n */\n referenceImages?: string[] | null;\n /**\n * When doing inpainting, this parameter is required.\n * Specifies the mask image to be used for the inpainting process. The value must be a URL pointing to the image. The image must be accessible publicly.\n * Supported formats are: PNG, JPG and WEBP.\n * @maxLength 10000\n */\n maskImage?: string | null;\n /**\n * Specifies the seed image to be used for the diffusion process.\n * Must be a URL pointing to the image. The image must be accessible publicly.\n * @maxLength 10000\n */\n seedImage?: string | null;\n}\n\nexport interface InvokeMlPlatformLlamaModelRequest {\n /**\n * The ML platform model id.\n * @minLength 1\n * @maxLength 50\n */\n modelId?: string;\n /**\n * The prompt that you want to pass to the model. With Llama 2 Chat, format the conversation with the following template.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /**\n * Specify the maximum number of tokens to use in the generated response.\n * The model truncates the response once the generated text exceeds max_gen_len.\n * @min 1\n */\n maxGenLen?: number | null;\n /**\n * Use a lower value to decrease randomness in the response.\n * @max 1\n */\n temperature?: number | null;\n /**\n * Use a lower value to ignore less probable options. 
Set to 0 or 1.0 to disable.\n * @max 1\n */\n topP?: number | null;\n}\n\nexport interface InvokeChatCompletionRequest {\n /** Model to invoke */\n model?: PerplexityModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far\n * @maxSize 1000\n */\n messages?: PerplexityMessage[];\n /**\n * Max number of completion tokens.\n * Completion token count + prompt token count must not exceed the size of the context window\n * @max 200000\n */\n maxTokens?: number | null;\n /**\n * The amount of randomness in the response, valued between 0 inclusive and 2 exclusive.\n * Higher values are more random, and lower values are more deterministic.\n */\n temperature?: number | null;\n /**\n * The nucleus sampling threshold, valued between 0 and 1 inclusive.\n * For each subsequent token, the model considers the results of the tokens with top_p probability mass.\n * Perplexity recommends either altering top_k or top_p, but not both.\n */\n topP?: number | null;\n /**\n * Given a list of domains, limit the citations used by the online model to URLs from the specified domains.\n * Currently limited to only 3 domains for whitelisting and blacklisting.\n * For blacklisting add a - to the beginning of the domain string.\n * @maxLength 10000\n * @maxSize 3\n */\n searchDomainFilter?: string[];\n /** Determines whether or not a request to an online model should return images. */\n returnImages?: boolean | null;\n /** Determines whether or not a request to an online model should return related questions. */\n returnRelatedQuestions?: boolean | null;\n /**\n * Returns search results within the specified time interval - does not apply to images.\n * Must be one of \"month\", \"week\", \"day\", \"hour\"\n * @maxLength 10\n */\n searchRecencyFilter?: string | null;\n /**\n * The number of tokens to keep for highest top-k filtering, specified as an integer between 0 and 2048 inclusive.\n * If set to 0, top-k filtering is disabled. Perplexity recommends either altering top_k or top_p, but not both.\n */\n topK?: number | null;\n /**\n * A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics. Incompatible with `frequency_penalty`.\n */\n presencePenalty?: number | null;\n /**\n * A multiplicative penalty greater than 0. Values greater than 1.0 penalize new tokens based on their existing\n * frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n * A value of 1.0 means no penalty. Incompatible with `presence_penalty`.\n */\n frequencyPenalty?: number | null;\n /**\n * Enable structured outputs with a JSON or Regex schema.\n * https://docs.perplexity.ai/guides/structured-outputs\n */\n responseFormat?: InvokeChatCompletionRequestResponseFormat;\n}\n\nexport enum PerplexityModel {\n UNKNOWN_PERPLEXITY_MODEL = 'UNKNOWN_PERPLEXITY_MODEL',\n SONAR = 'SONAR',\n SONAR_PRO = 'SONAR_PRO',\n SONAR_REASONING = 'SONAR_REASONING',\n SONAR_REASONING_PRO = 'SONAR_REASONING_PRO',\n SONAR_DEEP_RESEARCH = 'SONAR_DEEP_RESEARCH',\n}\n\n/** @enumType */\nexport type PerplexityModelWithLiterals =\n | PerplexityModel\n | 'UNKNOWN_PERPLEXITY_MODEL'\n | 'SONAR'\n | 'SONAR_PRO'\n | 'SONAR_REASONING'\n | 'SONAR_REASONING_PRO'\n | 'SONAR_DEEP_RESEARCH';\n\nexport interface PerplexityMessage {\n /**\n * The content of the message\n * @maxLength 200000\n */\n content?: string;\n /**\n * The role of the speaker in this turn of conversation. 
After the (optional) system message,\n * user and assistant roles should alternate with `user` then `assistant`, ending in `user`.\n */\n role?: PerplexityMessageMessageRoleWithLiterals;\n}\n\nexport enum PerplexityMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n SYSTEM = 'SYSTEM',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n}\n\n/** @enumType */\nexport type PerplexityMessageMessageRoleWithLiterals =\n | PerplexityMessageMessageRole\n | 'UNKNOWN'\n | 'SYSTEM'\n | 'USER'\n | 'ASSISTANT';\n\nexport interface InvokeChatCompletionRequestResponseFormat\n extends InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** @oneof */\nexport interface InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {\n /**\n * The schema should be a valid JSON schema object.\n * @maxLength 10000\n */\n jsonSchema?: string;\n /**\n * The regex is a regular expression string.\n * @maxLength 1000\n */\n regex?: string;\n}\n\n/** mimics https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api */\nexport interface GenerateImageRequest {\n /** ID of the model to use. */\n model?: ImagenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 1000\n */\n instances?: Instance[];\n /** The configuration for the generation. */\n parameters?: Parameters;\n}\n\nexport enum ImagenModel {\n UNKNOWN_IMAGEN_MODEL = 'UNKNOWN_IMAGEN_MODEL',\n IMAGEN_3_0_GENERATE_002 = 'IMAGEN_3_0_GENERATE_002',\n IMAGEN_3_0_FAST_GENERATE_001 = 'IMAGEN_3_0_FAST_GENERATE_001',\n IMAGEN_4_0_GENERATE_001 = 'IMAGEN_4_0_GENERATE_001',\n IMAGEN_4_0_FAST_GENERATE_001 = 'IMAGEN_4_0_FAST_GENERATE_001',\n IMAGEN_4_0_ULTRA_GENERATE_001 = 'IMAGEN_4_0_ULTRA_GENERATE_001',\n}\n\n/** @enumType */\nexport type ImagenModelWithLiterals =\n | ImagenModel\n | 'UNKNOWN_IMAGEN_MODEL'\n | 'IMAGEN_3_0_GENERATE_002'\n | 'IMAGEN_3_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_GENERATE_001'\n | 'IMAGEN_4_0_FAST_GENERATE_001'\n | 'IMAGEN_4_0_ULTRA_GENERATE_001';\n\nexport interface Instance {\n /**\n * The text prompt for image generation\n * @maxLength 1000000\n */\n prompt?: string | null;\n}\n\nexport interface Parameters {\n /**\n * The number of images to generate (1-4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /** Optional random seed for image generation */\n seed?: string | null;\n /** Optional parameter to use LLM-based prompt rewriting for higher quality images */\n enhancePrompt?: boolean | null;\n /**\n * Optional text to discourage in the generated images\n * @maxLength 480\n */\n negativePrompt?: string | null;\n /**\n * Optional aspect ratio for the image (1:1, 9:16, 16:9, 3:4, 4:3)\n * @maxLength 5\n */\n aspectRatio?: string | null;\n /** Optional output image format options */\n outputOptions?: OutputOptions;\n /**\n * Optional setting for allowing/disallowing generation of people\n * @maxLength 20\n */\n personGeneration?: string | null;\n /**\n * Optional safety filtering level\n * @maxLength 50\n */\n safetySetting?: string | null;\n /** Optional flag to add invisible watermark */\n addWatermark?: boolean | null;\n}\n\nexport interface OutputOptions {\n /**\n * Image format (image/png or image/jpeg)\n * @maxLength 20\n */\n mimeType?: string | null;\n /**\n * Compression quality for JPEG (0-100)\n * @max 100\n */\n compressionQuality?: number | 
null;\n}\n\nexport interface GenerateImageMlPlatformRequest\n extends GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n /** The model version ID */\n model?: GenerateImageMlPlatformModelWithLiterals;\n}\n\n/** @oneof */\nexport interface GenerateImageMlPlatformRequestInputOneOf {\n /** The input parameters for FluxPulid model */\n fluxPulid?: V1FluxPulid;\n}\n\nexport enum GenerateImageMlPlatformModel {\n /** The model version ID */\n UNKNOWN_CREATE_PREDICTION_MODEL = 'UNKNOWN_CREATE_PREDICTION_MODEL',\n /** The model version ID */\n FLUX_PULID = 'FLUX_PULID',\n}\n\n/** @enumType */\nexport type GenerateImageMlPlatformModelWithLiterals =\n | GenerateImageMlPlatformModel\n | 'UNKNOWN_CREATE_PREDICTION_MODEL'\n | 'FLUX_PULID';\n\nexport interface V1FluxPulid {\n /**\n * The prompt for image generation\n * @maxLength 1000\n */\n prompt?: string | null;\n /** Starting step for the generation process */\n startStep?: number | null;\n /**\n * URL of the main face image\n * @maxLength 2000\n */\n mainFaceImage?: string | null;\n /**\n * Negative prompt to specify what to avoid in generation\n * @maxLength 1000\n */\n negativePrompt?: string | null;\n /**\n * Set a random seed for generation (leave blank or -1 for random)\n * @min -1\n */\n seed?: number | null;\n /**\n * Set the width of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n width?: number | null;\n /**\n * Set the height of the generated image (256-1536 pixels)\n * @min 256\n * @max 1536\n */\n height?: number | null;\n /**\n * Set the Classifier-Free Guidance (CFG) scale. 1.0 uses standard CFG, while values >1.0 enable\n * True CFG for more precise control over generation. Higher values increase adherence to the prompt at the cost of image quality.\n * @min 1\n * @max 10\n */\n trueCfg?: number | null;\n /**\n * Set the weight of the ID image influence (0.0-3.0)\n * @max 3\n */\n idWeight?: number | null;\n /**\n * Set the number of denoising steps (1-20)\n * @min 1\n * @max 20\n */\n numSteps?: number | null;\n /**\n * Choose the format of the output image\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * Set the guidance scale for text prompt influence (1.0-10.0)\n * @min 1\n * @max 10\n */\n guidanceScale?: number | null;\n /**\n * Set the max sequence length for prompt (T5), smaller is faster (128-512)\n * @min 128\n * @max 512\n */\n maxSequenceLength?: number | null;\n /** Time step to start CFG - new field for ml platform */\n timestepToStartCfg?: number | null;\n /** Option to disable the NSFW safety checker */\n disableSafetyChecker?: boolean | null;\n}\n\nexport interface CreateImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. 
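As an illustrative sketch only (every value here is an assumption drawn from the enum and the field docs, not a documented default): { model: 'GPT_IMAGE_1', prompt: 'A watercolor fox', size: '1024x1024', quality: 'high', outputFormat: 'png' } would form a minimal request.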
*/\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, or high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence, self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n}\n\nexport enum OpenAiImageModel {\n UNKNOWN_IMAGE_CREATION_MODEL = 'UNKNOWN_IMAGE_CREATION_MODEL',\n GPT_4O_IMAGE = 'GPT_4O_IMAGE',\n GPT_IMAGE_1 = 'GPT_IMAGE_1',\n GPT_IMAGE_EXP = 'GPT_IMAGE_EXP',\n GPT_IMAGE_EXP_2 = 'GPT_IMAGE_EXP_2',\n GPT_IMAGE_EXP_3 = 'GPT_IMAGE_EXP_3',\n GPT_IMAGE_1_5 = 'GPT_IMAGE_1_5',\n}\n\n/** @enumType */\nexport type OpenAiImageModelWithLiterals =\n | OpenAiImageModel\n | 'UNKNOWN_IMAGE_CREATION_MODEL'\n | 'GPT_4O_IMAGE'\n | 'GPT_IMAGE_1'\n | 'GPT_IMAGE_EXP'\n | 'GPT_IMAGE_EXP_2'\n | 'GPT_IMAGE_EXP_3'\n | 'GPT_IMAGE_1_5';\n\nexport interface EditImageOpenAiRequest {\n /**\n * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.\n * @maxLength 4000\n */\n prompt?: string | null;\n /** The model to use for image generation. */\n model?: OpenAiImageModelWithLiterals;\n /**\n * The number of images to be generated.\n * Default is 1\n */\n n?: number | null;\n /**\n * The quality of the image that will be generated.\n * low, medium, or high. Default: high\n * @maxLength 4000\n */\n quality?: string | null;\n /**\n * The dimensions of the requested image.\n * Square: 1024x1024\n * Landscape: 1536x1024\n * Portrait: 1024x1536\n * Default: 1024x1024\n * @maxLength 4000\n */\n size?: string | null;\n /**\n * Output format: png, webp, or jpeg\n * @maxLength 50\n */\n outputFormat?: string | null;\n /**\n * 0-100% compression for JPEG + WebP\n * Default: 100%\n */\n outputCompression?: number | null;\n /**\n * The image to be edited.\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * The image mask to be edited.\n * @maxLength 10000\n */\n imageMaskUrl?: string | null;\n /**\n * Additional images to be edited.\n * @maxSize 10\n * @maxLength 10000\n */\n imageUrls?: string[] | null;\n /**\n * Moderation flag - values low and auto.\n * Setting moderation to low will include relaxed safety refusals for violence, self-harm.\n * @maxLength 10\n */\n moderation?: string | null;\n /**\n * Allows setting transparency for the background of the generated image(s). 
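// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A CreateImageOpenAiRequest exercising the fields documented above; values
// are assumed examples. Note that background: 'transparent' requires a png
// or webp outputFormat.
const exampleCreateImage: CreateImageOpenAiRequest = {
  model: OpenAiImageModel.GPT_IMAGE_1,
  prompt: 'A flat-style icon of a paper plane',
  n: 1,
  quality: 'high', // low, medium, or high
  size: '1024x1024', // or 1536x1024 (landscape), 1024x1536 (portrait)
  outputFormat: 'png',
  background: 'transparent',
};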
This parameter is only supported for gpt-image-1.\n * Must be one of transparent, opaque or auto (default value).\n * When auto is used, the model will automatically determine the best background for the image.\n * If transparent, the output format needs to support transparency, so it should be set to either png (default value) or webp.\n * @maxLength 200\n */\n background?: string | null;\n /**\n * Control how much effort the model will exert to match the style and features, especially facial features, of input images.\n * This parameter is only supported for gpt-image-1. Supports high and low. Defaults to low.\n * @maxLength 10\n */\n inputFidelity?: string | null;\n}\n\n/** Mirrors https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo-video-generation */\nexport interface GenerateVideoRequest {\n /** ID of the Video generation model to use. */\n model?: VideoGenModelWithLiterals;\n /**\n * The content of the current conversation with the model.\n * @minSize 1\n * @maxSize 100\n */\n instances?: GenerateVideoInstance[];\n /** Generation-time settings. */\n parameters?: GenerateVideoParameters;\n}\n\nexport enum VideoGenModel {\n UNKNOWN_VIDEO_GEN_MODEL = 'UNKNOWN_VIDEO_GEN_MODEL',\n VEO_2_0_GENERATE_001 = 'VEO_2_0_GENERATE_001',\n VEO_3_0_GENERATE_001 = 'VEO_3_0_GENERATE_001',\n VEO_3_0_FAST_GENERATE_001 = 'VEO_3_0_FAST_GENERATE_001',\n}\n\n/** @enumType */\nexport type VideoGenModelWithLiterals =\n | VideoGenModel\n | 'UNKNOWN_VIDEO_GEN_MODEL'\n | 'VEO_2_0_GENERATE_001'\n | 'VEO_3_0_GENERATE_001'\n | 'VEO_3_0_FAST_GENERATE_001';\n\nexport interface GenerateVideoInstance {\n /**\n * Mandatory (text-to-video), optional if an input image prompt is provided (image-to-video)\n * Text input for guiding video generation.\n * @maxLength 10000\n */\n prompt?: string | null;\n /**\n * Mandatory (image-to-video), optional if a text prompt is provided (text-to-video)\n * Image input for guiding video generation.\n */\n image?: V1ImageInput;\n}\n\nexport interface V1ImageInput {\n /**\n * A publicly available image URL\n * @maxLength 10000\n */\n imageUrl?: string | null;\n /**\n * MIME type of the image (image/jpeg or image/png)\n * @maxLength 20\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateVideoParameters {\n /**\n * Requested video length in seconds (4, 6, or 8. The default is 8)\n * @min 4\n * @max 8\n */\n durationSeconds?: number | null;\n /**\n * A text string that describes anything you want to discourage the model from generating.\n * @maxLength 10000\n */\n negativePrompt?: string | null;\n /** Use gemini to enhance your prompts (default is True) */\n enhancePrompt?: boolean | null;\n /**\n * A number to request to make generated videos deterministic.\n * Adding a seed number with your request without changing other parameters will cause the model to produce the same videos.\n */\n seed?: string | null;\n /**\n * Number of videos to generate (1–4)\n * @min 1\n * @max 4\n */\n sampleCount?: number | null;\n /**\n * Aspect ratio: 16:9 (default, landscape) or 9:16 (portrait)\n * @maxLength 50\n */\n aspectRatio?: string | null;\n /**\n * The safety setting that controls whether people or face generation is allowed:\n * \"allow_adult\" (default value): allow generation of adults only\n * \"disallow\": disallows inclusion of people/faces in images\n * @maxLength 50\n */\n personGeneration?: string | null;\n /** Whether to generate audio for the video */\n generateAudio?: boolean | null;\n /**\n * The resolution of the generated video. 
Supported values: 720p, 1080p. Default: 1080p\n * @maxLength 50\n */\n resolution?: string | null;\n}\n\n/** Add to your existing proto file */\nexport interface V1CreateChatCompletionRequest {\n /** Model identifier */\n model?: ChatCompletionModelWithLiterals;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: GoogleproxyV1ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. */\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: V1CreateChatCompletionRequestResponseFormat;\n}\n\nexport enum ChatCompletionModel {\n UNKNOWN_CHAT_COMPLETION_MODEL = 'UNKNOWN_CHAT_COMPLETION_MODEL',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-scout\n * llama-4-scout-17b-16e-instruct-maas\n */\n LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS = 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS',\n /**\n * https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama/llama4-maverick\n * llama-4-maverick-17b-128e-instruct-maas\n */\n LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS = 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS',\n}\n\n/** @enumType */\nexport type ChatCompletionModelWithLiterals =\n | ChatCompletionModel\n | 'UNKNOWN_CHAT_COMPLETION_MODEL'\n | 'LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS'\n | 'LLAMA_4_MAVERICK_17B_128E_INSTRUCT_MAAS';\n\nexport interface GoogleproxyV1ChatCompletionMessage {\n /** The role of the message author. 
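// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A text-to-video GenerateVideoRequest for the Veo types above; supplying
// instance.image instead of (or alongside) prompt switches to image-to-video.
// All values are assumed examples.
const exampleVeoRequest: GenerateVideoRequest = {
  model: VideoGenModel.VEO_3_0_GENERATE_001,
  instances: [{ prompt: 'A timelapse of clouds rolling over a mountain ridge' }],
  parameters: {
    durationSeconds: 8, // 4, 6, or 8
    aspectRatio: '16:9', // or 9:16
    sampleCount: 1, // 1-4 videos
    generateAudio: true,
    resolution: '720p', // 720p or 1080p
  },
};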
*/\n role?: V1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: V1ChatCompletionMessageContentPart[];\n}\n\nexport interface V1ChatCompletionMessageImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum V1ChatCompletionMessageMessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type V1ChatCompletionMessageMessageRoleWithLiterals =\n | V1ChatCompletionMessageMessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface V1ChatCompletionMessageContentPart\n extends V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ChatCompletionMessageContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: V1ChatCompletionMessageImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface V1CreateChatCompletionRequestResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawRequest {\n /**\n * ML Platform model identifier\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of messages comprising the conversation so far.\n * @minSize 1\n * @maxSize 1000\n */\n messages?: ChatCompletionMessage[];\n /**\n * An upper bound for the number of tokens that can be generated for a completion,\n * including visible output tokens and reasoning tokens.\n * @min 1\n * @max 4096\n */\n maxCompletionTokens?: number | null;\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n * We generally recommend altering this or top_p but not both.\n * @max 2\n */\n temperature?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both. Defaults to 1.\n * @max 1\n */\n topP?: number | null;\n /** How many chat completion choices to generate for each input message. Defaults to 1. 
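// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A V1CreateChatCompletionRequest with one multimodal user message; the
// image URL and parameter values are assumed examples.
const exampleLlamaChat: V1CreateChatCompletionRequest = {
  model: ChatCompletionModel.LLAMA_4_SCOUT_17B_16E_INSTRUCT_MAAS,
  messages: [
    {
      role: V1ChatCompletionMessageMessageRole.USER,
      contentParts: [
        { type: 'text', text: 'Describe this photo in one sentence.' },
        { type: 'image_url', imageUrl: { url: 'https://example.com/photo.jpg', detail: 'low' } },
      ],
    },
  ],
  maxCompletionTokens: 256,
  temperature: 0.2, // alter temperature or topP, not both
};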
*/\n n?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on whether they appear in the text so far,\n * increasing the model's likelihood to talk about new topics.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n presencePenalty?: number | null;\n /**\n * Number between -2.0 and 2.0.\n * Positive values penalize new tokens based on their existing frequency in the text so far,\n * decreasing the model's likelihood to repeat the same line verbatim.\n * Defaults to 0.\n * @min -2\n * @max 2\n */\n frequencyPenalty?: number | null;\n /**\n * json_object: Interpreted as passing \"application/json\" to the API.\n * json_schema. Fully recursive schemas are not supported. additional_properties is supported.\n * text: Interpreted as passing \"text/plain\" to the API.\n * Any other MIME type is passed as is to the model, such as passing \"application/json\" directly.\n */\n responseFormat?: ResponseFormat;\n}\n\nexport interface ChatCompletionMessage {\n /** The role of the message author. */\n role?: MessageRoleWithLiterals;\n /**\n * The content of the message, which can be text or an image URL.\n * @maxSize 5\n */\n contentParts?: ContentPart[];\n}\n\nexport interface ImageUrlContent {\n /**\n * The URL of the image.\n * @maxLength 100000\n */\n url?: string | null;\n /**\n * Similar to media resolution, this determines the maximum tokens per image for the request.\n * Note that while OpenAI's field is per-image,\n * Google enforces the same detail across the request,\n * and passing multiple detail types in one request will throw an error.\n * @maxLength 100\n */\n detail?: string | null;\n}\n\nexport enum MessageRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n}\n\n/** @enumType */\nexport type MessageRoleWithLiterals =\n | MessageRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM';\n\nexport interface ContentPart extends ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ContentPartContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlContent;\n /**\n * Text content\n * @maxLength 1000000000\n */\n text?: string | null;\n}\n\nexport interface ResponseFormat {\n /**\n * Must be one of text, json_object or json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n}\n\nexport interface VideoInferenceRequest {\n /** Specifies the format of the output video. Supported formats are: MP4 and WEBM. Default: MP4. */\n outputFormat?: OutputFormatWithLiterals;\n /**\n * Sets the compression quality of the output video. Higher values preserve more quality but increase file size. Default: 95.\n * @min 20\n * @max 99\n */\n outputQuality?: number | null;\n /**\n * The text description that guides the video generation process. 
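// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A raw ML Platform chat completion constrained to JSON output via the
// json_schema response format; modelId and the schema are assumed examples.
const exampleRawChat: InvokeMlPlatformOpenAIChatCompletionRawRequest = {
  modelId: 'my-team/my-finetuned-model', // hypothetical ML Platform model ID
  messages: [
    { role: MessageRole.SYSTEM, contentParts: [{ type: 'text', text: 'Reply in JSON only.' }] },
    { role: MessageRole.USER, contentParts: [{ type: 'text', text: 'List three primary colors.' }] },
  ],
  maxCompletionTokens: 128,
  responseFormat: {
    type: 'json_schema',
    schema: { type: 'object', properties: { colors: { type: 'array', items: { type: 'string' } } } },
  },
};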
This prompt defines what you want to see in the video.\n * The length of the prompt must be at least 2 characters.\n * @minLength 2\n * @maxLength 100000\n */\n positivePrompt?: string | null;\n /**\n * Specifies what you want to avoid in the generated video.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * An array of objects that define key frames to guide video generation.\n * @maxSize 100\n */\n frameImages?: FrameImage[];\n /**\n * An array containing reference images used to condition the generation process. Must be URLs pointing to the images. The images must be accessible publicly.\n * @maxSize 10\n * @maxLength 100000\n */\n referenceImages?: string[] | null;\n /**\n * The width of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n width?: number | null;\n /**\n * The height of the generated video in pixels. Must be a multiple of 8 for compatibility with video encoding standards.\n * @min 256\n * @max 10000\n */\n height?: number | null;\n /** The AI model to use for video generation. */\n model?: VideoModelWithLiterals;\n /**\n * Video model as a string\n * @maxLength 1000\n */\n modelId?: string | null;\n /**\n * The length of the generated video in seconds.\n * @min 1\n * @max 10\n */\n duration?: number | null;\n /**\n * The frame rate (frames per second) of the generated video. Default: 24.\n * @min 15\n * @max 60\n */\n fps?: number | null;\n /**\n * The number of denoising steps the model performs during video generation.\n * @min 10\n * @max 50\n */\n steps?: number | null;\n /** A seed is a value used to randomize the video generation. */\n seed?: string | null;\n /**\n * Controls how closely the video generation follows your prompt. Recommended range is 6.0-10.0 for most video models.\n * @max 50\n */\n cfgScale?: number | null;\n /**\n * Specifies how many videos to generate for the given parameters. Default: 1.\n * @min 1\n * @max 4\n */\n numberResults?: number | null;\n /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */\n providerSettings?: Record<string, any> | null;\n /**\n * Skip polling flag - if set to false, will poll until video generation is complete.\n * If not set or true, returns immediately with a task UUID for manual polling.\n */\n skipPolling?: boolean | null;\n}\n\nexport enum OutputFormat {\n UNKNOWN_OUTPUT_FORMAT = 'UNKNOWN_OUTPUT_FORMAT',\n /** MPEG-4 video format, widely compatible and recommended for most use cases. */\n MP4 = 'MP4',\n /** WebM video format, optimized for web delivery and smaller file sizes. */\n WEBM = 'WEBM',\n}\n\n/** @enumType */\nexport type OutputFormatWithLiterals =\n | OutputFormat\n | 'UNKNOWN_OUTPUT_FORMAT'\n | 'MP4'\n | 'WEBM';\n\nexport interface FrameImage {\n /**\n * Specifies the input image that will be used to constrain the video content at the specified frame position.\n * Must be a URL pointing to the image. 
The image must be accessible publicly.\n * @maxLength 100000\n */\n inputImage?: string;\n /**\n * Specifies the position of this frame constraint within the video timeline.\n * Can be \"first\", \"last\", or a numeric frame number.\n * @maxLength 20\n */\n frame?: string | null;\n}\n\nexport enum VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SEEDANCE_1_0_PRO = 'SEEDANCE_1_0_PRO',\n SEEDANCE_1_0_LITE = 'SEEDANCE_1_0_LITE',\n SEEDANCE_1_0_PRO_FAST = 'SEEDANCE_1_0_PRO_FAST',\n FROM_MODEL_ID = 'FROM_MODEL_ID',\n}\n\n/** @enumType */\nexport type VideoModelWithLiterals =\n | VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SEEDANCE_1_0_PRO'\n | 'SEEDANCE_1_0_LITE'\n | 'SEEDANCE_1_0_PRO_FAST'\n | 'FROM_MODEL_ID';\n\nexport interface V1OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: V1ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: V1ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: V1ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: V1ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. 
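// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A VideoInferenceRequest pinning the first and last frames via frameImages;
// URLs and numeric settings are assumed examples. Width and height must be
// multiples of 8.
const exampleVideoInference: VideoInferenceRequest = {
  model: VideoModel.SEEDANCE_1_0_LITE,
  positivePrompt: 'A paper boat drifting down a rainy street',
  width: 768,
  height: 432,
  duration: 5, // seconds, 1-10
  fps: 24,
  frameImages: [
    { inputImage: 'https://example.com/first.png', frame: 'first' },
    { inputImage: 'https://example.com/last.png', frame: 'last' },
  ],
  skipPolling: false, // false = poll until generation completes
};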
*/\n toolChoice?: V1ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: V1ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. */\n store?: boolean | null;\n}\n\nexport enum V1ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n O3_PRO_2025_06_10 = 'O3_PRO_2025_06_10',\n O3_DEEP_RESEARCH_2025_06_26 = 'O3_DEEP_RESEARCH_2025_06_26',\n GPT_5_CODEX = 'GPT_5_CODEX',\n GPT_5_1_2025_11_13 = 'GPT_5_1_2025_11_13',\n GPT_5_1_CODEX = 'GPT_5_1_CODEX',\n GPT_5_1_CODEX_MINI = 'GPT_5_1_CODEX_MINI',\n GPT_EXP_RESPONSES = 'GPT_EXP_RESPONSES',\n GPT_EXP_RESPONSES_2 = 'GPT_EXP_RESPONSES_2',\n GPT_EXP_RESPONSES_3 = 'GPT_EXP_RESPONSES_3',\n GPT_5_1_CODEX_MAX = 'GPT_5_1_CODEX_MAX',\n GPT_5_2_2025_12_11 = 'GPT_5_2_2025_12_11',\n}\n\n/** @enumType */\nexport type V1ResponsesModelWithLiterals =\n | V1ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'O3_PRO_2025_06_10'\n | 'O3_DEEP_RESEARCH_2025_06_26'\n | 'GPT_5_CODEX'\n | 'GPT_5_1_2025_11_13'\n | 'GPT_5_1_CODEX'\n | 'GPT_5_1_CODEX_MINI'\n | 'GPT_EXP_RESPONSES'\n | 'GPT_EXP_RESPONSES_2'\n | 'GPT_EXP_RESPONSES_3'\n | 'GPT_5_1_CODEX_MAX'\n | 'GPT_5_2_2025_12_11';\n\nexport interface V1ResponsesInputItem extends V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. 
*/\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: V1ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: V1ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. */\n role?: ResponsesInputMessageResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: V1ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesInputMessageResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesInputMessageResponsesMessageRoleWithLiterals =\n | ResponsesInputMessageResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface V1ResponsesInputMessageContent\n extends V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ResponsesInputMessageContentImageInput;\n /** File content */\n fileInput?: ResponsesInputMessageContentFileInput;\n}\n\nexport interface ResponsesInputMessageContentImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface ResponsesInputMessageContentFileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface V1ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. 
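// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A stateless V1OpenAiResponsesRequest with a single user message; the
// 'input_text' content type string mirrors the OpenAI Responses convention
// and, like the other values, is an assumed example.
const exampleResponsesRequest: V1OpenAiResponsesRequest = {
  model: V1ResponsesModel.GPT_5_MINI_2025_08_07_RESPONSES,
  instructions: 'You are a terse assistant.',
  input: [
    {
      message: {
        role: ResponsesInputMessageResponsesMessageRole.USER,
        content: [{ type: 'input_text', text: 'Explain nucleus sampling in two sentences.' }],
      },
    },
  ],
  maxOutputTokens: 400,
  store: false, // pair with include: ['reasoning.encrypted_content'] for stateless multi-turn
};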
Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: ResponsesOutputMessageOutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface V1OutputAnnotation\n extends V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\n/** @oneof */\nexport interface V1OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: V1UrlCitation;\n}\n\nexport interface V1UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. */\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface ResponsesOutputMessageOutputContent {\n /**\n * The type of the output content. One of output_text or refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The refusal text of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: V1OutputAnnotation[];\n}\n\nexport interface V1ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: ResponsesWebSearchToolCallAction;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface ResponsesWebSearchToolCallAction {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\": Performs a web search query.\n * Action type \"open_page\": Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the call. 
Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface V1ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface V1ResponsesReasoningOutput {\n /** @maxLength 100 */\n id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: V1ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: V1ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface V1ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface V1ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface V1ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: V1ResponsesCodeInterpreterOutput[];\n}\n\nexport interface V1ResponsesCodeInterpreterOutput\n extends V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: V1ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: V1ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface V1ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface V1ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface V1ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. 
Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface V1ResponsesTextFormat\n extends V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface V1ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: ResponsesTextFormatJsonSchema;\n}\n\nexport interface ResponsesTextFormatJsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the\n */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesToolChoice {\n /**\n * Tool choice mode\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface V1ResponsesTool extends V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface V1ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. 
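// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// Structured output via the json_schema text format; the schema and name
// are assumed examples. With strict: true only a subset of JSON Schema is
// accepted, so keep additionalProperties: false and list required fields.
const exampleStructuredText: V1ResponsesTextFormat = {
  jsonSchema: {
    type: 'json_schema',
    name: 'weather_report',
    strict: true,
    schema: {
      type: 'object',
      properties: { tempC: { type: 'number' }, summary: { type: 'string' } },
      required: ['tempC', 'summary'],
      additionalProperties: false,
    },
  },
};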
*/\n webSearch?: V1ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: V1ResponsesFunction;\n /** Add code interpreter */\n codeInterpreter?: V1ResponsesCodeInterpreter;\n}\n\nexport interface V1ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. */\n userLocation?: ResponsesWebSearchUserLocation;\n}\n\nexport interface ResponsesWebSearchUserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface V1ResponsesFunction {\n /**\n * The type of the function tool. Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. OpenAI structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface V1ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: V1ResponsesCodeInterpreterContainer;\n}\n\nexport interface V1ResponsesCodeInterpreterContainer\n extends V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface V1ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: V1ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface V1ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\nexport interface OpenAiResponsesRequest {\n /** ID of the model to use. */\n model?: ResponsesModelWithLiterals;\n /**\n * Specify additional output data to include in the model response. 
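// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// A tools array mixing a hosted web search tool with a caller-defined
// function tool; the function shape and the user location are assumed examples.
const exampleTools: V1ResponsesTool[] = [
  {
    webSearch: {
      type: 'web_search_preview',
      searchContextSize: 'medium', // low, medium, or high
      userLocation: { type: 'approximate', country: 'US', city: 'San Francisco' },
    },
  },
  {
    function: {
      type: 'function',
      name: 'get_weather',
      description: 'Look up the current weather for a city.',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
      strict: true,
    },
  },
];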
Currently supported values are:\n * code_interpreter_call.outputs: Includes the outputs of python code execution in code interpreter tool call items.\n * computer_call_output.output.image_url: Include image urls from the computer call output.\n * file_search_call.results: Include the search results of the file search tool call.\n * message.input_image.image_url: Include image urls from the input message.\n * message.output_text.logprobs: Include logprobs with assistant messages.\n * reasoning.encrypted_content: Includes an encrypted version of reasoning tokens in reasoning item outputs.\n * This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly\n * (like when the store parameter is set to false, or when an organization is enrolled in the zero data retention program).\n * @maxSize 20\n * @maxLength 10000\n */\n include?: string[] | null;\n /**\n * Text, image, or file inputs to the model, used to generate a response.\n * @maxSize 1000\n */\n input?: ResponsesInputItem[];\n /**\n * A system (or developer) message inserted into the model's context.\n * @maxLength 100000000\n */\n instructions?: string | null;\n /** An upper bound for the number of tokens that can be generated for a response. */\n maxOutputTokens?: number | null;\n /** The maximum number of total calls to built-in tools that can be processed in a response. */\n maxToolCalls?: number | null;\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** o-series models only */\n reasoning?: ResponsesReasoning;\n /** What sampling temperature to use, between 0 and 2. */\n temperature?: number | null;\n /** Configuration options for a text response from the model. Can be plain text or structured JSON data. */\n text?: ResponsesTextFormat;\n /** How the model should select which tool (or tools) to use. */\n toolChoice?: ResponsesToolChoice;\n /**\n * A list of tools the model may call.\n * @maxSize 1000\n */\n tools?: ResponsesTool[];\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** Whether to store the generated model response for later retrieval via API. 
*/\n store?: boolean | null;\n}\n\nexport enum ResponsesModel {\n MODEL_UNSPECIFIED = 'MODEL_UNSPECIFIED',\n GPT_5_2025_08_07_RESPONSES = 'GPT_5_2025_08_07_RESPONSES',\n GPT_5_MINI_2025_08_07_RESPONSES = 'GPT_5_MINI_2025_08_07_RESPONSES',\n GPT_5_NANO_2025_08_07_RESPONSES = 'GPT_5_NANO_2025_08_07_RESPONSES',\n GPT_5_2_2025_12_11 = 'GPT_5_2_2025_12_11',\n}\n\n/** @enumType */\nexport type ResponsesModelWithLiterals =\n | ResponsesModel\n | 'MODEL_UNSPECIFIED'\n | 'GPT_5_2025_08_07_RESPONSES'\n | 'GPT_5_MINI_2025_08_07_RESPONSES'\n | 'GPT_5_NANO_2025_08_07_RESPONSES'\n | 'GPT_5_2_2025_12_11';\n\nexport interface ResponsesInputItem extends ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesInputItemItemOneOf {\n /**\n * A message input to the model with a role indicating instruction following hierarchy.\n * Instructions given with the developer or system role take precedence over instructions given with the user role.\n * Messages with the assistant role are presumed to have been generated by the model in previous interactions.\n */\n message?: ResponsesInputMessage;\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** The output/result of a function call. */\n functionToolCallOutput?: ResponsesFunctionToolCallOutput;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter tool call made by the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesInputMessage {\n /** The role of the message input. One of user, system, or developer. 
*/\n role?: ResponsesMessageRoleWithLiterals;\n /**\n * The content of the message, which can be text, image, or file.\n * @maxSize 2000\n */\n content?: ResponsesInputMessageContent[];\n}\n\nexport enum ResponsesMessageRole {\n UNKNOWN_RESPONSE = 'UNKNOWN_RESPONSE',\n USER = 'USER',\n SYSTEM = 'SYSTEM',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type ResponsesMessageRoleWithLiterals =\n | ResponsesMessageRole\n | 'UNKNOWN_RESPONSE'\n | 'USER'\n | 'SYSTEM'\n | 'DEVELOPER';\n\nexport interface ResponsesInputMessageContent\n extends ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n /**\n * The type of the content part\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesInputMessageContentContentValueOneOf {\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /** Image content */\n imageUrl?: ImageInput;\n /** File content */\n fileInput?: FileInput;\n}\n\nexport interface ImageInput {\n /**\n * The URL or file_id of the image\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Detail level: high, low, or auto\n * @maxLength 10\n */\n detail?: string | null;\n}\n\nexport interface FileInput {\n /**\n * File identification - one of these should be provided\n * @maxLength 100000\n */\n fileUrl?: string | null;\n /**\n * filename\n * @maxLength 255\n */\n filename?: string | null;\n}\n\nexport interface ResponsesOutputMessage {\n /**\n * The unique ID of the output message.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the output message. Always message.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the message input. One of in_progress, completed, or incomplete. Populated when input items are returned via API.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The content of the output message.\n * @maxSize 1000\n */\n content?: OutputContent[];\n /**\n * The role of the output message. Always assistant.\n * @maxLength 100\n */\n role?: string | null;\n}\n\n/**\n * Annotation types\n * The annotations of the text output.\n */\nexport interface OutputAnnotation extends OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\n/** @oneof */\nexport interface OutputAnnotationAnnotationTypeOneOf {\n /** A citation for a web resource used to generate a model response. */\n urlCitation?: UrlCitation;\n}\n\nexport interface UrlCitation {\n /**\n * The type of the URL citation. Always url_citation.\n * @maxLength 100\n */\n type?: string | null;\n /** The index of the first character of the URL citation in the message. */\n startIndex?: number | null;\n /** The index of the last character of the URL citation in the message. 
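// --- Editor's illustrative sketch (not part of the generated SDK source) ---
// One input message mixing text, image, and file content parts; the URLs and
// the content-type strings are assumed examples.
const exampleMixedMessage: ResponsesInputMessage = {
  role: ResponsesMessageRole.USER,
  content: [
    { type: 'input_text', text: 'Compare the chart with the attached report.' },
    { type: 'input_image', imageUrl: { imageUrl: 'https://example.com/chart.png', detail: 'auto' } },
    { type: 'input_file', fileInput: { fileUrl: 'https://example.com/report.pdf', filename: 'report.pdf' } },
  ],
};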
*/\n endIndex?: number | null;\n /**\n * The title of the web resource.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * The URL of the web resource.\n * @maxLength 10000\n */\n url?: string | null;\n}\n\nexport interface OutputContent {\n /**\n * The type of the output content. One of output_text or refusal.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The text content of the message.\n * @maxLength 1000000000\n */\n text?: string | null;\n /**\n * The refusal text of the message.\n * @maxLength 1000000000\n */\n refusal?: string | null;\n /**\n * Annotations for the output content (citations, etc.)\n * @maxSize 1000\n */\n annotations?: OutputAnnotation[];\n}\n\nexport interface ResponsesWebSearchToolCall {\n /** The action performed by the model in the web search tool call. */\n action?: Action;\n /**\n * The unique ID of the web search tool call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The status of the web search tool call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The type of the web search tool call. Always web_search_call.\n * @maxLength 100\n */\n type?: string | null;\n}\n\nexport interface Action {\n /**\n * The action type.\n * Action type \"find\": Searches for a pattern within a loaded page.\n * Action type \"search\": Performs a web search query.\n * Action type \"open_page\": Opens a specific URL from search results.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The search query.\n * @maxLength 100000\n */\n query?: string | null;\n /**\n * The URL opened by the model.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * The pattern or text to search for within the page.\n * @maxLength 100000\n */\n pattern?: string | null;\n}\n\nexport interface ResponsesFunctionToolCall {\n /**\n * The unique ID of the function call.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the call. Always \"function_call\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The arguments passed to the function as a JSON string.\n * @maxLength 100000\n */\n arguments?: string | null;\n /**\n * The call ID that links this call to its output.\n * @maxLength 100\n */\n callId?: string | null;\n /**\n * The name of the function that was called.\n * @maxLength 100\n */\n name?: string | null;\n}\n\nexport interface ResponsesFunctionToolCallOutput {\n /**\n * The type of the output. 
Always \"function_call_output\".\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the function call output.\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The output/result of the function call.\n * @maxLength 1000000000\n */\n output?: string | null;\n /**\n * The call ID that links this output to its original call.\n * @maxLength 100\n */\n callId?: string | null;\n}\n\nexport interface ResponsesReasoningOutput {\n /** @maxLength 100 */\n id?: string | null;\n /** @maxLength 100 */\n type?: string | null;\n /** @maxSize 1000 */\n summary?: ResponsesReasoningSummaryContent[];\n /** @maxSize 1000 */\n content?: ResponsesReasoningContent[];\n /** @maxLength 10000000 */\n encryptedContent?: string | null;\n /** @maxLength 100 */\n status?: string | null;\n}\n\nexport interface ResponsesReasoningSummaryContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\nexport interface ResponsesReasoningContent {\n /** @maxLength 100 */\n type?: string | null;\n /** @maxLength 1000000 */\n text?: string | null;\n}\n\n/** Output types for code interpreter calls and outputs */\nexport interface ResponsesCodeInterpreterToolCall {\n /**\n * The unique ID of the code interpreter tool call\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The type of the tool call. Always \"code_interpreter_call\"\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The status of the tool call\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The code to run\n * @maxLength 1000000\n */\n code?: string | null;\n /**\n * The container ID used to run the code\n * @maxLength 100\n */\n containerId?: string | null;\n /**\n * The outputs generated by the code interpreter\n * @maxSize 100\n */\n outputs?: ResponsesCodeInterpreterOutput[];\n}\n\nexport interface ResponsesCodeInterpreterOutput\n extends ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterOutputOutputTypeOneOf {\n /** Log output */\n logsOutput?: ResponsesCodeInterpreterLogsOutput;\n /** Image output */\n imageOutput?: ResponsesCodeInterpreterImageOutput;\n}\n\nexport interface ResponsesCodeInterpreterLogsOutput {\n /**\n * The type of output. Always \"logs\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The logs output from the code interpreter\n * @maxLength 1000000\n */\n logs?: string | null;\n}\n\nexport interface ResponsesCodeInterpreterImageOutput {\n /**\n * The type of output. Always \"image\"\n * @maxLength 10\n */\n type?: string | null;\n /**\n * The image URL\n * @maxLength 1000\n */\n imageUrl?: string | null;\n}\n\nexport interface ResponsesReasoning {\n /**\n * Constrains effort on reasoning for reasoning models. Currently supported values are low, medium, and high.\n * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.\n * @maxLength 100\n */\n effort?: string | null;\n /**\n * A summary of the reasoning performed by the model.\n * This can be useful for debugging and understanding the model's reasoning process. One of auto, concise, or detailed.\n * @maxLength 100\n */\n summary?: string | null;\n}\n\nexport interface ResponsesTextFormat extends ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. 
*/\n jsonSchema?: JsonSchema;\n /**\n * Constrains the verbosity of the model's response. Lower values will result in more concise responses,\n * while higher values will result in more verbose responses. Currently supported values are low, medium, and high.\n * @maxLength 100\n */\n verbosity?: string | null;\n}\n\n/** @oneof */\nexport interface ResponsesTextFormatFormatOneOf {\n /** Structured Outputs configuration options, including a JSON Schema. */\n jsonSchema?: JsonSchema;\n}\n\nexport interface JsonSchema {\n /**\n * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n * @maxLength 64\n */\n name?: string | null;\n /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */\n schema?: Record<string, any> | null;\n /**\n * The type of response format being defined. Always json_schema.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * A description of what the response format is for, used by the model to determine how to respond in the format.\n * @maxLength 100000\n */\n description?: string | null;\n /**\n * Whether to enable strict schema adherence when generating the output.\n * If set to true, the model will always follow the exact schema defined in the schema field.\n * Only a subset of JSON Schema is supported when strict is true. To learn more, read the OpenAI Structured Outputs guide.\n */\n strict?: boolean | null;\n}\n\nexport interface ResponsesToolChoice {\n /**\n * Tool choice mode.\n * Controls which (if any) tool is called by the model.\n * none means the model will not call any tool and instead generates a message.\n * auto means the model can pick between generating a message or calling one or more tools.\n * required means the model must call one or more tools.\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * The type of hosted tool choice.\n * Allowed values are:\n * file_search\n * web_search_preview\n * computer_use_preview\n * code_interpreter\n * image_generation\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The label of the MCP server to use.\n * @maxLength 100\n */\n serverLabel?: string | null;\n}\n\nexport interface ResponsesTool extends ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Code interpreter tool. */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\n/** @oneof */\nexport interface ResponsesToolToolTypeOneOf {\n /** A hosted tool that provides web search capabilities. */\n webSearch?: ResponsesWebSearch;\n /** A function that the model can call to perform a specific action. */\n function?: ResponsesFunction;\n /** Code interpreter tool. */\n codeInterpreter?: ResponsesCodeInterpreter;\n}\n\nexport interface ResponsesWebSearch {\n /**\n * The type of the web search tool. One of web_search_preview or web_search_preview_2025_03_11.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * High level guidance for the amount of context window space to use for the search. One of low, medium, or high. medium is the default.\n * @maxLength 100\n */\n searchContextSize?: string | null;\n /** To refine search results based on geography, you can specify an approximate user location using country, city, region, and/or timezone. 
*/\n userLocation?: UserLocation;\n}\n\nexport interface UserLocation {\n /**\n * The type of location approximation. Always approximate.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * Free text input for the city of the user, e.g. San Francisco.\n * @maxLength 100\n */\n city?: string | null;\n /**\n * The two-letter ISO country code of the user, e.g. US.\n * https://en.wikipedia.org/wiki/ISO_3166-1\n * @maxLength 2\n */\n country?: string | null;\n /**\n * Free text input for the region of the user, e.g. California.\n * @maxLength 100\n */\n region?: string | null;\n /**\n * The IANA timezone of the user, e.g. America/Los_Angeles.\n * https://timeapi.io/documentation/iana-timezones\n * @maxLength 100\n */\n timezone?: string | null;\n}\n\nexport interface ResponsesFunction {\n /**\n * The type of the function tool. Always function.\n * @maxLength 100\n */\n type?: string | null;\n /**\n * The name of the function to call.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the function does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the function accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n /** If true, the model will strictly follow the function parameters schema (a.k.a. OpenAI structured outputs). */\n strict?: boolean | null;\n}\n\nexport interface ResponsesCodeInterpreter {\n /**\n * The type of the code interpreter tool. Always code_interpreter.\n * @maxLength 100\n */\n type?: string | null;\n /** The code interpreter container configuration */\n container?: ResponsesCodeInterpreterContainer;\n}\n\nexport interface ResponsesCodeInterpreterContainer\n extends ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\n/** @oneof */\nexport interface ResponsesCodeInterpreterContainerContainerTypeOneOf {\n /**\n * Container ID string\n * @maxLength 100\n */\n containerId?: string | null;\n /** Auto container with file IDs */\n autoContainer?: ResponsesCodeInterpreterContainerAuto;\n}\n\nexport interface ResponsesCodeInterpreterContainerAuto {\n /**\n * Always \"auto\"\n * @maxLength 10\n */\n type?: string | null;\n}\n\n/** More info and default values at https://platform.openai.com/docs/api-reference/videos/create */\nexport interface CreateVideoRequest {\n /**\n * Text prompt that describes the video to generate.\n * @maxLength 10000\n */\n prompt?: string;\n /** The video generation model to use. */\n model?: V1VideoModelWithLiterals;\n /**\n * Size of the generated video (width x height in pixels). Examples: \"720x1280\", \"1280x720\".\n * @maxLength 50\n */\n size?: string | null;\n /**\n * Clip duration in seconds. Default is 4 seconds if not specified.\n * @min 1\n * @max 180\n */\n seconds?: number | null;\n /**\n * Optional publicly accessible URL to an image reference that guides generation.\n * @maxLength 5000\n * @format WEB_URL\n */\n inputReferenceUrl?: string | null;\n}\n\nexport enum V1VideoModel {\n UNKNOWN_VIDEO_MODEL = 'UNKNOWN_VIDEO_MODEL',\n SORA_2 = 'SORA_2',\n SORA_2_PRO = 'SORA_2_PRO',\n}\n\n/** @enumType */\nexport type V1VideoModelWithLiterals =\n | V1VideoModel\n | 'UNKNOWN_VIDEO_MODEL'\n | 'SORA_2'\n | 'SORA_2_PRO';\n\nexport interface ContentGenerationRequestedEvent {\n /** Prompt that the generation was requested for. 
*/\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface UserRequestInfo {\n /**\n * Interaction ID\n * @maxLength 100\n */\n interactionId?: string | null;\n /**\n * Additional tags. Use a comma-separated format for multiple tags.\n * @maxLength 1000\n */\n additionalTags?: string | null;\n /**\n * GenAI feature name, required by FinOps for evaluation\n * @maxLength 1000\n */\n featureName?: string | null;\n /**\n * AppDefId to which the cost will be attributed instead of the one that signs the request.\n * Will not work unless your application is explicitly allowed to override cost attribution.\n * Please reach out to #ai-tools-support if you think you need this field.\n * @format GUID\n */\n costAttributionOverrideId?: string | null;\n}\n\nexport interface ContentGenerationSucceededEvent {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateContentModelResponse\n extends GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Invoke Amazon Converse API response. */\n amazonConverseResponse?: InvokeConverseResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. 
*/\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n /** Extracted generated content data from the model's response. */\n generatedContent?: GeneratedContent;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n /** Token usage information. */\n tokenUsage?: V1TokenUsage;\n /** Metadata about the response, such as finish reason. */\n responseMetadata?: ResponseMetadata;\n}\n\n/** @oneof */\nexport interface GenerateContentModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. 
*/\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Invoke Amazon Converse API response. */\n amazonConverseResponse?: InvokeConverseResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** Google AI - Generate Video response. */\n googleGenerateVideoResponse?: GenerateVideoResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Runware Video inference response */\n runwareVideoInferenceResponse?: VideoInferenceResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /** Open AI Responses API response via Azure */\n azureOpenAiResponsesResponse?: OpenAiResponsesResponse;\n /** OpenAI video generation response */\n openAiCreateVideoResponse?: CreateVideoResponse;\n}\n\n/** Model generation result, at least one of the fields should be present */\nexport interface GeneratedContent {\n /**\n * Zero or more textual results. Only present when the model returned a text.\n * @maxSize 1000\n */\n texts?: TextContent[];\n /**\n * Zero or more images. Only present when the model returned an image.\n * @maxSize 1000\n */\n images?: MediaContent[];\n /**\n * Zero or more videos. Only present when the model returned a video.\n * @maxSize 1000\n */\n videos?: MediaContent[];\n /**\n * Zero or more thinking texts. Only present when the model returned a thought.\n * @maxSize 1000\n */\n thinkingTexts?: ThinkingTextContent[];\n /**\n * Zero or more tool call requests. Only present when the model requested to call a tool.\n * @maxSize 1000\n */\n tools?: ToolUseContent[];\n}\n\nexport interface TextContent {\n /**\n * Generated text\n * @maxLength 1000000\n */\n generatedText?: string | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. 
A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface MediaContent {\n /**\n * Mime type, e.g. \"image/jpeg\" or \"video/mp4\"\n * @maxLength 500\n */\n mimeType?: string | null;\n /**\n * Wix Media Platform (WixMP) url where the image or video is stored.\n * @maxLength 5000\n */\n url?: string;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ThinkingTextContent {\n /**\n * The thought text of the model thinking\n * @maxLength 1000000\n */\n thoughtText?: string | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface ToolUseContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n id?: string | null;\n /**\n * Tool use name\n * @maxLength 1000\n */\n name?: string;\n /** Tool use input */\n input?: Record<string, any> | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport interface V1TokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n /** Total number of tokens used in the request. */\n totalTokens?: number | null;\n /** cache creation token usage */\n cacheCreationTokens?: number | null;\n /** cache read token usage */\n cacheReadTokens?: number | null;\n /** thought tokens usage */\n thoughtsTokens?: number | null;\n /** tool use tokens usage */\n toolUseTokens?: number | null;\n}\n\nexport interface ResponseMetadata {\n /**\n * Finish reason of the model response.\n * @maxLength 1000\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: OpenaiproxyV1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface CreateChatCompletionResponsePromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseCompletionTokenDetails {\n /** Reasoning tokens present in the completion. 
*/\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: OpenaiproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface OpenaiproxyV1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: CreateChatCompletionResponsePromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CreateChatCompletionResponseCompletionTokenDetails;\n}\n\nexport interface TextBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: TextBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface TextBisonPrediction {\n /**\n * The result generated from input text.\n * @maxLength 100000\n */\n content?: string | null;\n /** Citation metadata */\n citationMetadata?: CitationMetadata;\n /** A collection of categories and their associated confidence scores. */\n safetyAttributes?: SafetyAttribute;\n}\n\nexport interface CitationMetadata {\n /**\n * Citations array\n * @maxSize 1000\n */\n citations?: V1Citation[];\n}\n\nexport interface V1Citation {\n /** Index in the prediction output where the citation starts (inclusive). Must be >= 0 and < end_index. */\n startIndex?: number | null;\n /** Index in the prediction output where the citation ends (exclusive). Must be > start_index and < len(output). */\n endIndex?: number | null;\n /**\n * URL associated with this citation. If present, this URL links to the webpage of the source of this citation.\n * Possible URLs include news websites, GitHub repos, etc.\n * @maxLength 1000\n */\n url?: string | null;\n /**\n * Title associated with this citation. If present, it refers to the title of the source of this citation.\n * Possible titles include news titles, book titles, etc.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * License associated with this recitation. If present, it refers to the license of the source of this citation.\n * Possible licenses include code licenses, e.g., mit license.\n * @maxLength 100\n */\n license?: string | null;\n /**\n * Publication date associated with this citation. If present, it refers to the date at which the source of this citation was published.\n * Possible formats are YYYY, YYYY-MM, YYYY-MM-DD.\n * @maxLength 100\n */\n publicationDate?: string | null;\n}\n\nexport interface SafetyAttribute {\n /**\n * The display names of Safety Attribute categories associated with the generated content. Order matches the Scores.\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /** A flag indicating if the model's input or output was blocked. 
*/\n blocked?: boolean | null;\n /**\n * The confidence scores of each category; a higher value means higher confidence.\n * @maxSize 100\n */\n scores?: number[] | null;\n /**\n * An error code that identifies why the input or output was blocked.\n * For a list of error codes, see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/responsible-ai#safety_filters_and_attributes.\n * @maxSize 100\n */\n errors?: string[] | null;\n}\n\nexport interface Metadata {\n /** TokenMetadata object */\n tokenMetadata?: TokenMetadata;\n}\n\nexport interface TokenMetadata {\n /** Number of input tokens. This is the total number of tokens across all messages, examples, and context. */\n inputTokenCount?: TokenCount;\n /** Number of output tokens. This is the total number of tokens in content across all candidates in the response. */\n outputTokenCount?: TokenCount;\n}\n\nexport interface TokenCount {\n /** Number of tokens */\n totalTokens?: number | null;\n /** Number of billable characters */\n totalBillableCharacters?: number | null;\n}\n\nexport interface ChatBisonPredictResponse {\n /**\n * Response predictions\n * @maxSize 100\n */\n predictions?: ChatBisonPrediction[];\n /** Response metadata */\n metadata?: Metadata;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface ChatBisonPrediction {\n /**\n * The chat result generated from the given message.\n * @maxSize 100\n */\n candidates?: ChatMessage[];\n /**\n * Citation metadata\n * @maxSize 100\n */\n citationMetadata?: CitationMetadata[];\n /**\n * An array of collections of categories and their associated confidence scores. 1-1 mapping to candidates.\n * @maxSize 100\n */\n safetyAttributes?: SafetyAttribute[];\n}\n\nexport interface CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface PromptTokenDetails {\n /** Audio input tokens present in the prompt. */\n audioTokens?: number | null;\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface CompletionTokenDetails {\n /** Reasoning tokens present in the completion. */\n reasoningTokens?: number | null;\n /** Audio tokens present in the completion. */\n audioTokens?: number | null;\n /** Accepted prediction tokens. */\n acceptedPredictionTokens?: number | null;\n /** Rejected prediction tokens. */\n rejectedPredictionTokens?: number | null;\n}\n\nexport interface CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. 
*/\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: V1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n /** Breakdown of tokens used in the prompt. */\n promptTokenDetails?: PromptTokenDetails;\n /** Breakdown of tokens used in the completion. */\n completionTokenDetails?: CompletionTokenDetails;\n}\n\nexport interface GenerateContentResponse {\n /**\n * The generated response.\n * @maxSize 1000\n */\n candidates?: Candidate[];\n /** The usage metadata. */\n usageMetadata?: UsageMetadata;\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Candidate {\n /** The generated response content. */\n content?: CandidateContent;\n /** The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens. */\n finishReason?: FinishReasonWithLiterals;\n /**\n * The safety ratings of the response.\n * @maxSize 100\n */\n safetyRatings?: SafetyRating[];\n /** The citation metadata of the response. */\n citationMetadata?: CandidateCitationMetadata;\n /** Output only. Metadata specifies sources used to ground generated content. */\n groundingMetadata?: GroundingMetadata;\n}\n\nexport interface CandidateContent {\n /**\n * The generated response content.\n * @maxSize 1000\n */\n parts?: CandidateContentPart[];\n}\n\nexport interface CandidateContentPart {\n /**\n * The text generated by the model.\n * @maxLength 100000\n */\n text?: string | null;\n /** function call */\n functionCall?: FunctionCall;\n /**\n * Code generated by the model that is meant to be executed, and the result returned to the model.\n * Only generated when using the CodeExecution tool, in which the code will be automatically executed, and a corresponding CodeExecutionResult will also be generated.\n */\n executableCode?: ExecutableCode;\n /**\n * Result of executing the ExecutableCode.\n * Only generated when using the CodeExecution, and always follows a part containing the ExecutableCode.\n */\n codeExecutionResult?: V1CodeExecutionResult;\n /** Inline media bytes. */\n inlineData?: Blob;\n /**\n * Thought flag indicates that the content part is a thought.\n * @readonly\n */\n thought?: boolean | null;\n /**\n * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.\n * @maxLength 10000000\n */\n thoughtSignature?: string | null;\n}\n\nexport enum FinishReason {\n UNKNOWN_FINISH_REASON = 'UNKNOWN_FINISH_REASON',\n /** The finish reason is unspecified. */\n UNSPECIFIED = 'UNSPECIFIED',\n /** Natural stop point of the model or provided stop sequence. */\n STOP = 'STOP',\n /** The maximum number of tokens as specified in the request was reached. */\n MAX_TOKENS = 'MAX_TOKENS',\n /**\n * The token generation was stopped as the response was flagged for safety reasons.\n * Note that Candidate.content is empty if content filters block the output.\n */\n SAFETY = 'SAFETY',\n /** The token generation was stopped as the response was flagged for unauthorized citations. 
*/\n RECITATION = 'RECITATION',\n /** All other reasons that stopped the token generation. */\n OTHER = 'OTHER',\n /** The response candidate content was flagged for using an unsupported language. */\n LANGUAGE = 'LANGUAGE',\n /** Token generation stopped because the content contains forbidden terms. */\n BLOCKLIST = 'BLOCKLIST',\n /** Token generation stopped for potentially containing prohibited content. */\n PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',\n /** Token generation stopped because the content potentially contains Sensitive Personally Identifiable Information (SPII). */\n SPII = 'SPII',\n /** The function call generated by the model is invalid. */\n MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL',\n /** Token generation stopped because generated images contain safety violations. */\n IMAGE_SAFETY = 'IMAGE_SAFETY',\n /** Model generated a tool call but no tools were enabled in the request. */\n UNEXPECTED_TOOL_CALL = 'UNEXPECTED_TOOL_CALL',\n /** Model called too many tools consecutively, thus the system exited execution. */\n TOO_MANY_TOOL_CALLS = 'TOO_MANY_TOOL_CALLS',\n}\n\n/** @enumType */\nexport type FinishReasonWithLiterals =\n | FinishReason\n | 'UNKNOWN_FINISH_REASON'\n | 'UNSPECIFIED'\n | 'STOP'\n | 'MAX_TOKENS'\n | 'SAFETY'\n | 'RECITATION'\n | 'OTHER'\n | 'LANGUAGE'\n | 'BLOCKLIST'\n | 'PROHIBITED_CONTENT'\n | 'SPII'\n | 'MALFORMED_FUNCTION_CALL'\n | 'IMAGE_SAFETY'\n | 'UNEXPECTED_TOOL_CALL'\n | 'TOO_MANY_TOOL_CALLS';\n\nexport interface SafetyRating {\n /** The safety category that the response belongs to. */\n category?: HarmCategoryWithLiterals;\n /** The probability that the response belongs to the specified safety category. */\n probability?: HarmProbabilityWithLiterals;\n /** The probability score that the response belongs to the specified safety category. */\n probabilityScore?: number | null;\n /**\n * The severity of the response's safety rating.\n * @maxLength 100\n */\n severity?: string | null;\n /** The severity score of the response's safety rating. */\n severityScore?: number | null;\n /**\n * A boolean flag associated with a safety attribute that indicates if the model's input or output was blocked.\n * If blocked is true, then the errors field in the response contains one or more error codes.\n * If blocked is false, then the response doesn't include the errors field.\n */\n blocked?: boolean | null;\n}\n\nexport enum HarmProbability {\n UNKNOWN_PROBABILITY = 'UNKNOWN_PROBABILITY',\n NEGLIGIBLE = 'NEGLIGIBLE',\n LOW = 'LOW',\n MEDIUM = 'MEDIUM',\n HIGH = 'HIGH',\n}\n\n/** @enumType */\nexport type HarmProbabilityWithLiterals =\n | HarmProbability\n | 'UNKNOWN_PROBABILITY'\n | 'NEGLIGIBLE'\n | 'LOW'\n | 'MEDIUM'\n | 'HIGH';\n\nexport interface CandidateCitationMetadata {\n /**\n * The citations of the response.\n * @maxSize 1000\n */\n citations?: CandidateCitationMetadataCitation[];\n}\n\nexport interface PublicationDate {\n /** The year of the publication date. */\n year?: number | null;\n /** The month of the publication date. */\n month?: number | null;\n /** The day of the publication date. */\n day?: number | null;\n}\n\nexport interface CandidateCitationMetadataCitation {\n /** An integer that specifies where a citation starts in the content. */\n startIndex?: number | null;\n /** An integer that specifies where a citation ends in the content. */\n endIndex?: number | null;\n /**\n * The URI of a citation source. 
Examples of a URI source might be a news website or a GitHub repository.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * The title of a citation source. Examples of source titles might be that of a news article or a book.\n * @maxLength 500\n */\n title?: string | null;\n /**\n * The license associated with a citation.\n * @maxLength 500\n */\n license?: string | null;\n /** The date a citation was published. Its valid formats are YYYY, YYYY-MM, and YYYY-MM-DD. */\n publicationDate?: PublicationDate;\n}\n\n/** Metadata returned to client when grounding is enabled. */\nexport interface GroundingMetadata {\n /**\n * Optional. Web search queries for the following-up web search.\n * @maxSize 1000\n * @maxLength 1000\n */\n webSearchQueries?: string[];\n /** Optional. Google search entry for the following-up web searches. */\n searchEntryPoint?: SearchEntryPoint;\n /**\n * List of supporting references retrieved from specified grounding source.\n * @maxSize 1000\n */\n groundingChunks?: GroundingChunk[];\n /**\n * Optional. List of grounding support.\n * @maxSize 1000\n */\n groundingSupports?: GroundingSupport[];\n /** Optional. Output only. Retrieval metadata. */\n retrievalMetadata?: RetrievalMetadata;\n}\n\n/** Google search entry point. */\nexport interface SearchEntryPoint {\n /**\n * Optional. Web content snippet that can be embedded in a web page or an app webview.\n * @maxLength 10000000\n */\n renderedContent?: string | null;\n /** Optional. Base64 encoded JSON representing array of <search term, search url> tuple. */\n sdkBlob?: Uint8Array | null;\n}\n\n/** Grounding chunk. */\nexport interface GroundingChunk extends GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** @oneof */\nexport interface GroundingChunkChunkTypeOneOf {\n /** Grounding chunk from the web. */\n web?: Web;\n /** Grounding chunk from context retrieved by the retrieval tools. */\n retrievedContext?: RetrievedContext;\n}\n\n/** Chunk from the web. */\nexport interface Web {\n /**\n * URI reference of the chunk.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the chunk.\n * @maxLength 1000\n */\n title?: string | null;\n}\n\n/** Chunk from context retrieved by the retrieval tools. */\nexport interface RetrievedContext {\n /**\n * URI reference of the attribution.\n * @format WEB_URL\n */\n uri?: string | null;\n /**\n * Title of the attribution.\n * @maxLength 1000\n */\n title?: string | null;\n /**\n * Text of the attribution.\n * @maxLength 100000\n */\n text?: string | null;\n}\n\n/** Grounding support. */\nexport interface GroundingSupport {\n /** Segment of the content this support belongs to. */\n segment?: Segment;\n /**\n * A list of indices (into 'grounding_chunk') specifying the\n * citations associated with the claim. For instance [1,3,4] means\n * that grounding_chunk[1], grounding_chunk[3],\n * grounding_chunk[4] are the retrieved content attributed to the claim.\n * @maxSize 1000\n */\n groundingChunkIndices?: number[];\n /**\n * Confidence score of the support references. Ranges from 0 to 1. 1 is the\n * most confident. This list must have the same size as the\n * grounding_chunk_indices.\n * @maxSize 1000\n */\n confidenceScores?: number[];\n}\n\n/** Segment of the content. */\nexport interface Segment {\n /** Output only. The index of a Part object within its parent Content object. 
*/\n partIndex?: number | null;\n /**\n * Output only. Start index in the given Part, measured in bytes. Offset from\n * the start of the Part, inclusive, starting at zero.\n */\n startIndex?: number;\n /**\n * Output only. End index in the given Part, measured in bytes. Offset from\n * the start of the Part, exclusive, starting at zero.\n */\n endIndex?: number;\n /**\n * Output only. The text corresponding to the segment from the response.\n * @maxLength 100000\n */\n text?: string;\n}\n\n/** Metadata related to retrieval in the grounding flow. */\nexport interface RetrievalMetadata {\n /**\n * Optional. Score indicating how likely information from Google Search could\n * help answer the prompt. The score is in the range `[0, 1]`, where 0 is the\n * least likely and 1 is the most likely. This score is only populated when\n * Google Search grounding and dynamic retrieval is enabled. It will be\n * compared to the threshold to determine whether to trigger Google Search.\n */\n googleSearchDynamicRetrievalScore?: number | null;\n}\n\nexport interface UsageMetadata {\n /** Number of tokens in the request. */\n promptTokenCount?: number | null;\n /** Number of tokens in the response. */\n candidatesTokenCount?: number | null;\n /** Number of tokens in the request and response(s). */\n totalTokenCount?: number | null;\n /** Optional. Number of tokens of thoughts for thinking models. */\n thoughtsTokenCount?: number | null;\n /**\n * Output only. List of modalities that were processed in the request input.\n * @maxSize 10\n */\n promptTokensDetails?: ModalityTokenCount[];\n /**\n * Output only. List of modalities that were returned in the response.\n * @maxSize 10\n */\n candidatesTokensDetails?: ModalityTokenCount[];\n}\n\nexport interface ModalityTokenCount {\n /** The modality associated with this token count. */\n modality?: ModalityWithLiterals;\n /** Number of tokens. */\n tokenCount?: string | null;\n}\n\nexport interface InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: RoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. 
For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: Usage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n */\n contentBlocks?: ContentBlock[];\n}\n\nexport enum ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type ResponseTypeTypeWithLiterals =\n | ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number of tokens that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface V1InvokeAnthropicClaudeModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n responseType?: GoogleproxyV1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: V1MessageRoleRoleWithLiterals;\n /**\n * The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @maxLength 1000000000\n * @maxSize 4096\n * @deprecated The content generated by the model.\n * DEPRECATED - this field only returns text content that was generated. For full output including text and tool_use blocks use `content_blocks` field.\n * @replacedBy content_blocks\n * @targetRemovalDate 2024-11-01\n */\n content?: string[];\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: GoogleproxyV1Usage;\n /** Cost of the request in microcents. 
*/\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 1000\n */\n contentBlocks?: GoogleproxyV1ContentBlock[];\n}\n\nexport enum GoogleproxyV1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type GoogleproxyV1ResponseTypeTypeWithLiterals =\n | GoogleproxyV1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface GoogleproxyV1Usage {\n /** The number of input tokens in the request. */\n inputTokens?: number;\n /** The number of tokens that the model generated in the response. */\n outputTokens?: number;\n /** Number of tokens written to the cache when creating a new entry */\n cacheCreationInputTokens?: number | null;\n /** Number of tokens retrieved from the cache for this request */\n cacheReadInputTokens?: number | null;\n}\n\nexport interface InvokeAnthropicModelResponse {\n /**\n * The unique identifier for the response. The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * \"end_turn\": the model reached a natural stopping point\n * \"max_tokens\": we exceeded the requested max_tokens or the model's maximum\n * \"stop_sequence\": one of your provided custom stop_sequences was generated\n * \"tool_use\": the model invoked one or more tools\n * \"pause_turn\": we paused a long-running turn. You may provide the response back as-is in a subsequent request to let the model continue.\n * \"refusal\": when streaming classifiers intervene to handle potential policy violations\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** The type of response. */\n type?: V1ResponseTypeTypeWithLiterals;\n /** The conversational role of the generated message. The value is always `ASSISTANT`. */\n role?: MessageRoleRoleWithLiterals;\n /** Container for the number of tokens that you supplied in the request and the number of tokens that the model generated in the response. */\n usage?: V1Usage;\n /**\n * Information about the container used in this request.\n * This will be non-null if a container tool (e.g. code execution) was used.\n */\n container?: Container;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * Content generated by the model.\n * This is an array of content blocks, each of which has a type that determines its shape.\n * @maxSize 4096\n */\n content?: V1ContentBlock[];\n}\n\nexport enum V1ResponseTypeType {\n UNKNOWN = 'UNKNOWN',\n MESSAGE = 'MESSAGE',\n}\n\n/** @enumType */\nexport type V1ResponseTypeTypeWithLiterals =\n | V1ResponseTypeType\n | 'UNKNOWN'\n | 'MESSAGE';\n\nexport interface V1Usage {\n /** Breakdown of cached tokens by TTL */\n cacheCreation?: UsageCacheCreation;\n /** The number of input tokens used to create the cache entry. */\n cacheCreationInputTokens?: number | null;\n /** The number of input tokens read from the cache. */\n cacheReadInputTokens?: number | null;\n /** The number of input tokens which were used. */\n inputTokens?: number;\n /** The number of output tokens which were used. */\n outputTokens?: number;\n /** The number of server tool requests. 
*/\n serverToolUse?: UsageServerToolUse;\n /**\n * If the request used the priority, standard, or batch tier.\n * Available options: standard, priority, batch\n * @maxLength 500\n */\n serviceTier?: string | null;\n}\n\nexport interface UsageCacheCreation {\n /** The number of input tokens used to create the 1 hour cache entry. */\n ephemeral1hInputTokens?: number;\n /** The number of input tokens used to create the 5 minute cache entry. */\n ephemeral5mInputTokens?: number;\n}\n\nexport interface UsageServerToolUse {\n /** The number of web search tool requests. */\n webSearchRequests?: number;\n /** The number of web fetch tool requests. */\n webFetchRequests?: number;\n}\n\nexport interface Container {\n /**\n * The time at which the container will expire.\n * @maxLength 100\n */\n expiresAt?: string;\n /**\n * Identifier for the container used in this request\n * @maxLength 512\n */\n id?: string;\n}\n\nexport interface InvokeLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeConverseResponse {\n /** The model's generated output. */\n output?: Output;\n /**\n * Why the model stopped: 'end_turn', 'max_tokens', 'stop_sequence', 'guardrail_intervened', or 'content_filtered'.\n * @maxLength 500\n */\n stopReason?: string | null;\n /** Token usage statistics including cache metrics. */\n usage?: InvokeConverseResponseTokenUsage;\n /** Performance metrics including latency. */\n metrics?: Metrics;\n /** Model-specific response fields as a JSON object. */\n additionalModelResponseFields?: Record<string, any> | null;\n /** The performance configuration applied to this request. */\n performanceConfig?: ConversePerformanceConfig;\n /** Total cost in microcents for this request */\n microcentsSpent?: string | null;\n}\n\n/** Container for the model's generated output. */\nexport interface Output {\n /** The generated message with role and content blocks. */\n message?: ConverseMessage;\n}\n\n/** todo: expose serverToolUsage */\nexport interface InvokeConverseResponseTokenUsage {\n /** Tokens in the input (prompt, history, system prompts). */\n inputTokens?: number;\n /** Tokens generated in the response. */\n outputTokens?: number;\n /** Total tokens processed (input + output). */\n totalTokens?: number;\n /** Tokens retrieved from cache. Only present when prompt caching is enabled. */\n cacheReadInputTokens?: number | null;\n /** Tokens written to cache for future requests. Only present when prompt caching is enabled. */\n cacheWriteInputTokens?: number | null;\n}\n\nexport interface Metrics {\n /** End-to-end latency in milliseconds. 
*/\n latencyMs?: number;\n}\n\nexport interface InvokeMlPlatformLlamaModelResponse {\n /**\n * The generated text.\n * @maxLength 1000000\n */\n generation?: string | null;\n /** The number of tokens in the prompt. */\n promptTokenCount?: number | null;\n /** The number of tokens in the generated text. */\n generationTokenCount?: number | null;\n /**\n * The reason why the response stopped generating text. Possible values are:\n * stop – The model has finished generating text for the input prompt.\n * length – The length of the tokens for the generated text exceeds the value of max_gen_len in the call to InvokeModel\n * (InvokeModelWithResponseStream, if you are streaming output). The response is truncated to max_gen_len tokens.\n * Consider increasing the value of max_gen_len and trying again.\n * @maxLength 1000\n */\n stopReason?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface InvokeChatCompletionResponse {\n /**\n * Unique response ID\n * @maxLength 100\n */\n id?: string | null;\n /** The model used to generate the response */\n model?: PerplexityModelWithLiterals;\n /**\n * The object type, which always equals chat.completion\n * @maxLength 100\n */\n object?: string | null;\n /** The Unix timestamp (in seconds) of when the completion was created */\n created?: number | null;\n /**\n * Citations for the generated answer\n * @maxLength 10000\n * @maxSize 1000\n */\n citations?: string[];\n /** The list of completion choices the model generated for the input prompt */\n choices?: InvokeChatCompletionResponseChoice[];\n /** URLs and size metadata for returned images */\n images?: PerplexityImageDescriptor[];\n /**\n * Further questions related to the search\n * @maxLength 10000\n * @maxSize 1000\n */\n relatedQuestions?: string[];\n /** Usage statistics for the completion request. */\n usage?: InvokeChatCompletionResponseUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\n/** Structures the completion choice */\nexport interface InvokeChatCompletionResponseChoice {\n /** Choice index */\n index?: number | null;\n /**\n * Stop reason, can be `STOP` or `LENGTH`\n * @maxLength 10\n */\n finishReason?: string | null;\n /** Choice message, containing content and role */\n message?: PerplexityMessage;\n}\n\nexport interface PerplexityImageDescriptor {\n /**\n * Full image url\n * @maxLength 5000\n */\n imageUrl?: string | null;\n /**\n * Image origin website\n * @maxLength 5000\n */\n originUrl?: string | null;\n /** Height */\n height?: number | null;\n /** Width */\n width?: number | null;\n}\n\n/** Usage statistics for the completion request. */\nexport interface InvokeChatCompletionResponseUsage {\n /** The number of tokens provided in the request prompt. */\n promptTokens?: number | null;\n /** The number of tokens generated in the response output. */\n completionTokens?: number | null;\n /** The total number of tokens used in the chat completion (prompt + completion). */\n totalTokens?: number | null;\n /** Tokens passed into the input from citations found during search. Priced like `prompt_tokens` */\n citationTokens?: number | null;\n /** Reasoning tokens are used to reason through the research material before generating the final output via the CoTs */\n reasoningTokens?: number | null;\n /** Number of search queries executed. 
*/\n numSearchQueries?: number | null;\n}\n\nexport interface CreateImageResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: V1ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * The prompt that was used to generate the image, if there was any revision to the prompt.\n * @maxLength 100000\n */\n revisedPrompt?: string | null;\n}\n\nexport interface V1TextToImageResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageObject {\n /**\n * The WixMp URL of the generated image, available for 24 hours.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /** A specific value [0 .. 4294967294] used to guide the 'randomness' of the generation. */\n seed?: string | null;\n /**\n * Finish reason by the model provider.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface GenerateCoreResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageCoreModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateStableDiffusionResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: ImageStableDiffusionModelWithLiterals;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateAnImageResponse {\n /**\n * The ID of the task.\n * @format GUID\n */\n id?: string | null;\n /**\n * Status of the image generation.\n * One of: Task not found, Pending, Request Moderated, Content Moderated, Ready, Error\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface ResultObject {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * Prompt used for image generation.\n * @maxLength 1000000\n */\n prompt?: string | null;\n /** Seed used for image generation. */\n seed?: string | null;\n}\n\nexport interface CreatePredictionResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /** Cost of the request in microcents. 
*/\n microcentsSpent?: string | null;\n /** Token counts */\n tokenUsage?: CreatePredictionResponseTokenUsage;\n}\n\nexport interface CreatePredictionResponseTokenUsage {\n /** Number of input tokens used in the request. */\n inputTokens?: number | null;\n /** Number of output tokens generated by the model. */\n outputTokens?: number | null;\n}\n\nexport interface EditImageWithPromptResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: EditImageWithPromptRequestModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface TextToImageResponse {\n /**\n * Generation TextToImageTaskResult\n * @maxSize 1000\n */\n data?: TextToImageTaskResult[];\n}\n\nexport interface TextToImageTaskResult {\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * The unique identifier of the image.\n * @format GUID\n */\n imageUuid?: string;\n /**\n * If outputType is set to URL, this parameter contains the URL of the image to be downloaded.\n * @maxLength 2048\n */\n imageUrl?: string | null;\n /** If checkNSFW parameter is used, NSFWContent is included informing if the image has been flagged as potentially sensitive content. */\n nsfwContent?: boolean;\n /** A cost of generated image. */\n microcentsSpent?: string | null;\n /**\n * A seed is a value used to randomize the image generation.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n}\n\nexport interface GenerateImageResponse {\n /**\n * Array of generated image results, one for each requested sampleCount\n * @maxSize 8\n */\n predictions?: Prediction[];\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\nexport interface Prediction {\n /**\n * The URL of the generated image.\n * @maxLength 5000\n * @format WEB_URL\n * @readonly\n */\n url?: string | null;\n /**\n * Enhanced prompt used for generation (only returned for models that support prompt enhancement)\n * @maxLength 1000\n */\n prompt?: string | null;\n /**\n * The responsible AI filter reason\n * Only returned if includeRaiReason is enabled and this image was filtered out\n * @maxLength 1000\n */\n raiFilteredReason?: string | null;\n /** Safety attributes information */\n safetyAttributes?: SafetyAttributes;\n}\n\nexport interface SafetyAttributes {\n /**\n * The safety attribute categories\n * @maxSize 100\n * @maxLength 100\n */\n categories?: string[] | null;\n /**\n * The safety attribute scores\n * @maxSize 100\n */\n scores?: number[] | null;\n}\n\nexport interface GenerateVideoResponse {\n /**\n * Generated videos\n * @maxSize 4\n */\n videos?: GeneratedVideo[];\n /** Cost of the request in micro-cents. 
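// SafetyAttributes above carries two parallel arrays (categories and scores).
// A minimal sketch that pairs them up, assuming the arrays are index-aligned
// as the matching @maxSize annotations suggest; the helper is illustrative.
interface SafetyAttributesLike {
  categories?: string[] | null;
  scores?: number[] | null;
}
function pairSafetyScores(sa?: SafetyAttributesLike): Array<[string, number]> {
  const categories = sa?.categories ?? [];
  const scores = sa?.scores ?? [];
  return categories.map((category, i) => [category, scores[i] ?? 0]);
}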
*/\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedVideo {\n /**\n * The URL of the generated video.\n * @format WEB_URL\n */\n videoUrl?: string | null;\n /**\n * The video MIME type (currently only \"video/mp4\")\n * @maxLength 50\n */\n mimeType?: string | null;\n}\n\nexport interface GenerateImageMlPlatformResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface CreateImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface ImageUsage {\n /** Number of tokens in the input */\n inputTokens?: number | null;\n /** Details about input tokens */\n inputTokensDetails?: OpenAiImageTokenDetails;\n /** Number of tokens in the output */\n outputTokens?: number | null;\n /** Output tokens details */\n outputTokensDetails?: OpenAiImageTokenDetails;\n /** Total number of tokens used */\n totalTokens?: number | null;\n}\n\nexport interface OpenAiImageTokenDetails {\n /** Number of tokens used for image processing */\n imageTokens?: number | null;\n /** Number of tokens used for text processing */\n textTokens?: number | null;\n}\n\nexport interface EditImageOpenAiResponse {\n /**\n * The generated images objects.\n * @maxSize 10\n */\n data?: V1ImageObject[];\n /** Image model used to generate the image. */\n model?: OpenAiImageModelWithLiterals;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n /** Usage information from the API response */\n usage?: ImageUsage;\n}\n\nexport interface V1CreateChatCompletionResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /** Model that produced the completion. */\n model?: ChatCompletionModelWithLiterals;\n /** A list of chat completion choices. Can be more than one if n is greater than 1. */\n choices?: V1CreateChatCompletionResponseChoice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1CreateChatCompletionResponseTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseChoice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. 
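// A minimal sketch over the ImageUsage / OpenAiImageTokenDetails shapes above:
// reconciling the per-modality (image vs. text) breakdown with the reported
// totals. Local shapes mirror the interfaces; nothing here is a package export.
interface ImageTokenDetailsLike { imageTokens?: number | null; textTokens?: number | null }
interface ImageUsageLike {
  inputTokensDetails?: ImageTokenDetailsLike;
  outputTokensDetails?: ImageTokenDetailsLike;
  totalTokens?: number | null;
}
function detailsSum(d?: ImageTokenDetailsLike): number {
  return (d?.imageTokens ?? 0) + (d?.textTokens ?? 0);
}
// True when the image+text breakdown accounts for the whole reported total.
function usageIsConsistent(u: ImageUsageLike): boolean {
  return detailsSum(u.inputTokensDetails) + detailsSum(u.outputTokensDetails) ===
    (u.totalTokens ?? 0);
}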
*/\n message?: GoogleproxyV1ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface V1CreateChatCompletionResponseTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface InvokeMlPlatformOpenAIChatCompletionRawResponse {\n /**\n * A unique identifier for the chat completion.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * Description of the response object. Will be equal to \"chat.completion\" for chat completion.\n * @maxLength 100\n */\n object?: string | null;\n /** Timestamp for when the response was created. */\n created?: number | null;\n /**\n * Model that produced the completion.\n * @maxLength 10000\n */\n modelId?: string;\n /**\n * A list of chat completion choices. Can be more than one if n is greater than 1.\n * @maxSize 10000\n */\n choices?: Choice[];\n /** TokenUsage object describing the tokens usage per request. */\n usage?: TokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n /**\n * This fingerprint represents the backend configuration that the model runs with.\n * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n}\n\nexport interface Choice {\n /** Index of this Choice in choices array. */\n index?: number | null;\n /** ChatCompletionMessage object that defines the message. */\n message?: ChatCompletionMessage;\n /**\n * Reason why the message generation was stopped.\n * @maxLength 100\n */\n finishReason?: string | null;\n}\n\nexport interface TokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface VideoInferenceResponse {\n /**\n * Generation VideoInferenceTaskResult\n * @maxSize 1000\n */\n data?: VideoInferenceTaskResult[];\n}\n\nexport interface VideoInferenceTaskResult {\n /**\n * The API will return the taskType you sent in the request.\n * @maxLength 100\n */\n taskType?: string;\n /**\n * The API will return the taskUUID you sent in the request.\n * @format GUID\n */\n taskUuid?: string;\n /**\n * A unique identifier for the generated video.\n * @format GUID\n */\n videoUuid?: string | null;\n /**\n * If outputType is set to URL, this parameter contains the URL of the video to be downloaded.\n * @maxLength 10000\n */\n videoUrl?: string | null;\n /**\n * The seed value that was used to generate this video.\n * @min 1\n * @max 9223372036854776000\n */\n seed?: string | null;\n /** A cost of generated video. */\n microcentsSpent?: string | null;\n /**\n * The current processing status (for polling operations).\n * @maxLength 50\n */\n status?: string | null;\n}\n\nexport interface V1OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. 
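// Cost fields throughout these responses (microcentsSpent) are strings in
// microcents. A minimal conversion sketch, assuming 'microcent' means one
// millionth of a cent, i.e. 1e-8 USD; verify the unit against billing docs
// before relying on it.
function microcentsToUsd(microcentsSpent?: string | null): number {
  const microcents = Number(microcentsSpent ?? '0');
  return Number.isFinite(microcents) ? microcents / 1e8 : 0;
}
// Example: '250000000' microcents -> 2.5 USD under that assumption.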
*/\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: OpenAiResponsesResponseIncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. Refer to the model guide to browse and compare available models.\n */\n model?: V1ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: V1ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: V1ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: V1ResponsesTokenUsage;\n /** Cost of the request in microcents. 
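// The sampling docs above recommend altering temperature or top_p but not both.
// A minimal request-side guard sketch; the options shape and helper are
// illustrative, not a package API.
interface SamplingOptionsLike { temperature?: number | null; topP?: number | null }
function assertSingleSamplingKnob(opts: SamplingOptionsLike): void {
  if (opts.temperature != null && opts.topP != null) {
    throw new Error('set temperature or topP, not both');
  }
}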
*/\n microcentsSpent?: string | null;\n}\n\nexport interface OpenAiResponsesResponseIncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface V1ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: V1ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: V1ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: V1ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: V1ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: V1ResponsesCodeInterpreterToolCall;\n}\n\nexport interface V1ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: V1ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: V1ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface V1ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface V1ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface OpenAiResponsesResponse {\n /**\n * Unique identifier for this Response.\n * @maxLength 100\n */\n id?: string | null;\n /** Unix timestamp (in seconds) of when this Response was created. */\n createdAt?: string | null;\n /** Whether to run the model response in the background. */\n background?: boolean | null;\n /** Details about why the response is incomplete. */\n incompleteDetails?: IncompleteDetails;\n /** An upper bound for the number of tokens that can be generated for a response, including visible output tokens and reasoning tokens. */\n maxOutputTokens?: number | null;\n /**\n * The maximum number of total calls to built-in tools that can be processed in a response.\n * This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored.\n */\n maxToolCalls?: number | null;\n /**\n * Model ID used to generate the response, like gpt-4o or o3. OpenAI offers a wide range of models with different capabilities,\n * performance characteristics, and price points. 
Refer to the model guide to browse and compare available models.\n */\n model?: ResponsesModelWithLiterals;\n /**\n * The object type of this resource - always set to response.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An array of content items generated by the model.\n * The length and order of items in the output array is dependent on the model's response.\n * Rather than accessing the first item in the output array and assuming it's an assistant message with the content generated by the model,\n * you might consider using the output_text property where supported in SDKs.\n * @maxSize 1000\n */\n output?: ResponsesOutput[];\n /** Whether to allow the model to run tool calls in parallel. */\n parallelToolCalls?: boolean | null;\n /**\n * The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about conversation state.\n * @maxLength 100\n */\n previousResponseId?: string | null;\n /** The reasoning effort used by the model to generate the response. */\n reasoning?: ResponsesReasoning;\n /**\n * The status of the response generation. One of completed, failed, in_progress, cancelled, queued, or incomplete.\n * @maxLength 100\n */\n status?: string | null;\n /** What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. */\n temperature?: number | null;\n /**\n * An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.\n * @max 20\n */\n topLogprobs?: number | null;\n /**\n * An alternative to sampling with temperature, called nucleus sampling,\n * where the model considers the results of the tokens with top_p probability mass.\n * So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n * We generally recommend altering this or temperature but not both.\n * @max 20\n */\n topP?: number | null;\n /**\n * The truncation strategy to use for the model response.\n * auto: If the context of this response and previous ones exceeds the model's context window size, the model will truncate the response to fit the context window by dropping input items in the middle of the conversation.\n * disabled (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error.\n * @maxLength 100\n */\n truncation?: string | null;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: ResponsesTokenUsage;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface IncompleteDetails {\n /**\n * The reason why the response is incomplete.\n * @maxLength 100\n */\n reason?: string | null;\n}\n\nexport interface ResponsesOutput extends ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. 
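// The @oneof blocks above mean exactly one variant field is populated per
// ResponsesOutput item. A minimal narrowing sketch; the local shape lists the
// variants and the helper reports which one is set.
interface ResponsesOutputLike {
  outputMessage?: unknown;
  webSearchToolCall?: unknown;
  functionToolCall?: unknown;
  reasoning?: unknown;
  codeInterpreterToolCall?: unknown;
}
type OutputKind = 'message' | 'webSearch' | 'function' | 'reasoning' | 'codeInterpreter' | 'unknown';
function outputKind(o: ResponsesOutputLike): OutputKind {
  if (o.outputMessage) return 'message';
  if (o.webSearchToolCall) return 'webSearch';
  if (o.functionToolCall) return 'function';
  if (o.reasoning) return 'reasoning';
  if (o.codeInterpreterToolCall) return 'codeInterpreter';
  return 'unknown';
}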
*/\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\n/** @oneof */\nexport interface ResponsesOutputOutputOneOf {\n /** An output message from the model. */\n outputMessage?: ResponsesOutputMessage;\n /** The results of a web search tool call. See the web search guide for more information. */\n webSearchToolCall?: ResponsesWebSearchToolCall;\n /** A function call made by the model. */\n functionToolCall?: ResponsesFunctionToolCall;\n /** A reasoning item output from the model. */\n reasoning?: ResponsesReasoningOutput;\n /** A code interpreter item output from the model. */\n codeInterpreterToolCall?: ResponsesCodeInterpreterToolCall;\n}\n\nexport interface ResponsesTokenUsage {\n /** Number of LLM tokens required to encode input. */\n inputTokens?: number | null;\n /** A detailed breakdown of the input tokens. */\n inputTokensDetails?: ResponsesInputTokensDetails;\n /** Number of LLM tokens required to encode output. */\n outputTokens?: number | null;\n /** A detailed breakdown of the output tokens. */\n outputTokensDetails?: ResponsesOutputTokensDetails;\n /** Total number of LLM tokens used for the request. */\n totalTokens?: number | null;\n}\n\nexport interface ResponsesInputTokensDetails {\n /** Cached tokens present in the prompt. */\n cachedTokens?: number | null;\n}\n\nexport interface ResponsesOutputTokensDetails {\n /** Cached tokens present in the prompt. */\n reasoningTokens?: number | null;\n}\n\nexport interface CreateVideoResponse {\n videoJob?: VideoJob;\n}\n\nexport interface VideoJob {\n /**\n * The unique identifier for the video generation job.\n * @maxLength 200\n */\n id?: string | null;\n /**\n * The status of the response generation.\n * @maxLength 50\n */\n status?: string | null;\n /**\n * The generated video result url. Only present when status is \"completed\".\n * @maxLength 5000\n * @format WEB_URL\n */\n url?: string | null;\n /** Error payload that explains why generation failed, if applicable. */\n error?: ErrorInfo;\n /** The progress of the video generation as a percentage (0-100) */\n progress?: number | null;\n}\n\nexport interface ErrorInfo {\n /**\n * code\n * @maxLength 50\n */\n code?: string | null;\n /**\n * message\n * @maxLength 1000\n */\n message?: string | null;\n}\n\nexport interface ContentGenerationFailedEvent {\n /**\n * Error message that content generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateTextByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate text generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. 
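// A minimal payload sketch for the GenerateTextByPromptRequest shape above:
// promptId selects a previously published prompt, and params fills its
// templated placeholders. The GUID and parameter names are placeholders for
// illustration only.
const generateTextPayload = {
  promptId: '00000000-0000-0000-0000-000000000000', // illustrative GUID, not a real prompt
  params: { customerName: 'Ada', tone: 'friendly' }, // substituted into the prompt template
};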
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface FallbackProperties {\n /**\n * Flag to indicate whether to opt out of the request forwarding as a fallback.\n * Currently, only the fallback from OpenAI to Azure is supported for certain OpenAI models.\n * If set to true, the request will not be redirected to Azure in the event of a server failure by OpenAI.\n */\n optOut?: boolean | null;\n /** FallbackPromptConfig object that describes optional second Prompt that can be invoked in case main invocation fails. */\n fallbackPromptConfig?: FallbackPromptConfig;\n}\n\nexport interface DynamicRequestConfig {\n /**\n * List of GatewayToolDefinition's, used to overwrite tools in the prompt.\n * @maxSize 100\n */\n gatewayToolDefinitions?: GatewayToolDefinition[];\n /**\n * List of GatewayMessageDefinition's, which will be converted to model-specific format and appended to the messages saved in the prompt.\n * @maxSize 100\n */\n gatewayMessageDefinitions?: GatewayMessageDefinition[];\n}\n\nexport interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\n/** @oneof */\nexport interface GatewayToolDefinitionToolOneOf {\n /** Custom tool */\n customTool?: GatewayToolDefinitionCustomTool;\n /** Built-in tool */\n builtInTool?: BuiltInTool;\n}\n\nexport interface GatewayToolDefinitionCustomTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /**\n * The description of what the tool does.\n * @maxLength 100000\n */\n description?: string | null;\n /** The parameters the tool accepts, described as a JSON Schema object. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface BuiltInTool {\n /**\n * The name of the tool to be called.\n * @maxLength 64\n */\n name?: string | null;\n /** Optional parameters specific to the built-in tool. */\n parameters?: Record<string, any> | null;\n}\n\nexport interface GatewayMessageDefinition {\n /** The role of the message author. */\n role?: GatewayMessageDefinitionRoleWithLiterals;\n /**\n * The content of the message.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport enum GatewayMessageDefinitionRole {\n UNKNOWN = 'UNKNOWN',\n USER = 'USER',\n ASSISTANT = 'ASSISTANT',\n SYSTEM = 'SYSTEM',\n TOOL = 'TOOL',\n DEVELOPER = 'DEVELOPER',\n}\n\n/** @enumType */\nexport type GatewayMessageDefinitionRoleWithLiterals =\n | GatewayMessageDefinitionRole\n | 'UNKNOWN'\n | 'USER'\n | 'ASSISTANT'\n | 'SYSTEM'\n | 'TOOL'\n | 'DEVELOPER';\n\nexport interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. */\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\n/** @oneof */\nexport interface GatewayContentBlockTypeOneOf {\n /** Text content. */\n text?: TextContent;\n /** Media content, represented as URL. 
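// A minimal sketch of a DynamicRequestConfig carrying one custom tool, per the
// GatewayToolDefinitionCustomTool shape above: parameters is a JSON Schema
// object describing the tool's arguments. Tool name, description, and schema
// are illustrative.
const dynamicRequestConfig = {
  gatewayToolDefinitions: [
    {
      customTool: {
        name: 'get_order_status',
        description: 'Look up the fulfillment status of an order by its ID.',
        parameters: {
          type: 'object',
          properties: { orderId: { type: 'string' } },
          required: ['orderId'],
        },
      },
    },
  ],
};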
*/\n media?: MediaContent;\n /** Tool use content, describes which tool should be used and with which parameters. */\n toolUse?: ToolUseContent;\n /** Tool result content, describes the result of tool invocation. */\n toolResult?: ToolResultContent;\n /** Represents model's internal thought process. */\n thinking?: ThinkingTextContent;\n}\n\nexport interface ToolResultContent {\n /**\n * Tool use id\n * @maxLength 100\n */\n toolUseId?: string | null;\n /** Tool result is error. */\n error?: boolean | null;\n /**\n * Tool result content.\n * @maxSize 4096\n */\n content?: GatewayContentBlock[];\n}\n\nexport interface GenerateTextByPromptResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface ModelResponse extends ModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. */\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n /**\n * Extracted generated text messages from the model's response.\n * @maxSize 100\n * @maxLength 100000\n */\n generatedTexts?: string[] | null;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface ModelResponseResponseOneOf {\n /** OpenAI chat completion response. */\n openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;\n /** Google bison text completion response. */\n googleTextBisonResponse?: TextBisonPredictResponse;\n /** Google bison chat completion response. */\n googleChatBisonResponse?: ChatBisonPredictResponse;\n /** Azure OpenAI chat completion response. */\n azureChatCompletionResponse?: CreateChatCompletionResponse;\n /** Google Gemini generate content response. 
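// ModelResponse above is a oneof across vendor payloads, but it also exposes
// extracted convenience fields. A minimal reading sketch: prefer the
// pre-extracted generatedTexts rather than digging into vendor-specific
// shapes; the local shape and helper are illustrative.
interface ModelResponseLike {
  generatedTexts?: string[] | null;
  cost?: string | null;
}
function firstGeneratedText(response?: ModelResponseLike): string | undefined {
  return response?.generatedTexts?.[0] ?? undefined;
}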
*/\n googleGeminiGenerateContentResponse?: GenerateContentResponse;\n /** Anthropic Claude via Amazon Bedrock generate content response. */\n anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;\n /** Anthropic Claude via Google vertex generate content response. */\n googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;\n /** Native Anthropic API proxy generate content response. */\n invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;\n /** Llama via Amazon Bedrock text completion response. */\n llamaModelResponse?: InvokeLlamaModelResponse;\n /** Llama via ML Platform text completion response. */\n mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;\n /** Perplexity chat completion response. */\n perplexityChatCompletionResponse?: InvokeChatCompletionResponse;\n /** Google create chat completion response. */\n googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;\n /** ML Platform - invoke an OpenAI-type interface with a JSON string */\n mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;\n /** Open AI Responses API response */\n openAiResponsesResponse?: V1OpenAiResponsesResponse;\n}\n\nexport interface GenerationRequestedEvent {\n /** Prompt that the generation was requested for. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface TextGenerationSucceededEvent {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface TextGenerationFailedEvent {\n /**\n * Error message that text generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. 
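// The requested/succeeded/failed events above share an eventChainId per
// Generate* call. A minimal correlation sketch: key pending requests by chain
// ID and settle them when a terminal event arrives. Event shapes are narrowed
// to the fields used; everything here is illustrative.
const pendingByChain = new Map<string, (outcome: 'succeeded' | 'failed') => void>();
function awaitGeneration(eventChainId: string): Promise<'succeeded' | 'failed'> {
  return new Promise((resolve) => pendingByChain.set(eventChainId, resolve));
}
function onTextGenerationSucceeded(event: { eventChainId?: string }): void {
  if (event.eventChainId) pendingByChain.get(event.eventChainId)?.('succeeded');
}
function onTextGenerationFailed(event: { eventChainId?: string; errorMessage?: string }): void {
  if (event.eventChainId) pendingByChain.get(event.eventChainId)?.('failed');
}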
*/\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n /**\n * Extracted text content from the chunk.\n * @maxLength 100\n */\n content?: string | null;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\n/** @oneof */\nexport interface GeneratedTextChunkModelChunkOneOf {\n /** Azure OpenAI chat completion chunk. */\n azureChatCompletionChunk?: ChatCompletionChunk;\n /** OpenAI chat completion chunk. */\n openaiChatCompletionChunk?: V1ChatCompletionChunk;\n /** Anthropic (via Google proxy) chat completion chunk. */\n googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;\n /** Google Gemini GenerateContentResponse chunk. */\n googleGeminiStreamChunk?: GenerateContentResponse;\n /** Anthropic (via Amazon proxy) chat completion chunk. */\n amazonAnthropicStreamChunk?: AnthropicStreamChunk;\n /** Native Anthropic API proxy stream chunk. */\n anthropicStreamChunk?: V1AnthropicStreamChunk;\n}\n\nexport interface ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: V1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 100\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ToolCall[];\n}\n\nexport interface ChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkDelta;\n /**\n * The reason the model stopped generating tokens. 
This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface V1ChatCompletionChunk {\n /**\n * A unique identifier for the chat completion. Each chunk has the same ID.\n * @maxLength 100\n */\n responseId?: string | null;\n /**\n * A list of chat completion choices. Can contain more than one elements if n is greater than 1.\n * Can also be empty for the last chunk if you set stream_options: {\"include_usage\": true}.\n */\n choices?: ChatCompletionChunkChunkChoice[];\n /**\n * The Unix timestamp (in seconds) of when the chat completion was created.\n * Each chunk has the same timestamp.\n */\n created?: number | null;\n /** Model that produced the completion. */\n model?: OpenaiproxyV1ModelWithLiterals;\n /**\n * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the\n * seed request parameter to understand when backend changes have been made that might impact determinism.\n * @maxLength 10000\n */\n systemFingerprint?: string | null;\n /**\n * The object type, which is always chat.completion.chunk.\n * @maxLength 100\n */\n object?: string | null;\n /**\n * An optional field that will only be present when you set stream_options: {\"include_usage\": true} in your request.\n * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.\n */\n usage?: OpenaiproxyV1CreateChatCompletionResponseTokenUsage;\n /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */\n microcentsSpent?: string | null;\n}\n\nexport interface ChunkChoiceChunkDelta {\n /**\n * The contents of the chunk message.\n * @maxLength 1000\n */\n content?: string | null;\n /** The role of the author of this message. */\n role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;\n /**\n * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.\n * @maxSize 100\n */\n toolCalls?: ChatCompletionMessageToolCall[];\n}\n\nexport interface ChatCompletionChunkChunkChoice {\n /** A chat completion delta generated by streamed model responses */\n delta?: ChunkChoiceChunkDelta;\n /**\n * The reason the model stopped generating tokens. This will be\n * \"stop\" if the model hit a natural stop point or a provided stop sequence,\n * \"length\" if the maximum number of tokens specified in the request was reached,\n * \"content_filter\" if content was omitted due to a flag from our content filters,\n * \"tool_calls\" if the model called a tool\n * @maxLength 100\n */\n finishReason?: string | null;\n /** The index of the choice in the list of choices. */\n index?: number | null;\n}\n\nexport interface GoogleproxyV1AnthropicStreamChunk\n extends GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n /**\n * The unique identifier for the response. 
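// A minimal sketch of draining an OpenAI-style chunk stream as documented
// above: concatenate delta content, and pick up usage from the final chunk
// when stream_options {"include_usage": true} was requested. Shapes are
// narrowed to the fields used and are not the package's exports.
interface ChunkLike {
  choices?: Array<{ index?: number | null; delta?: { content?: string | null } }>;
  usage?: { totalTokens?: number | null } | null;
}
function drainStream(chunks: ChunkLike[]): { text: string; totalTokens: number } {
  let text = '';
  let totalTokens = 0;
  for (const chunk of chunks) {
    // Assumes n = 1, i.e. a single streamed choice per chunk.
    text += chunk.choices?.[0]?.delta?.content ?? '';
    if (chunk.usage?.totalTokens != null) totalTokens = chunk.usage.totalTokens; // final chunk only
  }
  return { text, totalTokens };
}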
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1AnthropicStreamChunkContentOneOf {\n toolUse?: GoogleproxyV1ToolUse;\n contentBlockDelta?: GoogleproxyV1ContentBlockDelta;\n messageDelta?: V1AnthropicStreamChunkMessageDelta;\n redactedThinking?: GoogleproxyV1RedactedThinking;\n}\n\nexport interface GoogleproxyV1ContentBlockDelta\n extends GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface GoogleproxyV1ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface V1AnthropicStreamChunkMessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: GoogleproxyV1Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface AnthropicStreamChunk extends AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n index?: number | null;\n}\n\n/** @oneof */\nexport interface AnthropicStreamChunkContentOneOf {\n toolUse?: ToolUse;\n contentBlockDelta?: ContentBlockDelta;\n messageDelta?: MessageDelta;\n redactedThinking?: RedactedThinking;\n}\n\nexport interface ContentBlockDelta extends ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\n/** @oneof */\nexport interface ContentBlockDeltaDeltaOneOf {\n /** @maxLength 1000000 */\n text?: string;\n /** @maxLength 1000000 */\n partialJson?: string;\n /** @maxLength 1000000 */\n thinking?: string;\n /** @maxLength 1000000 */\n signature?: string;\n}\n\nexport interface MessageDelta {\n /**\n * The reason why Anthropic Claude stopped generating the response:\n * `end_turn` – The model reached a natural stopping point.\n * `max_tokens` – The generated text exceeded the value of the max_tokens input field or exceeded the maximum number of tokens that the model supports.\n * `stop_sequence` – The model generated one of the stop sequences that you specified in the stop_sequences input field.\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * Which custom stop sequence was generated, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Token usage statistics. */\n usage?: Usage;\n microcentsSpent?: string | null;\n}\n\nexport interface V1AnthropicStreamChunk\n extends V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n /**\n * The unique identifier for the response. 
The format and length of the ID might change over time.\n * @maxLength 512\n */\n responseId?: string;\n /**\n * The ID for the Anthropic Claude model that made the request.\n * @maxLength 512\n */\n model?: string;\n /**\n * Index of the content block this chunk refers to (when relevant).\n * For example, text and tool-input deltas apply to the block at this index.\n */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1AnthropicStreamChunkContentOneOf {\n /** Announcement of a model-initiated tool call (client tools or Anthropic-run tools) */\n toolUse?: V1ToolUse;\n /**\n * Start of a server tool block at `index` (e.g., \"web_search\", \"web_fetch\", \"code_execution\").\n * The tool input will stream via ContentBlockDelta.partial_json for the SAME `index`,\n * and is finalized by ContentBlockStop for that `index`.\n */\n serverToolUse?: ServerToolUse;\n /** Start of a Web Search result block at `index`. Completion is marked by ContentBlockStop. */\n webSearchToolResult?: WebSearchToolResult;\n /** Start of a Web Fetch result block at `index`. Completion is marked by ContentBlockStop. */\n webFetchToolResult?: WebFetchToolResult;\n /** Start of a Code Execution result block at `index`. Completion is marked by ContentBlockStop. */\n codeExecutionToolResult?: CodeExecutionToolResult;\n /**\n * Incremental data that refines the content block at `index`\n * (text characters, tool-input JSON fragments, thinking text, or thinking signature).\n */\n contentBlockDelta?: V1ContentBlockDelta;\n /**\n * Top-level message updates:\n * - stop reason / stop sequence (when known),\n * - cumulative token usage (input, output, cache, server-tool counters),\n * - optional cost fields (e.g., microcents).\n */\n messageDelta?: AnthropicStreamChunkMessageDelta;\n /**\n * Redacted variant of thinking content when Claude’s safety systems redact internal reasoning.\n * Pass back unchanged in a follow-up request to let Claude continue without losing context.\n */\n redactedThinking?: V1RedactedThinking;\n}\n\nexport interface V1ContentBlockDelta extends V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\n/** @oneof */\nexport interface V1ContentBlockDeltaDeltaOneOf {\n /**\n * Characters belonging to a text content block.\n * @maxLength 1000000\n */\n text?: string;\n /**\n * A fragment of the tool `input` JSON (as a string) for a tool_use/server_tool_use block.\n * Multiple fragments across chunks together represent the final JSON value.\n * @maxLength 1000000\n */\n partialJson?: string;\n /**\n * Portion of the model’s extended-thinking content for a thinking block.\n * @maxLength 1000000\n */\n thinking?: string;\n /**\n * Signature data associated with a thinking block (emitted immediately before that block completes).\n * @maxLength 1000000\n */\n signature?: string;\n}\n\nexport interface AnthropicStreamChunkMessageDelta {\n /**\n * Why generation concluded for this assistant message, when 
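// Anthropic stream chunks above address content blocks by `index`, with tool
// input arriving as partial_json fragments for the same index until the block
// stops. A minimal assembly sketch: buffer fragments per index and parse once
// the block is finalized. Shapes are narrowed to the fields used; nothing here
// is a package export.
interface AnthropicChunkLike {
  index?: number | null;
  contentBlockDelta?: { text?: string; partialJson?: string };
}
const jsonFragmentsByIndex = new Map<number, string>();
function onAnthropicChunk(chunk: AnthropicChunkLike): void {
  const idx = chunk.index ?? 0;
  const fragment = chunk.contentBlockDelta?.partialJson;
  if (fragment) jsonFragmentsByIndex.set(idx, (jsonFragmentsByIndex.get(idx) ?? '') + fragment);
}
// Call when ContentBlockStop arrives for `idx`: the buffered fragments now
// form one complete JSON value.
function finishBlock(idx: number): unknown {
  return JSON.parse(jsonFragmentsByIndex.get(idx) ?? 'null');
}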
applicable:\n * \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" | \"pause_turn\" | \"refusal\".\n * @maxLength 512\n */\n stopReason?: string | null;\n /**\n * The specific custom stop sequence that was produced, if any.\n * @maxLength 512\n */\n stopSequence?: string | null;\n /** Cumulative token usage at this point in the stream. */\n usage?: V1Usage;\n /** Cost of the request so far, in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GenerateTextByPromptObjectRequest {\n /** Prompt object that describes the text generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GenerateTextByPromptObjectResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /** Prompt's final form that was used to issue a GenerateText request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateEmbeddingsRequest\n extends GenerateEmbeddingsRequestEmbeddingRequestOneOf {\n /** OpenAi Embeddings Request */\n openAiEmbeddingsRequest?: V1CreateEmbeddingsRequest;\n /** Azure Embeddings Request */\n azureEmbeddingsRequest?: CreateEmbeddingsRequest;\n /** Google Vertex Embeddings Request */\n googleEmbeddingsRequest?: GetEmbeddingRequest;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateEmbeddingsRequestEmbeddingRequestOneOf {\n /** OpenAi Embeddings Request */\n openAiEmbeddingsRequest?: V1CreateEmbeddingsRequest;\n /** Azure Embeddings Request */\n azureEmbeddingsRequest?: CreateEmbeddingsRequest;\n /** Google Vertex Embeddings Request */\n googleEmbeddingsRequest?: GetEmbeddingRequest;\n}\n\nexport interface V1CreateEmbeddingsRequest {\n /**\n * Input text to get embeddings for, encoded as a string or array of tokens.\n * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays.\n * Each input must not exceed 8192 tokens in length.\n * @minSize 1\n * @maxSize 200\n * @maxLength 40000\n */\n input?: string[] | null;\n /** Embedding model that produced the embeddings. */\n model?: OpenaiproxyV1EmbeddingModelWithLiterals;\n /** The format to return the embeddings in. Can be either float or base64. 
*/\n encodingFormat?: V1EmbeddingEncodingFormatWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 50\n */\n user?: string | null;\n /**\n * The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.\n * @min 1\n */\n dimensions?: string | null;\n}\n\nexport enum OpenaiproxyV1EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n TEXT_EMBEDDING_ADA_002 = 'TEXT_EMBEDDING_ADA_002',\n TEXT_EMBEDDING_3_SMALL = 'TEXT_EMBEDDING_3_SMALL',\n TEXT_EMBEDDING_3_LARGE = 'TEXT_EMBEDDING_3_LARGE',\n}\n\n/** @enumType */\nexport type OpenaiproxyV1EmbeddingModelWithLiterals =\n | OpenaiproxyV1EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'TEXT_EMBEDDING_ADA_002'\n | 'TEXT_EMBEDDING_3_SMALL'\n | 'TEXT_EMBEDDING_3_LARGE';\n\nexport enum V1EmbeddingEncodingFormat {\n UNKNOWN_ENCODING_FORMAT = 'UNKNOWN_ENCODING_FORMAT',\n /** Will request base64 from OpenAI and parse server-side */\n FLOAT = 'FLOAT',\n BASE64 = 'BASE64',\n}\n\n/** @enumType */\nexport type V1EmbeddingEncodingFormatWithLiterals =\n | V1EmbeddingEncodingFormat\n | 'UNKNOWN_ENCODING_FORMAT'\n | 'FLOAT'\n | 'BASE64';\n\nexport interface CreateEmbeddingsRequest {\n /**\n * Input text to get embeddings for, encoded as a string or array of tokens.\n * To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays.\n * Each input must not exceed 8192 tokens in length.\n * @minSize 1\n * @maxSize 200\n * @maxLength 40000\n */\n input?: string[] | null;\n /** Embedding model that produced the embeddings. */\n model?: EmbeddingModelWithLiterals;\n /** The format to return the embeddings in. Currently, only float is supported. */\n encodingFormat?: EmbeddingEncodingFormatWithLiterals;\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.\n * @maxLength 50\n */\n user?: string | null;\n /**\n * The number of dimensions the resulting output embeddings should have. 
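// The embeddings requests above cap input at 200 strings per call (@maxSize
// 200), each within the model's token limit. A minimal batching sketch; the
// constant mirrors that annotation and the helper is illustrative.
const MAX_EMBEDDING_INPUTS = 200;
function toEmbeddingBatches(inputs: string[]): string[][] {
  const batches: string[][] = [];
  for (let i = 0; i < inputs.length; i += MAX_EMBEDDING_INPUTS) {
    batches.push(inputs.slice(i, i + MAX_EMBEDDING_INPUTS));
  }
  return batches;
}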
Only supported in text-embedding-3 and later models.\n * @min 1\n */\n dimensions?: string | null;\n}\n\nexport enum EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n ADA = 'ADA',\n TEXT_EMBEDDING_3_SMALL = 'TEXT_EMBEDDING_3_SMALL',\n TEXT_EMBEDDING_3_LARGE = 'TEXT_EMBEDDING_3_LARGE',\n}\n\n/** @enumType */\nexport type EmbeddingModelWithLiterals =\n | EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'ADA'\n | 'TEXT_EMBEDDING_3_SMALL'\n | 'TEXT_EMBEDDING_3_LARGE';\n\nexport enum EmbeddingEncodingFormat {\n UNKNOWN_ENCODING_FORMAT = 'UNKNOWN_ENCODING_FORMAT',\n FLOAT = 'FLOAT',\n BASE64 = 'BASE64',\n}\n\n/** @enumType */\nexport type EmbeddingEncodingFormatWithLiterals =\n | EmbeddingEncodingFormat\n | 'UNKNOWN_ENCODING_FORMAT'\n | 'FLOAT'\n | 'BASE64';\n\n/** Request for getting embeddings from text */\nexport interface GetEmbeddingRequest {\n /** The model to use for generating embeddings */\n model?: V1EmbeddingModelWithLiterals;\n /**\n * Array of instances containing text to embed\n * five texts of up to 2,048 tokens per text for all models\n * @minSize 1\n * @maxSize 5\n */\n instances?: TextEmbeddingInstance[];\n /** Optional parameters for the embedding request */\n parameters?: TextEmbeddingParameters;\n}\n\n/** Enum for different embedding models offered by Vertex AI */\nexport enum V1EmbeddingModel {\n UNKNOWN_EMBEDDING_MODEL = 'UNKNOWN_EMBEDDING_MODEL',\n TEXT_MULTILINGUAL_EMBEDDING_002 = 'TEXT_MULTILINGUAL_EMBEDDING_002',\n TEXT_EMBEDDING_005 = 'TEXT_EMBEDDING_005',\n /** Experimental model text-embedding-large-exp-03-07 */\n TEXT_EMBEDDING_LARGE = 'TEXT_EMBEDDING_LARGE',\n GEMINI_EMBEDDING_001 = 'GEMINI_EMBEDDING_001',\n}\n\n/** @enumType */\nexport type V1EmbeddingModelWithLiterals =\n | V1EmbeddingModel\n | 'UNKNOWN_EMBEDDING_MODEL'\n | 'TEXT_MULTILINGUAL_EMBEDDING_002'\n | 'TEXT_EMBEDDING_005'\n | 'TEXT_EMBEDDING_LARGE'\n | 'GEMINI_EMBEDDING_001';\n\n/** Instance containing text to embed */\nexport interface TextEmbeddingInstance {\n /**\n * The text content to embed\n * up to 2,048 tokens per text for all models\n * @maxLength 40000\n */\n content?: string | null;\n /**\n * Optional task type that helps optimize the embedding for specific use cases\n * If left blank, the default used is RETRIEVAL_QUERY\n */\n taskType?: TaskTypeWithLiterals;\n /**\n * Optional title for the content\n * @maxLength 10000\n */\n title?: string | null;\n}\n\n/**\n * Enum for task types that help optimize embeddings for specific use cases\n * Used to convey intended downstream application to help the model produce better embeddings\n * If left blank, the default used is RETRIEVAL_QUERY\n */\nexport enum TaskType {\n UNKNOWN_TASK_TYPE = 'UNKNOWN_TASK_TYPE',\n RETRIEVAL_QUERY = 'RETRIEVAL_QUERY',\n RETRIEVAL_DOCUMENT = 'RETRIEVAL_DOCUMENT',\n SEMANTIC_SIMILARITY = 'SEMANTIC_SIMILARITY',\n CLASSIFICATION = 'CLASSIFICATION',\n CLUSTERING = 'CLUSTERING',\n QUESTION_ANSWERING = 'QUESTION_ANSWERING',\n FACT_VERIFICATION = 'FACT_VERIFICATION',\n CODE_RETRIEVAL_QUERY = 'CODE_RETRIEVAL_QUERY',\n}\n\n/** @enumType */\nexport type TaskTypeWithLiterals =\n | TaskType\n | 'UNKNOWN_TASK_TYPE'\n | 'RETRIEVAL_QUERY'\n | 'RETRIEVAL_DOCUMENT'\n | 'SEMANTIC_SIMILARITY'\n | 'CLASSIFICATION'\n | 'CLUSTERING'\n | 'QUESTION_ANSWERING'\n | 'FACT_VERIFICATION'\n | 'CODE_RETRIEVAL_QUERY';\n\n/** Parameters for the embedding request */\nexport interface TextEmbeddingParameters {\n /** Optional: Used to specify output embedding size. If set, output embeddings will be truncated to the size specified. 
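// A minimal GetEmbeddingRequest-shaped sketch for the Vertex path above: up to
// five instances per call, each tagged with a task type to steer the embedding
// (RETRIEVAL_DOCUMENT for corpus text). The model literal and content values
// are illustrative.
const vertexEmbeddingRequest = {
  model: 'TEXT_EMBEDDING_005',
  instances: [
    { content: 'Shipping takes 3-5 business days.', taskType: 'RETRIEVAL_DOCUMENT' },
  ],
  parameters: { outputDimensionality: 256, autoTruncate: true },
};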
*/\n outputDimensionality?: number | null;\n /** Optional: When set to true, input text will be truncated. When set to false, an error is returned if the input text is longer than the maximum length supported by the model. Defaults to true. */\n autoTruncate?: boolean;\n}\n\nexport interface GenerateEmbeddingsResponse\n extends GenerateEmbeddingsResponseEmbeddingResponseOneOf {\n /** OpenAi Embeddings Response */\n openAiEmbeddingsResponse?: V1CreateEmbeddingsResponse;\n /** Azure Embeddings Response */\n azureEmbeddingsResponse?: CreateEmbeddingsResponse;\n /** Google Vertex Embeddings Response */\n googleEmbeddingsResponse?: GetEmbeddingResponse;\n}\n\n/** @oneof */\nexport interface GenerateEmbeddingsResponseEmbeddingResponseOneOf {\n /** OpenAi Embeddings Response */\n openAiEmbeddingsResponse?: V1CreateEmbeddingsResponse;\n /** Azure Embeddings Response */\n azureEmbeddingsResponse?: CreateEmbeddingsResponse;\n /** Google Vertex Embeddings Response */\n googleEmbeddingsResponse?: GetEmbeddingResponse;\n}\n\nexport interface V1CreateEmbeddingsResponse {\n /**\n * The object type, which is always list.\n * @maxLength 50\n */\n objectType?: string | null;\n /**\n * A list of embeddings for each input.\n * @maxSize 1000\n */\n data?: V1EmbeddingInfo[];\n /** Embedding model that produced the embeddings. */\n model?: OpenaiproxyV1EmbeddingModelWithLiterals;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: CreateEmbeddingsResponseEmbeddingUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1EmbeddingInfo extends V1EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: V1FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 1000\n */\n base64Embedding?: string | null;\n /**\n * The object type, which is always \"embedding\".\n * @maxLength 50\n */\n objectType?: string | null;\n /** The index of the embedding in the list of embeddings. */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface V1EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: V1FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 1000\n */\n base64Embedding?: string | null;\n}\n\nexport interface V1FloatEmbedding {\n /**\n * The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.\n * @maxSize 10000\n */\n embedding?: number[] | null;\n}\n\nexport interface CreateEmbeddingsResponseEmbeddingUsage {\n /** Number of prompt tokens */\n promptTokens?: number | null;\n /** Total number of tokens used for the embedding request. */\n totalTokens?: number | null;\n}\n\nexport interface CreateEmbeddingsResponse {\n /**\n * The object type, which is always list.\n * @maxLength 50\n */\n objectType?: string | null;\n /**\n * A list of embeddings for each input.\n * @maxSize 1000\n */\n data?: EmbeddingInfo[];\n /** Embedding model that produced the embeddings. */\n model?: EmbeddingModelWithLiterals;\n /** TokenUsage object describing the tokens usage per request. */\n usage?: EmbeddingUsage;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface EmbeddingInfo extends EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. 
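Since GenerateEmbeddingsResponse is a oneof, only one vendor field is expected to be populated; a hedged sketch of dispatching on whichever variant arrived:

import type { GenerateEmbeddingsResponse } from '@wix/auto_sdk_ai-gateway_generators';

// Returns the number of vectors regardless of which vendor answered.
function countVectors(res: GenerateEmbeddingsResponse): number {
  if (res.openAiEmbeddingsResponse) return res.openAiEmbeddingsResponse.data?.length ?? 0;
  if (res.azureEmbeddingsResponse) return res.azureEmbeddingsResponse.data?.length ?? 0;
  if (res.googleEmbeddingsResponse) return res.googleEmbeddingsResponse.predictions?.length ?? 0;
  return 0; // no variant populated
}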
*/\n floatEmbedding?: FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 10000\n */\n base64Embedding?: string | null;\n /**\n * The object type, which is always \"embedding\".\n * @maxLength 50\n */\n objectType?: string | null;\n /** The index of the embedding in the list of embeddings. */\n index?: number | null;\n}\n\n/** @oneof */\nexport interface EmbeddingInfoEmbeddingResultOneOf {\n /** The embedding vector, which is a list of floats. */\n floatEmbedding?: FloatEmbedding;\n /**\n * The embedding vector, which is a base64 encoded string.\n * @maxLength 10000\n */\n base64Embedding?: string | null;\n}\n\nexport interface FloatEmbedding {\n /**\n * The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.\n * @maxSize 10000\n */\n embedding?: number[] | null;\n}\n\nexport interface EmbeddingUsage {\n /** Number of prompt tokens */\n promptTokens?: number | null;\n /** Total number of tokens used for the embedding request. */\n totalTokens?: number | null;\n}\n\n/** Response containing the generated embeddings */\nexport interface GetEmbeddingResponse {\n /**\n * The generated embedding values\n * @maxSize 5\n */\n predictions?: EmbeddingPrediction[];\n /** Cost of the request in micro cents */\n microcentsSpent?: string | null;\n}\n\n/** Embeddings data */\nexport interface EmbeddingPrediction {\n embeddings?: EmbeddingInstance;\n}\n\n/** Single content embedding instance */\nexport interface EmbeddingInstance {\n /** Metadata about the embedding */\n statistics?: Statistics;\n /**\n * The generated embedding values\n * @maxSize 10000\n */\n embedding?: number[];\n}\n\n/** Metadata about the embedding generation */\nexport interface Statistics {\n /** Number of tokens processed */\n tokenCount?: number | null;\n /** Truncation indicator */\n truncated?: boolean | null;\n}\n\nexport interface GenerateTextByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate text generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * This field is ignored for streaming requests.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n}\n\nexport interface GenerateTextByProjectResponse {\n /** ModelResponse object that describes the text generation result. */\n response?: ModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateText request. 
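EmbeddingInfo is itself a oneof of float and base64 forms. A sketch of normalizing both to number[]; it assumes the base64 payload encodes little-endian float32 values (OpenAI's documented layout) and a Node.js Buffer:

import type { EmbeddingInfo } from '@wix/auto_sdk_ai-gateway_generators';

function toVector(info: EmbeddingInfo): number[] {
  if (info.floatEmbedding?.embedding) return info.floatEmbedding.embedding;
  if (info.base64Embedding) {
    // Assumption: the base64 string wraps raw little-endian float32 values.
    const bytes = Buffer.from(info.base64Embedding, 'base64');
    return Array.from(new Float32Array(bytes.buffer, bytes.byteOffset, bytes.byteLength / 4));
  }
  return [];
}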
*/\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateModerationRequest\n extends GenerateModerationRequestModerationRequestOneOf {\n /** OpenAi Moderation Request */\n openAiModerationRequest?: CreateModerationRequest;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateModerationRequestModerationRequestOneOf {\n /** OpenAi Moderation Request */\n openAiModerationRequest?: CreateModerationRequest;\n}\n\nexport interface CreateModerationRequest {\n /**\n * The input text to classify.\n * @maxLength 100000\n * @maxSize 1000\n */\n input?: string[];\n /**\n * Two content moderation models are available: text-moderation-stable and text-moderation-latest.\n * The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always\n * using our most accurate model. If you use text-moderation-stable, we will provide advance notice before updating\n * the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.\n * @maxLength 50\n */\n model?: string | null;\n /**\n * An array of input parts with a defined type; each can be of type text or image_url when passing in images.\n * If defined, the input field will be ignored.\n * Image input is only supported when using the omni-moderation model.\n * @maxSize 1000\n */\n multiModalInputs?: MultiModalInput[];\n}\n\nexport interface ImageUrlInput {\n /**\n * The URL of the image, must be a valid wix-mp URL.\n * @maxLength 100000\n */\n url?: string | null;\n}\n\nexport interface MultiModalInput extends MultiModalInputContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlInput;\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n /**\n * The type of the content part. Can be text or image_url.\n * @maxLength 100\n */\n type?: string | null;\n}\n\n/** @oneof */\nexport interface MultiModalInputContentValueOneOf {\n /** Image_url content */\n imageUrl?: ImageUrlInput;\n /**\n * Text content\n * @maxLength 1000000\n */\n text?: string | null;\n}\n\nexport interface GenerateModerationResponse\n extends GenerateModerationResponseModerationResponseOneOf {\n /** OpenAi Moderation Response */\n openAiModerationResponse?: CreateModerationResponse;\n}\n\n/** @oneof */\nexport interface GenerateModerationResponseModerationResponseOneOf {\n /** OpenAi Moderation Response */\n openAiModerationResponse?: CreateModerationResponse;\n}\n\nexport interface CreateModerationResponse {\n /**\n * The unique identifier for the moderation request.\n * @maxLength 100\n */\n id?: string | null;\n /**\n * The model used to generate the moderation results.\n * @maxLength 100\n */\n model?: string | null;\n /**\n * A list of moderation objects.\n * @maxSize 1000\n */\n results?: ModerationResult[];\n}\n\nexport interface ModerationResult {\n /** Whether the content violates OpenAI's usage policies: https://openai.com/policies/usage-policies. */\n flagged?: boolean;\n /** A list of the categories, and whether they are flagged or not. */\n categories?: Record<string, any> | null;\n /** A list of the categories along with their scores as predicted by the model. 
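A hedged sketch of a mixed text-and-image moderation request and a helper that scans the results; the media URL is a placeholder and in practice must be a valid wix-mp URL:

import type {
  GenerateModerationRequest,
  GenerateModerationResponse,
} from '@wix/auto_sdk_ai-gateway_generators';

const request: GenerateModerationRequest = {
  openAiModerationRequest: {
    // When multiModalInputs is set, the plain `input` field is ignored.
    multiModalInputs: [
      { type: 'text', text: 'user-generated caption' },
      { type: 'image_url', imageUrl: { url: 'https://example.wixmp.com/photo.jpg' } },
    ],
  },
};

function anyFlagged(res: GenerateModerationResponse): boolean {
  return (res.openAiModerationResponse?.results ?? []).some((r) => r.flagged === true);
}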
*/\n categoryScores?: Record<string, any> | null;\n /** A list of the categories along with the input type(s) that the score applies to. */\n categoryAppliedInputTypes?: Record<string, any> | null;\n}\n\nexport interface GenerateImageByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate image generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /**\n * Skip polling flag. Supported requests:\n * black_forest_labs_generate_image_response, replicate_create_prediction_response\n */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByProjectResponse {\n /** ImageModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface ImageModelResponse extends ImageModelResponseResponseOneOf {\n /** OpenAI image generation response. */\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n /** Extracted cost of the request in microcents. */\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface ImageModelResponseResponseOneOf {\n /** OpenAI image generation response. 
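A minimal GenerateImageByProjectRequest sketch; the GUID, topic name, and template parameter name are placeholders:

import type { GenerateImageByProjectRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: GenerateImageByProjectRequest = {
  projectId: '00000000-0000-0000-0000-000000000000', // placeholder GUID
  params: { subject: 'a red bicycle' }, // substituted into the prompt template
  // skipPolling applies to the polling vendors listed above
  // (Black Forest Labs and Replicate responses).
  skipPolling: true,
  asyncResultTopic: 'my-app/image-results', // hypothetical topic
};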
*/\n openAiCreateImageResponse?: CreateImageResponse;\n /** Stability AI text to image response. */\n stabilityAiTextToImageResponse?: V1TextToImageResponse;\n /** Stability AI generate core response. */\n stabilityAiGenerateCoreResponse?: GenerateCoreResponse;\n /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */\n stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;\n /** Black Forest Labs image generation response. */\n blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;\n /** Replicate image generation response. */\n replicateCreatePredictionResponse?: CreatePredictionResponse;\n /** Stability AI - Edit Image with prompt response. */\n stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;\n /** Runware AI - Flux TextToImage response. */\n runwareTextToImageResponse?: TextToImageResponse;\n /** Google AI - Generate Image with Imagen Model response. */\n googleGenerateImageResponse?: GenerateImageResponse;\n /** ML generate image response. */\n mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;\n /** OpenAI image creation response. */\n openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;\n /** OpenAI image edit response. */\n openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;\n}\n\nexport interface ImageGenerationRequestedEvent {\n /** Prompt that the generation was requested for. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface ImageGenerationSucceededEvent {\n /** ModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface ImageGenerationFailedEvent {\n /**\n * Error message that image generation failed with.\n * @maxLength 10000\n */\n errorMessage?: string;\n /**\n * Event chain identifier. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.\n * @format GUID\n */\n eventChainId?: string;\n}\n\nexport interface GenerateImageByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate image generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. 
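Because eventChainId stays constant from the requested event through the succeeded or failed event, a Map keyed on it can pair outcomes with their originating requests; a sketch under that assumption:

import type {
  ImageGenerationRequestedEvent,
  ImageGenerationSucceededEvent,
  ImageGenerationFailedEvent,
} from '@wix/auto_sdk_ai-gateway_generators';

const pending = new Map<string, ImageGenerationRequestedEvent>();

function onRequested(e: ImageGenerationRequestedEvent): void {
  if (e.eventChainId) pending.set(e.eventChainId, e);
}

function onSucceeded(e: ImageGenerationSucceededEvent): void {
  if (!e.eventChainId) return;
  const origin = pending.get(e.eventChainId); // the matching request, if seen
  pending.delete(e.eventChainId);
  console.log('image ready for prompt', origin?.prompt, e.response);
}

function onFailed(e: ImageGenerationFailedEvent): void {
  if (e.eventChainId) pending.delete(e.eventChainId);
  console.warn('generation failed:', e.errorMessage);
}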
*/\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByPromptResponse {\n /** ModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateImageByPromptObjectRequest {\n /** Prompt object that describes the image generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n}\n\nexport interface GenerateImageByPromptObjectResponse {\n /** ImageModelResponse object that describes the image generation result. */\n response?: ImageModelResponse;\n /** Prompt's final form that was used to issue a GenerateImage request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByPromptRequest {\n /**\n * Id of the Prompt that will be used to facilitate content generation request.\n * @format GUID\n */\n promptId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface AsyncGenerationConfig {\n /**\n * If specified, the response will be asynchronously sent to this topic via event.\n * @maxLength 1000\n */\n asyncResultTopic?: string | null;\n /** Skip polling flag. */\n skipPolling?: boolean | null;\n /** SPI generation configuration. */\n spiGenerationConfig?: SpiGenerationConfig;\n}\n\nexport interface SpiGenerationConfig {\n /**\n * SPI client app_id.\n * @maxLength 100\n */\n appId?: string | null;\n /**\n * SPI client component_id.\n * @maxLength 100\n */\n componentId?: string | null;\n}\n\nexport interface GenerateContentByPromptResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByProjectRequest {\n /**\n * Id of the Project that will be used to facilitate content generation request.\n * The project's default_prompt_id field will be used as prompt.\n * @format GUID\n */\n projectId: string | null;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. */\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface GenerateContentByProjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /**\n * Id of associated Prompt that was invoked.\n * @format GUID\n */\n promptId?: string;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateContentByPromptObjectRequest {\n /** Prompt object that describes the content generation request. */\n prompt?: Prompt;\n /** Key-value pairs that will be used to substitute templated parameters in the prompt. */\n params?: Record<string, string>;\n /** Contains additional information for the request, such as interaction ID and additional tags (comma-separated string). */\n userRequestInfo?: UserRequestInfo;\n /** Fallback properties for the request. */\n fallbackProperties?: FallbackProperties;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n /** Optional asynchronous configuration. When set, results are delivered via pub/sub events or explicit polling. */\n asyncGenerationConfig?: AsyncGenerationConfig;\n /** Dynamic request configuration containing tools and other dynamic properties. 
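A sketch of a content-generation request that opts into asynchronous delivery; the ids and topic name are placeholders:

import type { GenerateContentByPromptRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: GenerateContentByPromptRequest = {
  promptId: '00000000-0000-0000-0000-000000000000', // placeholder GUID
  params: { tone: 'friendly' },
  asyncGenerationConfig: {
    asyncResultTopic: 'my-app/content-results', // hypothetical topic
    skipPolling: false,
    spiGenerationConfig: { appId: 'my-app', componentId: 'my-component' },
  },
};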
*/\n dynamicRequestConfig?: DynamicRequestConfig;\n}\n\nexport interface GenerateContentByPromptObjectResponse {\n /** Model response object that describes the content generation result. */\n response?: GenerateContentModelResponse;\n /** Prompt's final form that was used to issue a GenerateContent request. */\n materializedPrompt?: Prompt;\n /**\n * Unique interaction identifier, generated during the generation request.\n * @format GUID\n */\n predictionId?: string;\n}\n\nexport interface GenerateTranscriptionRequest\n extends GenerateTranscriptionRequestTranscriptionRequestOneOf {\n /** OpenAi transcription request */\n openAiTranscriptionRequest?: CreateTranscriptionRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateTranscriptionRequestTranscriptionRequestOneOf {\n /** OpenAi transcription request */\n openAiTranscriptionRequest?: CreateTranscriptionRequest;\n}\n\nexport interface CreateTranscriptionRequest {\n /**\n * The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.\n * @format WEB_URL\n */\n file?: string | null;\n /** Transcription AI model to use. */\n model?: TranscriptionModelWithLiterals;\n /**\n * The language of the input audio.\n * Supplying the input language in ISO-639-1 format will improve accuracy and latency.\n * @format LANGUAGE\n */\n language?: string | null;\n /**\n * Prompt text to guide the model's style or continue a previous audio segment.\n * @maxLength 4000\n */\n prompt?: string | null;\n /**\n * The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.\n * DEPRECATED - Will always be set to verbose_json for cost monitoring.\n * @deprecated The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.\n * DEPRECATED - Will always be set to verbose_json for cost monitoring.\n * @replacedBy content_blocks\n * @targetRemovalDate 2025-01-30\n */\n responseFormat?: CreateTranscriptionRequestResponseFormatWithLiterals;\n /** The sampling temperature, between 0 and 1. */\n temperature?: number | null;\n /**\n * The timestamp granularities to populate for this transcription.\n * response_format must be set to verbose_json to use timestamp granularities.\n * Either or both of these options are supported: word or segment.\n * Note: There is no additional latency for segment timestamps,\n * but generating word timestamps incurs additional latency.\n */\n timestampGranularities?: TimestampGranularities;\n /** Content of the input file, can be used instead of the `file` field. 
*/\n fileContent?: FileContent;\n}\n\nexport enum TranscriptionModel {\n UNKNOWN_TRANSCRIPTION_MODEL = 'UNKNOWN_TRANSCRIPTION_MODEL',\n WHISPER_1 = 'WHISPER_1',\n}\n\n/** @enumType */\nexport type TranscriptionModelWithLiterals =\n | TranscriptionModel\n | 'UNKNOWN_TRANSCRIPTION_MODEL'\n | 'WHISPER_1';\n\nexport enum CreateTranscriptionRequestResponseFormat {\n UNKNOWN_RESPONSE_FORMAT = 'UNKNOWN_RESPONSE_FORMAT',\n JSON = 'JSON',\n TEXT = 'TEXT',\n SRT = 'SRT',\n VERBOSE_JSON = 'VERBOSE_JSON',\n VTT = 'VTT',\n}\n\n/** @enumType */\nexport type CreateTranscriptionRequestResponseFormatWithLiterals =\n | CreateTranscriptionRequestResponseFormat\n | 'UNKNOWN_RESPONSE_FORMAT'\n | 'JSON'\n | 'TEXT'\n | 'SRT'\n | 'VERBOSE_JSON'\n | 'VTT';\n\nexport interface TimestampGranularities {\n /**\n * Timestamp granularity, can be WORD or SEGMENT or both.\n * @maxSize 1000\n */\n timestampGranularities?: TimestampGranularityWithLiterals[];\n}\n\nexport enum TimestampGranularity {\n UNKNOWN_TIMESTAMP_GRANULARITY = 'UNKNOWN_TIMESTAMP_GRANULARITY',\n WORD = 'WORD',\n SEGMENT = 'SEGMENT',\n}\n\n/** @enumType */\nexport type TimestampGranularityWithLiterals =\n | TimestampGranularity\n | 'UNKNOWN_TIMESTAMP_GRANULARITY'\n | 'WORD'\n | 'SEGMENT';\n\nexport interface FileContent {\n /** File bytes */\n fileBytes?: Uint8Array;\n /**\n * File name\n * @maxLength 100\n */\n fileName?: string;\n}\n\nexport interface GenerateTranscriptionResponse\n extends GenerateTranscriptionResponseTranscriptionResponseOneOf {\n /** OpenAi transcription response */\n openAiTranscriptionResponse?: CreateTranscriptionResponse;\n}\n\n/** @oneof */\nexport interface GenerateTranscriptionResponseTranscriptionResponseOneOf {\n /** OpenAi transcription response */\n openAiTranscriptionResponse?: CreateTranscriptionResponse;\n}\n\nexport interface CreateTranscriptionResponse {\n /**\n * Language of the input audio.\n * @maxLength 50\n */\n language?: string | null;\n /** Input audio duration in seconds. */\n duration?: GoogleProtoDuration;\n /**\n * Transcribed text.\n * @maxLength 10000\n */\n text?: string;\n /**\n * Extracted words and their corresponding timestamps.\n * @maxSize 1000\n */\n words?: Word[];\n /**\n * Segments of the transcribed text and their corresponding details.\n * @maxSize 1000\n */\n segments?: V1Segment[];\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface Word {\n /**\n * The text content of the word.\n * @maxLength 100\n */\n word?: string;\n /** Start time of the word. */\n start?: GoogleProtoDuration;\n /** End time of the word */\n end?: GoogleProtoDuration;\n}\n\nexport interface V1Segment {\n /** Unique identifier of the segment. */\n id?: number;\n /** Seek offset of the segment. */\n seek?: number;\n /** Start time of the segment. */\n start?: GoogleProtoDuration;\n /** End time of the segment */\n end?: GoogleProtoDuration;\n /**\n * Text content of the segment.\n * @maxLength 10000\n */\n text?: string;\n /**\n * Array of token IDs for the text content.\n * @maxSize 1000\n */\n tokens?: string[];\n /** Temperature parameter used for generating the segment. */\n temperature?: number;\n /** Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. */\n avgLogprob?: number;\n /** Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. */\n compressionRatio?: number;\n /** Probability of no speech in the segment. 
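A hedged transcription-request sketch; the file URL is a placeholder, and word-level granularity is requested knowing it adds latency while segment-level does not:

import type { GenerateTranscriptionRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: GenerateTranscriptionRequest = {
  openAiTranscriptionRequest: {
    file: 'https://example.com/audio/meeting.mp3', // or pass fileContent bytes instead
    model: 'WHISPER_1',
    language: 'en', // ISO-639-1 hint improves accuracy and latency
    timestampGranularities: { timestampGranularities: ['WORD', 'SEGMENT'] },
  },
};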
If the value is higher than 1.0 and the avg_logprob is below -1, consider this segment silent. */\n noSpeechProb?: number;\n}\n\nexport interface GenerateAudioRequest\n extends GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface GenerateAudioRequestAudioRequestOneOf {\n /** OpenAi create speech request */\n openAiCreateSpeechRequest?: CreateSpeechRequest;\n /** ElevenLabs text to speech request */\n elevenlabsTextToSpeechRequest?: TextToSpeechRequest;\n}\n\nexport interface CreateSpeechRequest {\n /** One of the available TTS models: https://platform.openai.com/docs/models#tts */\n model?: SpeechModelWithLiterals;\n /**\n * The text to generate audio for. The maximum length is 4096 characters.\n * @maxLength 4096\n */\n input?: string;\n /**\n * The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.\n * @maxLength 100\n */\n voice?: string;\n /**\n * The format to return audio in. Supported formats are mp3, opus, aac, flac, wav, and pcm.\n * @maxLength 100\n */\n responseFormat?: string | null;\n /**\n * The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.\n * @min 0.25\n * @max 4\n */\n speed?: number | null;\n}\n\nexport enum SpeechModel {\n UNKNOWN_SPEECH_MODEL = 'UNKNOWN_SPEECH_MODEL',\n TTS_1 = 'TTS_1',\n TTS_1_HD = 'TTS_1_HD',\n}\n\n/** @enumType */\nexport type SpeechModelWithLiterals =\n | SpeechModel\n | 'UNKNOWN_SPEECH_MODEL'\n | 'TTS_1'\n | 'TTS_1_HD';\n\nexport interface TextToSpeechRequest {\n /**\n * Voice ID to be used; you can use https://api.elevenlabs.io/v1/voices to list all the available voices.\n * @maxLength 100\n */\n voiceId?: string;\n /**\n * The output format of the generated audio. List of supported values: mp3_22050_32, mp3_44100_32, mp3_44100_64, mp3_44100_96, mp3_44100_128, mp3_44100_192, pcm_16000, pcm_22050, pcm_24000, pcm_44100, ulaw_8000\n * @maxLength 100\n */\n outputFormat?: string | null;\n /**\n * When enable_logging is set to false, full privacy mode will be used for the request.\n * This will mean history features are unavailable for this request, including request stitching.\n * Full privacy mode may only be used by enterprise customers.\n */\n enableLogging?: boolean;\n /**\n * The text that will get converted into speech.\n * @maxLength 10000000\n */\n text?: string;\n /** Identifier of the model that will be used; you can query the available models using GET /v1/models. The model needs to have support for text to speech; you can check this using the can_do_text_to_speech property. */\n modelId?: ElevenLabsTextToSpeechModelWithLiterals;\n /**\n * Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 supports language enforcement. For other models, an error will be returned if language code is provided.\n * @maxLength 100\n */\n languageCode?: string | null;\n /** Voice settings overriding stored settings for the given voice. They are applied only on the given request. */\n voiceSettings?: VoiceSettings;\n /**\n * A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. 
You may have up to 3 locators per request.\n * @maxSize 10\n */\n pronunciationDictionaryLocators?: PronunciationDictionaryLocator[];\n /** If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295. */\n seed?: string | null;\n /**\n * The text that came before the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n previousText?: string | null;\n /**\n * The text that comes after the text of the current request. Can be used to improve the flow of prosody when concatenating together multiple generations or to influence the prosody in the current generation.\n * @maxLength 10000000\n */\n nextText?: string | null;\n /**\n * A list of request_id of the samples that were generated before this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n previousRequestIds?: string[];\n /**\n * A list of request_id of the samples that come after this generation. Can be used to improve the flow of prosody when splitting up a large task into multiple requests.\n * The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.\n * @maxSize 100\n * @maxLength 10\n */\n nextRequestIds?: string[];\n /**\n * This parameter controls text normalization with three modes: ‘auto’, ‘on’, and ‘off’. When set to ‘auto’, the system will automatically decide whether to apply text normalization (e.g., spelling out numbers).\n * With ‘on’, text normalization will always be applied, while with ‘off’, it will be skipped. Cannot be turned on for ‘eleven_turbo_v2_5’ model.\n * Defaults to ‘auto’.\n * @maxLength 100\n */\n applyTextNormalization?: string | null;\n /** When set to true, response chunks will include precise character-level timing information for audio-text synchronization. */\n withTimings?: boolean;\n}\n\nexport enum ElevenLabsTextToSpeechModel {\n UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL = 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL',\n ELEVEN_MULTILINGUAL_V2 = 'ELEVEN_MULTILINGUAL_V2',\n ELEVEN_FLASH_V2_5 = 'ELEVEN_FLASH_V2_5',\n ELEVEN_FLASH_V2 = 'ELEVEN_FLASH_V2',\n}\n\n/** @enumType */\nexport type ElevenLabsTextToSpeechModelWithLiterals =\n | ElevenLabsTextToSpeechModel\n | 'UNKNOWN_ELEVEN_LABS_TEXT_TO_SPEECH_MODEL'\n | 'ELEVEN_MULTILINGUAL_V2'\n | 'ELEVEN_FLASH_V2_5'\n | 'ELEVEN_FLASH_V2';\n\nexport interface VoiceSettings {\n /** Defines the stability for voice settings. */\n stability?: number;\n /** Defines the similarity boost for voice settings. */\n similarityBoost?: number;\n /** Defines the style for voice settings. This parameter is available on V2+ models. */\n style?: number | null;\n /** Defines the use speaker boost for voice settings. This parameter is available on V2+ models. 
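A sketch of an ElevenLabs text-to-speech request through the gateway oneof; the voice id is an example value from the public voices list, and previousText illustrates the prosody-stitching fields:

import type { GenerateAudioRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: GenerateAudioRequest = {
  elevenlabsTextToSpeechRequest: {
    voiceId: '21m00Tcm4TlvDq8ikWAM', // example voice id
    modelId: 'ELEVEN_MULTILINGUAL_V2',
    text: 'Welcome back! Your site is ready.',
    outputFormat: 'mp3_44100_128',
    previousText: 'Hello again.', // improves prosody across split requests
    voiceSettings: { stability: 0.5, similarityBoost: 0.75 },
  },
};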
*/\n useSpeakerBoost?: boolean;\n}\n\nexport interface PronunciationDictionaryLocator {\n /**\n * pronunciation_dictionary_id\n * @maxLength 100\n */\n pronunciationDictionaryId?: string;\n /**\n * version_id\n * @maxLength 100\n */\n versionId?: string;\n}\n\nexport interface GenerateAudioResponse\n extends GenerateAudioResponseAudioResponseOneOf {\n /** OpenAi create speech response */\n openAiCreateSpeechResponse?: CreateSpeechResponse;\n}\n\n/** @oneof */\nexport interface GenerateAudioResponseAudioResponseOneOf {\n /** OpenAi create speech response */\n openAiCreateSpeechResponse?: CreateSpeechResponse;\n}\n\nexport interface CreateSpeechResponse {\n /**\n * Audio file content\n * @format WEB_URL\n */\n contentUrl?: string | null;\n /** Cost of the request in micro cents. */\n microcentsSpent?: string | null;\n}\n\nexport interface GeneratedAudioChunk\n extends GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\n/** @oneof */\nexport interface GeneratedAudioChunkAudioChunkOneOf {\n /** OpenAi create speech chunk */\n openAiSpeechChunk?: SpeechChunk;\n /** ElevenLabs create speech chunk */\n elevenlabsSpeechChunk?: TextToSpeechChunk;\n}\n\nexport interface SpeechChunk {\n /** Partial audio file bytes. */\n content?: Uint8Array;\n}\n\nexport interface TextToSpeechChunk {\n /** Base64 encoded audio chunk */\n audioBase64?: Uint8Array;\n /** Alignment information for the generated audio given the input text sequence. */\n alignment?: AlignmentInfoInChunk;\n /** Alignment information for the generated audio given the input normalized text sequence. */\n normalizedAlignment?: AlignmentInfoInChunk;\n}\n\nexport interface AlignmentInfoInChunk {\n /**\n * Array of start times (in seconds) for each character\n * @maxSize 1000000\n */\n characterStartTimesSeconds?: number[];\n /**\n * Array of end times (in seconds) for each character\n * @maxSize 1000000\n */\n characterEndTimesSeconds?: number[];\n /**\n * Array of individual characters from the input or normalized text\n * @maxSize 1000000\n * @maxLength 1\n */\n characters?: string[];\n}\n\nexport interface PublishPromptRequest {\n /**\n * Prompt object to be serialized in the service's storage.\n * After serialization, the GenerateTextByPromptId with Prompt's id can be used.\n */\n prompt?: Prompt;\n}\n\nexport interface PublishPromptResponse {}\n\nexport interface GetPromptRequest {\n /**\n * Id of the Prompt object to be retrieved from service's storage.\n * @format GUID\n */\n promptId: string;\n /**\n * Key-value pairs that will be used to substitute templated parameters in the prompt.\n * It is expected that only USER or SYSTEM messages can be templated.\n */\n params?: Record<string, string>;\n /** Key-value pairs that will be used to overwrite dynamic properties in the prompt. */\n dynamicProperties?: Record<string, string>;\n}\n\nexport interface GetPromptResponse {\n /** Prompt object from service's storage. */\n prompt?: Prompt;\n}\n\nexport interface PublishProjectRequest {\n /**\n * Project object to be serialized in the service's storage.\n * After serialization, the GenerateTextByProjectId with Project's id can be used.\n */\n project?: Project;\n}\n\nexport interface Project {\n /**\n * Project id.\n * @format GUID\n */\n id?: string;\n /**\n * Id of the default Prompt associated with this Project. 
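Given the alignment arrays above, character timings can be zipped into (character, start, end) tuples for caption syncing; a minimal sketch:

import type { AlignmentInfoInChunk } from '@wix/auto_sdk_ai-gateway_generators';

function characterTimings(a: AlignmentInfoInChunk): Array<[string, number, number]> {
  const chars = a.characters ?? [];
  const starts = a.characterStartTimesSeconds ?? [];
  const ends = a.characterEndTimesSeconds ?? [];
  return chars.map((c, i) => [c, starts[i] ?? 0, ends[i] ?? 0]);
}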
This Prompt will be used to conduct text generation requests.\n * @format GUID\n */\n defaultPromptId?: string;\n /** ExperimentalPromptConfig object that describes optional second Prompt that can be invoked. */\n experimentPromptConfig?: ExperimentalPromptConfig;\n}\n\nexport interface ExperimentalPromptConfig {\n /**\n * Id of the experimental Prompt associated with this Project. This Prompt will be used for text generation in case the associated experiment resolves to true.\n * @format GUID\n */\n experimentalPromptId?: string | null;\n /**\n * Name of experiment in Petri that will define the switch between default and optional Prompt. False is resolved to default_prompt_id invocation. True is resolved to experimental_prompt_id invocation.\n * @maxLength 200\n */\n petriExperimentName?: string | null;\n}\n\nexport interface PublishProjectResponse {}\n\n/** Should match same event in GatewayVisibility */\nexport interface ProjectConfigChangedDomainEvent {\n /**\n * Project ID\n * @maxLength 10000\n */\n projectId?: string;\n /**\n * Old default prompt id, if available\n * @maxLength 10000\n */\n oldDefaultPromptId?: string;\n /**\n * Old experimental prompt id, if available\n * @maxLength 10000\n */\n oldExperimentalPromptId?: string;\n /**\n * Old experiment name, if available\n * @maxLength 10000\n */\n oldExperimentName?: string;\n /**\n * New default prompt id\n * @maxLength 10000\n */\n newDefaultPromptId?: string;\n /**\n * New experimental prompt id\n * @maxLength 10000\n */\n newExperimentalPromptId?: string;\n /**\n * New experiment name\n * @maxLength 10000\n */\n newExperimentName?: string;\n /**\n * Application that originated the request\n * @maxLength 10000\n */\n applicationId?: string;\n /**\n * Sender artifact ID\n * @maxLength 10000\n */\n artifactId?: string;\n}\n\nexport interface GetProjectRequest {\n /**\n * Id of the Project object to be retrieved from service's storage.\n * @format GUID\n */\n projectId: string;\n}\n\nexport interface GetProjectResponse {\n /** Project object from service's storage. */\n project?: Project;\n}\n\nexport interface GetStatusRequest {\n /** Type of the entity to retrieve status for. */\n entityType: EntityTypeWithLiterals;\n /**\n * Id of the entity to retrieve status for. In case of VENDOR, the id is one of the supported vendors.\n * Supported vendors are: {open-ai,google,azure,stability-ai}.\n * In case of PROJECT or PROMPT, the id is the id of the Project or Prompt object.\n * @maxLength 50\n */\n entityId: string | null;\n}\n\nexport enum EntityType {\n UNKNOWN_ENTITY_TYPE = 'UNKNOWN_ENTITY_TYPE',\n VENDOR = 'VENDOR',\n PROJECT = 'PROJECT',\n PROMPT = 'PROMPT',\n}\n\n/** @enumType */\nexport type EntityTypeWithLiterals =\n | EntityType\n | 'UNKNOWN_ENTITY_TYPE'\n | 'VENDOR'\n | 'PROJECT'\n | 'PROMPT';\n\nexport interface GetStatusResponse {\n /** Type of the entity to retrieve status for. */\n entityType?: EntityTypeWithLiterals;\n /** Outage status of the entity. 
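A small sketch of the two GetStatusRequest shapes implied above; vendor keys come from the documented set, and the GUID is a placeholder:

import type { GetStatusRequest } from '@wix/auto_sdk_ai-gateway_generators';

// VENDOR lookups use a vendor key; PROJECT/PROMPT lookups use the object's GUID.
const vendorStatus: GetStatusRequest = { entityType: 'VENDOR', entityId: 'open-ai' };
const promptStatus: GetStatusRequest = {
  entityType: 'PROMPT',
  entityId: '00000000-0000-0000-0000-000000000000', // placeholder GUID
};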
*/\n outageStatus?: OutageStatusWithLiterals;\n}\n\nexport enum OutageStatus {\n UNKNOWN_STATUS = 'UNKNOWN_STATUS',\n HEALTHY = 'HEALTHY',\n OUTAGE = 'OUTAGE',\n}\n\n/** @enumType */\nexport type OutageStatusWithLiterals =\n | OutageStatus\n | 'UNKNOWN_STATUS'\n | 'HEALTHY'\n | 'OUTAGE';\n\nexport interface GetApplicationUsageRequest {}\n\nexport interface GetApplicationUsageResponse {\n /** Info about application quota usage */\n applicationBudgetInfo?: ApplicationBudgetInfo;\n /** Info about user per application quota usage */\n userPerApplicationBudgetInfo?: UserPerApplicationBudgetInfo;\n}\n\nexport interface ApplicationBudgetInfo {\n /** Whether the next call is estimated to succeed based on the remaining monthly budget constraints. */\n eligible?: boolean;\n /** Monthly budget assigned to the calling application, in microcents. */\n totalMonthlyBudget?: string;\n /** Monthly budget spent thus far by the calling application, in microcents. */\n spentMonthlyBudget?: string;\n}\n\nexport interface UserPerApplicationBudgetInfo {\n /** Whether the next call is estimated to succeed based on the remaining budget constraints. */\n eligible?: boolean;\n /** Budget assigned to the user, in microcents. */\n totalBudget?: string;\n /** Budget spent thus far by the user, in microcents. */\n spentBudget?: string;\n /**\n * Limitation timeframe\n * @maxLength 20\n */\n timeframe?: string;\n}\n\nexport interface Wix_ai_gatewayV1EditImageRequest\n extends Wix_ai_gatewayV1EditImageRequestRequestOneOf {\n /** Photoroom remove background request */\n photoroomRemoveBackgroundRequest?: RemoveBackgroundRequest;\n /** Photoroom image editing request */\n photoroomImageEditingRequest?: ImageEditingRequest;\n /** Stability Edit image */\n stabilityAiEditRequest?: V1EditImageRequest;\n /** Replicate edit image */\n replicateEditImageRequest?: EditImageRequest;\n /** Recraft edit image */\n recraftEditImageRequest?: Recraft_proxyV1EditImageRequest;\n /** Contains additional information for the request. */\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface Wix_ai_gatewayV1EditImageRequestRequestOneOf {\n /** Photoroom remove background request */\n photoroomRemoveBackgroundRequest?: RemoveBackgroundRequest;\n /** Photoroom image editing request */\n photoroomImageEditingRequest?: ImageEditingRequest;\n /** Stability Edit image */\n stabilityAiEditRequest?: V1EditImageRequest;\n /** Replicate edit image */\n replicateEditImageRequest?: EditImageRequest;\n /** Recraft edit image */\n recraftEditImageRequest?: Recraft_proxyV1EditImageRequest;\n}\n\nexport interface RemoveBackgroundRequest {\n /**\n * The image file to render\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * The format of the resulting image.\n * Allowed values: png, jpg, webp\n * Default value is \"png\".\n * @maxLength 100\n */\n format?: string | null;\n /**\n * The channels of the resulting image\n * Allowed values: rgba, alpha\n * Default: rgba\n * @maxLength 100\n */\n channels?: string | null;\n /**\n * The background color of the resulting image. Can be a hex code (#FF00FF) or an HTML color (red, green, etc.)\n * @maxLength 100\n */\n bgColor?: string | null;\n /**\n * Will resize the output to the specified size. 
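Budgets are string-encoded microcent amounts, so BigInt arithmetic avoids precision loss; a hedged helper:

import type { GetApplicationUsageResponse } from '@wix/auto_sdk_ai-gateway_generators';

function remainingMonthlyMicrocents(usage: GetApplicationUsageResponse): bigint {
  const info = usage.applicationBudgetInfo;
  if (!info) return 0n;
  return BigInt(info.totalMonthlyBudget ?? '0') - BigInt(info.spentMonthlyBudget ?? '0');
}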
Can be preview (0.25 Megapixels),\n * medium (1.5 MP), hd (4 MP) or full (36 MP, can be slower for large images).\n * Useful for mobile apps that need smaller images.\n * Allowed values:preview,medium,hd,full\n * Default:full\n * @maxLength 100\n */\n size?: string | null;\n /**\n * If true, the image returned is cropped to the cutout border. Transparent pixels are removed from the border.\n * Allowed values:true,false\n * Default: false\n * @maxLength 100\n */\n crop?: string | null;\n /**\n * If true, automatically removes colored reflections that have been left on the main subject by a green background.\n * Allowed values:true,false\n * Default: false\n * @maxLength 100\n */\n despill?: string | null;\n}\n\nexport interface ImageEditingRequest {\n /** The model version to use for image editing */\n model?: ImageEditingModelWithLiterals;\n /** The background properties to use for the image editing */\n background?: Background;\n /** The expand properties to use for the image editing */\n expand?: Expand;\n /** The export properties to use for the image editing */\n export?: Export;\n /**\n * [Advanced] Defines the horizontal alignment of the cutout subject within its bounding box.\n * Allowed values:left,center,right\n * @maxLength 100\n */\n horizontalAlignment?: string | null;\n /**\n * If set to true (default), cropped sides of the subject will snap to the edges. For instance,\n * for a portrait image cropped below the elbows, the subject will be aligned at the bottom even if a bottom padding is provided\n * (but it will still respect bottom margin)\n * Can't be provided if removeBackground is set to false\n * (See positioning section of the documentation for more information)\n * Default: true\n */\n ignorePaddingAndSnapOnCroppedSides?: boolean | null;\n /**\n * URL of the main image used by the API. The GET endpoint accepts imageUrl only.\n * The maximum size of the image is 30MB.\n * If you want to directly upload an image file, please instead use the POST endpoint with the argument imageFile.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /** The lighting properties to use for the image editing */\n lighting?: Lighting;\n /** The margin properties to use for the image editing */\n margin?: Margin;\n /**\n * Maximum output height. Can only be provided if outputSize is originalImage or croppedSubject.\n * Useful for: resizing an image while keeping the aspect ratio\n */\n maxHeight?: number | null;\n /**\n * Maximum output width. Can only be provided if outputSize is originalImage or croppedSubject.\n * Useful for: resizing an image while keeping the aspect ratio\n */\n maxWidth?: number | null;\n /**\n * Output size of the image. In the form of either:\n * auto to keep the template dimensions when templateId is defined, or behave like originalImage when templateId isn't defined (default)\n * widthXheight for a custom size (example: 200x400)\n * originalImage to keep the original image dimensions\n * croppedSubject to use the size of the foreground dimensions after cropping around it\n * Default:auto\n * Match pattern: ^(auto|\\d+x\\d+|originalImage|croppedSubject)$\n * @maxLength 100\n */\n outputSize?: string | null;\n /** The padding properties to use for the image editing */\n padding?: Padding;\n /**\n * [Advanced] subjectBox by default. 
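A RemoveBackgroundRequest sketch; note the boolean-like fields are string-typed in this schema, and the image URL is a placeholder for a valid wix mp / wix static URL:

import type { RemoveBackgroundRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: RemoveBackgroundRequest = {
  imageUrl: 'https://static.wixstatic.com/media/product-shot.jpg', // placeholder
  format: 'png',
  channels: 'rgba',
  size: 'hd',        // 4 MP; 'full' (36 MP) can be slower
  crop: 'true',      // string-typed flag per the schema
  bgColor: '#FFFFFF',
};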
When set to originalImage, the padding / margin will be around the original image and not the cropped subject.\n * It can lead to the subject disappearing when scaling is set to 'fill', for instance if the subject is on the left of a landscape image and outputSize is a square.\n * Most use cases don't require this option. It is useful if you'd like to maintain subject positioning in the original image.\n * Can't be provided if removeBackground is set to false\n * Allowed values:subjectBox,originalImage\n * Default: subjectBox\n * @maxLength 100\n */\n referenceBox?: string | null;\n /**\n * If enabled (default), the background of the image will be removed using PhotoRoom's award-winning algorithm\n * Default:true\n */\n removeBackground?: boolean | null;\n /**\n * Whether the subject should fit (default) or fill the output image. If set to fit, the empty pixels will be transparent.\n * Allowed values: fit, fill. Default: fill\n * @maxLength 100\n */\n scaling?: string | null;\n /** The segmentation properties to use for the image editing */\n segmentation?: Segmentation;\n /** The shadow properties to use for the image editing */\n shadow?: Shadow;\n /**\n * The ID of the template to render\n * @format GUID\n */\n templateId?: string | null;\n /** The text removal properties to use for the image editing */\n textRemoval?: TextRemoval;\n /**\n * [Advanced] Defines the vertical alignment of the cutout subject within its bounding box.\n * Specifying a custom vertical alignment will implicitly set ignorePaddingAndSnapOnCroppedSides to false for the vertical direction.\n * Allowed values: top, center, bottom\n * @maxLength 100\n */\n verticalAlignment?: string | null;\n}\n\nexport interface Guidance {\n /**\n * URL of the image to use as a background image guidance.\n * Can't be provided if removeBackground is set to false.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * How closely the generated background will match the guiding image, between 0 and 1.\n * A value of 1 means it will match the guiding image as much as possible; a value of 0 means the guiding image will be ignored.\n * @max 1\n */\n scale?: number | null;\n}\n\nexport enum ImageEditingModel {\n IMAGE_EDITING_MODEL_UNSPECIFIED = 'IMAGE_EDITING_MODEL_UNSPECIFIED',\n PR_AI_BACKGROUND_MODEL_VERSION_3 = 'PR_AI_BACKGROUND_MODEL_VERSION_3',\n PR_AI_BACKGROUND_MODEL_VERSION_4 = 'PR_AI_BACKGROUND_MODEL_VERSION_4',\n}\n\n/** @enumType */\nexport type ImageEditingModelWithLiterals =\n | ImageEditingModel\n | 'IMAGE_EDITING_MODEL_UNSPECIFIED'\n | 'PR_AI_BACKGROUND_MODEL_VERSION_3'\n | 'PR_AI_BACKGROUND_MODEL_VERSION_4';\n\nexport interface Background {\n /**\n * Color of the background. If omitted, background will be transparent unless background.imageUrl or background.\n * imageFile is provided. 
Can be a hex color without the hash sign (example: FF0000, FF0000EE) or color name (examples: red, blue)\n * Can't be provided if removeBackground is set to false\n * Default: transparent\n * @maxLength 1000\n */\n color?: string | null;\n /**\n * If ai.auto, a pre-processing step is applied to expand the prompt into a longer form.\n * auto and never are legacy values that will be removed in the next major version.\n * @maxLength 1000\n */\n expandPrompt?: string | null;\n /** The guidance properties to use for the image editing */\n guidance?: Guidance;\n /**\n * URL of the image to use as a background.\n * Can't be provided if removeBackground is set to false\n * The maximum size of the image is 30MB.\n * If background.imageUrl is provided, neither background.imageFile nor background.prompt can be provided, and vice versa.\n * The URL must be a valid wix mp or wix static URL.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Prompt to use for guiding the background generation process.\n * If background.prompt is provided, neither background.imageUrl nor background.imageFile can be provided, and vice versa.\n * @maxLength 100000\n */\n prompt?: string | null;\n /**\n * Whether the background should fit or fill (default) the output image. If set to fit, the empty pixels will be transparent.\n * Allowed values: fit, fill. Default: fill\n * @maxLength 100\n */\n scaling?: string | null;\n /** Seed used to generate the background. Can be used to get similar looking results for the same prompt. */\n seed?: number | null;\n}\n\nexport interface Expand {\n /**\n * Expand mode to use on the main image used by the API.\n * If set to ai.auto, all transparent pixels will automatically be filled based on the content of the current background\n * (either the original background, if removeBackground has been set to false, or a static background, if background.imageUrl has been provided)\n * Expand will rely on output size, subject position, and fitting mode.\n * @maxLength 1000\n */\n mode?: string | null;\n /** Seed used to generate the background. Can be used to get similar looking results for the same prompt. */\n seed?: number | null;\n}\n\nexport interface Export {\n /**\n * The pixel density of the result image.\n * Pixel density can be set to any value between 72 and 1200 dpi.\n */\n dpi?: number | null;\n /**\n * The format of the result image.\n * Default value is \"png\".\n * Allowed values: png, jpeg, jpg. Default: png\n * @maxLength 100\n */\n format?: string | null;\n}\n\nexport interface Lighting {\n /**\n * Lighting mode to use on the main image used by the API. If set to ai.auto, the lighting will be automatically adjusted.\n * Allowed value: ai.auto\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface Margin {\n /**\n * General margin around the subject. Can be expressed as a number between 0 and 0.49,\n * a percentage string between 0% and 49% (e.g., \"30%\"), or a pixel value string (e.g., \"100px\").\n * Unlike padding, margin is never ignored even on cropped sides of the subject.\n * Expressed in a ratio of the output image size. See positioning section of the documentation for more information.\n * Default: 0\n * @max 0.49\n */\n general?: number | null;\n /**\n * Bottom Margin, overrides general margin on the bottom side. Accepts the same formats as margin.\n * Default: 0\n * @max 0.49\n */\n bottom?: number | null;\n /**\n * Left Margin, overrides general margin on the left side. 
Accepts the same formats as margin.\n * @max 0.49\n */\n left?: number | null;\n /**\n * Right Margin, overrides general margin on the right side. Accepts the same formats as margin.\n * @max 0.49\n */\n right?: number | null;\n /**\n * Top Margin, overrides general margin on the top side. Accepts the same formats as margin.\n * @max 0.49\n */\n top?: number | null;\n}\n\nexport interface Padding {\n /**\n * General padding around the subject. Can be expressed as a number between 0 and 0.49, a percentage string between 0% and 49% (e.g., \"30%\"),\n * or a pixel value string (e.g., \"100px\"). Unlike margin, padding will be ignored on cropped sides of the subject if that option is enabled.\n * Expressed in a ratio of the size of the document, minus margins (similar to CSS).\n * See positioning section of the documentation for more information.\n * Default: 0\n * @maxLength 100\n */\n general?: string | null;\n /**\n * Bottom Padding, overrides general padding on the bottom side. Accepts the same formats as padding.\n * Default: 0\n * @maxLength 100\n */\n bottom?: string | null;\n /**\n * Left Padding, overrides general padding on the left side. Accepts the same formats as padding.\n * @maxLength 100\n */\n left?: string | null;\n /**\n * Right Padding, overrides general padding on the right side. Accepts the same formats as padding.\n * @maxLength 100\n */\n right?: string | null;\n /**\n * Top Padding, overrides general padding on the top side. Accepts the same formats as padding.\n * @maxLength 100\n */\n top?: string | null;\n}\n\nexport interface Segmentation {\n /**\n * Controls whether or not the salient object should be kept or ignored by the segmentation model.\n * Allowed values: keepSalientObject, ignoreSalientObject\n * Default: ignoreSalientObject\n * @maxLength 100\n */\n mode?: string | null;\n /**\n * A textual description of what the segmentation should remove.\n * @maxLength 100000\n */\n negativePrompt?: string | null;\n /**\n * A textual description of what the segmentation should keep.\n * @maxLength 100000\n */\n prompt?: string | null;\n}\n\nexport interface Shadow {\n /**\n * Shadow generation mode to use on the main image used by the API. If set to ai.soft,\n * a soft shadow will be generated. If set to ai.hard, a hard shadow will be generated. If set to ai.floating, a floating shadow will be generated.\n * Allowed values: ai.soft, ai.hard, ai.floating\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface TextRemoval {\n /**\n * Text removal mode to use on the main image used by the API.\n * If set to ai.artificial, artificial text will be automatically removed.\n * Artificial text includes all text added on an image through post-processing, such as company name, watermarks, discount, etc.\n * If set to ai.natural, natural text will be automatically removed.\n * Natural text includes text that naturally occurs in an image such as writing on buildings or clothing, road signs, etc.\n * If set to ai.all, all text (natural and artificial) will be automatically removed.\n * Allowed values: ai.artificial, ai.natural, ai.all\n * @maxLength 100\n */\n mode?: string | null;\n}\n\nexport interface V1EditImageRequest {\n /** The model to use for generating the image. 
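Pulling the PhotoRoom pieces together, a hedged ImageEditingRequest sketch (the URL and prompt are placeholders; margin is a ratio while padding accepts string forms):

import type { ImageEditingRequest } from '@wix/auto_sdk_ai-gateway_generators';

const request: ImageEditingRequest = {
  model: 'PR_AI_BACKGROUND_MODEL_VERSION_4',
  imageUrl: 'https://static.wixstatic.com/media/chair.jpg', // placeholder
  background: { prompt: 'soft daylight studio, beige backdrop', seed: 42 },
  outputSize: '1200x1200',
  margin: { general: 0.1 },   // ratio of output size
  padding: { general: '5%' }, // string form per the schema
  shadow: { mode: 'ai.soft' },
  export: { format: 'png', dpi: 300 },
};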
*/\n model?: V1EditImageModelWithLiterals;\n /**\n * The image you wish to inpaint.\n * Supported Formats: jpeg, png, webp\n * Validation Rules:\n * - Every side must be at least 64 pixels\n * - Total pixel count must be between 4,096 and 9,437,184 pixels\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * Image format: jpeg, png, or webp\n * @maxLength 100\n */\n imageFormat?: string | null;\n /**\n * Controls the strength of the inpainting process on a per-pixel basis,\n * either via a second image (passed into this parameter) or via the alpha channel of the image parameter.\n * Passing in a Mask\n * The image passed to this parameter should be a black and white image that represents,\n * at any pixel, the strength of inpainting based on how dark or light the given pixel is.\n * Completely black pixels represent no inpainting strength while completely white pixels represent maximum strength.\n * In the event the mask is a different size than the image parameter, it will be automatically resized.\n * Alpha Channel Support\n * If you don't provide an explicit mask, one will be derived from the alpha channel of the image parameter.\n * Transparent pixels will be inpainted while opaque pixels will be preserved.\n * In the event an image with an alpha channel is provided along with a mask, the mask will take precedence.\n * @maxLength 100000\n */\n imageMask?: string | null;\n /**\n * Image mask format: jpeg, png, or webp\n * @maxLength 100\n */\n imageMaskFormat?: string | null;\n /**\n * Grows the edges of the mask outward in all directions by the specified number of pixels. The expanded area around the mask will be blurred,\n * which can help smooth the transition between inpainted content and the original image.\n * Try this parameter if you notice seams or rough edges around the inpainted content.\n * Default: 5\n * @max 100\n */\n growMask?: number | null;\n /**\n * A specific value [ 0 .. 
4294967294 ] that is used to guide the 'randomness' of the generation.\n * (Omit this parameter or pass 0 to use a random seed.)\n */\n seed?: string | null;\n /**\n * Default: png\n * Enum: jpeg, png, webp\n * Dictates the content-type of the generated image.\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\n/** flynt-http-mapping-breaking-change */\nexport enum V1EditImageModel {\n UNKNOWN_EDIT_IMAGE_REQUEST_MODEL = 'UNKNOWN_EDIT_IMAGE_REQUEST_MODEL',\n ERASE = 'ERASE',\n}\n\n/** @enumType */\nexport type V1EditImageModelWithLiterals =\n | V1EditImageModel\n | 'UNKNOWN_EDIT_IMAGE_REQUEST_MODEL'\n | 'ERASE';\n\nexport interface EditImageRequest {\n /** Model to use */\n model?: EditImageModelWithLiterals;\n /**\n * Input image URL\n * @maxLength 1000\n */\n image?: string | null;\n /** Desired scale */\n scale?: string | null;\n /** Optional face enhancement */\n faceEnhance?: boolean | null;\n /**\n * Choose the format of the output image\n * Default: \"webp\"\n * @maxLength 100\n */\n outputFormat?: string | null;\n}\n\nexport enum EditImageModel {\n /** Default */\n UNKNOWN_EDIT_IMAGE_MODEL = 'UNKNOWN_EDIT_IMAGE_MODEL',\n /** real-esrgan */\n REAL_ESRGAN = 'REAL_ESRGAN',\n /** https://replicate.com/recraft-ai/recraft-vectorize/api */\n RECRAFT_VECTORIZE = 'RECRAFT_VECTORIZE',\n}\n\n/** @enumType */\nexport type EditImageModelWithLiterals =\n | EditImageModel\n | 'UNKNOWN_EDIT_IMAGE_MODEL'\n | 'REAL_ESRGAN'\n | 'RECRAFT_VECTORIZE';\n\nexport interface Recraft_proxyV1EditImageRequest {\n /** Which action to perform */\n editAction?: EditActionWithLiterals;\n /**\n * Input image URL\n * @maxLength 10000\n */\n image?: string;\n /**\n * Image mask URL, used only by `ERASE_REGION`. From the docs: \"An image encoded in grayscale color mode,\n * used to define the specific regions of the image to be erased. The white pixels represent the parts of the\n * image that will be erased, while black pixels indicate the parts of the image that will remain unchanged.\n * Should have exactly the same size as the image.\n * Each pixel of the image should be either pure black (value 0) or pure white (value 255).\"\n * @maxLength 10000\n */\n mask?: string | null;\n}\n\n/** https://www.recraft.ai/docs/api-reference/usage#vectorize-image */\nexport enum EditAction {\n UNKNOWN_EDIT_ACTION = 'UNKNOWN_EDIT_ACTION',\n VECTORIZE = 'VECTORIZE',\n REMOVE_BACKGROUND = 'REMOVE_BACKGROUND',\n CRISP_UPSCALE = 'CRISP_UPSCALE',\n CREATIVE_UPSCALE = 'CREATIVE_UPSCALE',\n ERASE_REGION = 'ERASE_REGION',\n}\n\n/** @enumType */\nexport type EditActionWithLiterals =\n | EditAction\n | 'UNKNOWN_EDIT_ACTION'\n | 'VECTORIZE'\n | 'REMOVE_BACKGROUND'\n | 'CRISP_UPSCALE'\n | 'CREATIVE_UPSCALE'\n | 'ERASE_REGION';\n\nexport interface Wix_ai_gatewayV1EditImageResponse\n extends Wix_ai_gatewayV1EditImageResponseResponseOneOf {\n /** Photoroom remove background response */\n photoroomRemoveBackgroundResponse?: RemoveBackgroundResponse;\n /** Photoroom image editing response */\n photoroomImageEditingResponse?: ImageEditingResponse;\n /** Stability Edit response */\n stabilityAiEditResponse?: V1EditImageResponse;\n /** Replicate edit image */\n replicateEditImageResponse?: EditImageResponse;\n /** Recraft edit image */\n recraftEditImageResponse?: Recraft_proxyV1EditImageResponse;\n /** Extracted cost of the request in microcents. 
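// A minimal sketch of an erase (inpainting) request, assuming the
// V1EditImageRequest type declared above is in scope; the URLs are placeholders.
// Per the documented mask semantics: white mask pixels are inpainted, black
// pixels are preserved, and an explicit mask overrides the alpha channel.
const eraseRequest: V1EditImageRequest = {
  model: 'ERASE',
  imageUrl: 'https://example.com/photo.png',
  imageFormat: 'png',
  imageMask: 'https://example.com/mask.png', // black-and-white mask, auto-resized to the image
  imageMaskFormat: 'png',
  growMask: 5, // expands and blurs the mask edge to smooth seams
  outputFormat: 'png',
};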
*/\n cost?: string | null;\n}\n\n/** @oneof */\nexport interface Wix_ai_gatewayV1EditImageResponseResponseOneOf {\n /** Photoroom remove background response */\n photoroomRemoveBackgroundResponse?: RemoveBackgroundResponse;\n /** Photoroom image editing response */\n photoroomImageEditingResponse?: ImageEditingResponse;\n /** Stability Edit response */\n stabilityAiEditResponse?: V1EditImageResponse;\n /** Replicate edit image */\n replicateEditImageResponse?: EditImageResponse;\n /** Recraft edit image */\n recraftEditImageResponse?: Recraft_proxyV1EditImageResponse;\n}\n\nexport interface RemoveBackgroundResponse {\n /**\n * The URL of the image generated by the API. The image will be available for 24 hours.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /**\n * The uncertainty score is a number between 0 and 1. 0 means the model is very confident that the cutout is accurate,\n * 1 means the model is unsure.\n * For instance, shoes on a shoe box might give a higher uncertainty score as the model is unsure what to segment\n * (shoes, box or both). Currently the model returns an uncertainty score only for images of objects.\n * If an image contains humans, it will return the value -1.\n */\n xUncertaintyScore?: number | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface ImageEditingResponse {\n /** This is the seed used when generating the background. Can be used to get similar looking results for the same prompt. */\n prAiBackgroundSeed?: number | null;\n /** When removing texts from an image, it will return the number of texts detected. */\n prTextsDetected?: number | null;\n /**\n * The uncertainty score is a number between 0 and 1. 0 means the model is very confident that the cutout is accurate,\n * 1 means the model is unsure. For instance, shoes on a shoe box might give a higher uncertainty score as the model is unsure what to segment (shoes, box or both).\n * Currently the model returns an uncertainty score only for images of objects. If an image contains humans, it will return the value -1.\n */\n xUncertaintyScore?: number | null;\n /**\n * The URL of the image generated by the API. The image will be available for 24 hours.\n * @maxLength 100000\n */\n imageUrl?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface V1EditImageResponse {\n /**\n * The generated image objects.\n * @maxSize 10\n */\n data?: ImageObject[];\n /** The model used for generating the image. */\n model?: V1EditImageModelWithLiterals;\n /** Cost of the request in microcents. 
*/\n microcentsSpent?: string | null;\n}\n\nexport interface EditImageResponse {\n /**\n * Timestamp when the prediction was completed\n * @maxLength 2000\n */\n completedAt?: string | null;\n /**\n * Timestamp when the prediction was created\n * @maxLength 2000\n */\n createdAt?: string | null;\n /** Whether data has been removed */\n dataRemoved?: boolean | null;\n /**\n * Error message if the prediction failed\n * @maxLength 1000\n */\n error?: string | null;\n /**\n * Unique identifier for the prediction\n * @maxLength 100\n */\n predictionId?: string | null;\n /** Input parameters for the prediction */\n input?: EditImageInput;\n /**\n * Logs from the prediction process\n * @maxLength 10000\n */\n logs?: string | null;\n /** Performance metrics */\n metrics?: PredictionMetrics;\n /**\n * Output URL of the processed image\n * @maxLength 2000\n */\n output?: string | null;\n /**\n * Timestamp when the prediction started\n * @maxLength 2000\n */\n startedAt?: string | null;\n /**\n * Status of the prediction\n * @maxLength 50\n */\n status?: string | null;\n /** URLs for API operations */\n urls?: PredictionUrls;\n /**\n * Version of the model used\n * @maxLength 100\n */\n version?: string | null;\n /** Cost of the request in microcents. */\n microcentsSpent?: string | null;\n}\n\nexport interface EditImageInput {\n /**\n * Input image URL\n * @maxLength 1000\n */\n image?: string | null;\n /** Desired scale */\n scale?: string | null;\n /** Optional face enhancement */\n faceEnhance?: boolean | null;\n}\n\nexport interface PredictionMetrics {\n /** Time spent on prediction in seconds */\n predictTime?: number | null;\n /** Total time for the entire process in seconds */\n totalTime?: number | null;\n}\n\nexport interface PredictionUrls {\n /**\n * URL to get the prediction status\n * @maxLength 2000\n */\n get?: string | null;\n /**\n * URL to cancel the prediction\n * @maxLength 2000\n */\n cancel?: string | null;\n}\n\nexport interface Recraft_proxyV1EditImageResponse {\n /**\n * Output URL of the processed image\n * @maxLength 10000\n */\n output?: string | null;\n /** Cost of the request in microcents */\n microcentsSpent?: string | null;\n}\n\nexport interface PollImageGenerationResultRequest\n extends PollImageGenerationResultRequestRequestOneOf {\n /** replicate proxy getResult request */\n replicateGetResultRequest?: V1GetResultRequest;\n /** BFL proxy getResult request */\n bflGetResultRequest?: GetResultRequest;\n /** Runware GetTaskResult request */\n runwareGetTaskResultRequest?: GetTaskResultRequest;\n /** OpenAI getVideoResult request */\n openAiGetVideoResultRequest?: GetVideoResultRequest;\n /** Contains additional information for the request. 
*/\n userRequestInfo?: UserRequestInfo;\n}\n\n/** @oneof */\nexport interface PollImageGenerationResultRequestRequestOneOf {\n /** replicate proxy getResult request */\n replicateGetResultRequest?: V1GetResultRequest;\n /** BFL proxy getResult request */\n bflGetResultRequest?: GetResultRequest;\n /** Runware GetTaskResult request */\n runwareGetTaskResultRequest?: GetTaskResultRequest;\n /** OpenAI getVideoResult request */\n openAiGetVideoResultRequest?: GetVideoResultRequest;\n}\n\nexport interface V1GetResultRequest {\n /**\n * The id of the task.\n * @maxLength 1000\n */\n id?: string | null;\n}\n\nexport interface GetResultRequest {\n /**\n * The id of the task.\n * @format GUID\n */\n id?: string | null;\n}\n\nexport interface GetTaskResultRequest {\n /**\n * Task UUID to get results for\n * @format GUID\n */\n taskUuid?: string;\n}\n\nexport interface GetVideoResultRequest {\n /**\n * The id of the video generation job.\n * @maxLength 200\n */\n id?: string;\n}\n\nexport interface PollImageGenerationResultResponse\n extends PollImageGenerationResultResponseResponseOneOf {\n /** replicate proxy getResult response */\n replicateGetResultResponse?: V1GetResultResponse;\n /** BFL proxy getResult response */\n bflGetResultResponse?: GetResultResponse;\n /** Runware GetTaskResult response */\n runwareGetTaskResultResponse?: GetTaskResultResponse;\n /** OpenAI getVideoResult response */\n openAiGetVideoResultResponse?: GetVideoResultResponse;\n}\n\n/** @oneof */\nexport interface PollImageGenerationResultResponseResponseOneOf {\n /** replicate proxy getResult response */\n replicateGetResultResponse?: V1GetResultResponse;\n /** BFL proxy getResult response */\n bflGetResultResponse?: GetResultResponse;\n /** Runware GetTaskResult response */\n runwareGetTaskResultResponse?: GetTaskResultResponse;\n /** OpenAI getVideoResult response */\n openAiGetVideoResultResponse?: GetVideoResultResponse;\n}\n\nexport interface V1GetResultResponse {\n /**\n * The prediction ID\n * @maxLength 1000\n */\n id?: string | null;\n /**\n * Model Name\n * @maxLength 100\n */\n model?: string | null;\n /**\n * Model version\n * @maxLength 100\n */\n version?: string | null;\n /**\n * The prediction output URLs\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n output?: string[] | null;\n /**\n * Prediction text output\n * @minSize 1\n * @maxSize 10\n * @maxLength 40000\n */\n textOutput?: string[] | null;\n /**\n * The prediction status\n * @maxLength 100\n */\n status?: string | null;\n}\n\nexport interface GetResultResponse {\n /**\n * The id of the task.\n * @format GUID\n */\n id?: string | null;\n /**\n * Status of the image generation;\n * one of: Task not found, Pending, Request Moderated, Content Moderated, Ready, Error\n * @maxLength 100\n */\n status?: string | null;\n /** Result object for the generated image */\n result?: ResultObject;\n}\n\nexport interface GetTaskResultResponse\n extends GetTaskResultResponseResponseOneOf {\n videoInferenceResponse?: VideoInferenceResponse;\n}\n\n/** @oneof */\nexport interface GetTaskResultResponseResponseOneOf {\n videoInferenceResponse?: VideoInferenceResponse;\n}\n\nexport interface GetVideoResultResponse {\n videoJob?: VideoJob;\n}\n\nexport interface DomainEvent extends DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n /** Event ID. With this ID you can easily spot duplicated events and ignore them. 
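// A minimal sketch of the oneof convention used by the polling types above,
// assuming they are in scope: set exactly one provider-specific request, then
// read whichever response field is present. The GUID is a placeholder.
const pollRequest: PollImageGenerationResultRequest = {
  bflGetResultRequest: { id: '00000000-0000-0000-0000-000000000000' },
};

function pollStatus(res: PollImageGenerationResultResponse): string | null | undefined {
  // Only one branch of the oneof is populated per response.
  return res.bflGetResultResponse?.status ?? res.replicateGetResultResponse?.status;
}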
*/\n id?: string;\n /**\n * Fully Qualified Domain Name of an entity. This is a unique identifier assigned to the API's main business entities.\n * For example, `wix.stores.catalog.product`, `wix.bookings.session`, `wix.payments.transaction`.\n */\n entityFqdn?: string;\n /**\n * Event action name, placed at the top level to make it easier for users to dispatch messages.\n * For example: `created`/`updated`/`deleted`/`started`/`completed`/`email_opened`.\n */\n slug?: string;\n /** ID of the entity associated with the event. */\n entityId?: string;\n /** Event timestamp in [ISO-8601](https://en.wikipedia.org/wiki/ISO_8601) format and UTC time. For example, `2020-04-26T13:57:50.699Z`. */\n eventTime?: Date | null;\n /**\n * Whether the event was triggered as a result of a privacy regulation application\n * (for example, GDPR).\n */\n triggeredByAnonymizeRequest?: boolean | null;\n /** If present, indicates the action that triggered the event. */\n originatedFrom?: string | null;\n /**\n * A sequence number that indicates the order of updates to an entity. For example, if an entity was updated at 16:00 and then again at 16:01, the second update will always have a higher sequence number.\n * You can use this number to make sure you're handling updates in the right order. Just save the latest sequence number on your end and compare it to the one in each new message. If the new message has an older (lower) number, you can safely ignore it.\n */\n entityEventSequence?: string | null;\n}\n\n/** @oneof */\nexport interface DomainEventBodyOneOf {\n createdEvent?: EntityCreatedEvent;\n updatedEvent?: EntityUpdatedEvent;\n deletedEvent?: EntityDeletedEvent;\n actionEvent?: ActionEvent;\n}\n\nexport interface EntityCreatedEvent {\n entityAsJson?: string;\n /** Indicates the event was triggered by a restore-from-trashbin operation for a previously deleted entity. */\n restoreInfo?: RestoreInfo;\n}\n\nexport interface RestoreInfo {\n deletedDate?: Date | null;\n}\n\nexport interface EntityUpdatedEvent {\n /**\n * Since platformized APIs only expose PATCH and not PUT, we can't assume that the fields sent from the client are the actual diff.\n * This means that to generate a list of changed fields (as opposed to sent fields) one needs to traverse both objects.\n * We don't want to impose this on all developers and so we leave this traversal to the notification recipients that need it.\n */\n currentEntityAsJson?: string;\n}\n\nexport interface EntityDeletedEvent {\n /** Entity that was deleted. */\n deletedEntityAsJson?: string | null;\n}\n\nexport interface ActionEvent {\n bodyAsJson?: string;\n}\n\nexport interface MessageEnvelope {\n /**\n * App instance ID.\n * @format GUID\n */\n instanceId?: string | null;\n /**\n * Event type.\n * @maxLength 150\n */\n eventType?: string;\n /** The identification type and identity data. */\n identity?: IdentificationData;\n /** Stringified payload. 
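// A minimal sketch of the entityEventSequence bookkeeping described above,
// assuming DomainEvent is in scope: keep the highest sequence seen per entity
// and ignore older (lower) updates.
const lastSeenSequence = new Map<string, bigint>();

function shouldHandle(event: DomainEvent): boolean {
  if (!event.entityId || !event.entityEventSequence) return true;
  const seq = BigInt(event.entityEventSequence);
  const prev = lastSeenSequence.get(event.entityId);
  if (prev !== undefined && seq <= prev) return false; // stale update, safe to ignore
  lastSeenSequence.set(event.entityId, seq);
  return true;
}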
*/\n data?: string;\n}\n\nexport interface IdentificationData extends IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n /** @readonly */\n identityType?: WebhookIdentityTypeWithLiterals;\n}\n\n/** @oneof */\nexport interface IdentificationDataIdOneOf {\n /**\n * ID of a site visitor that has not logged in to the site.\n * @format GUID\n */\n anonymousVisitorId?: string;\n /**\n * ID of a site visitor that has logged in to the site.\n * @format GUID\n */\n memberId?: string;\n /**\n * ID of a Wix user (site owner, contributor, etc.).\n * @format GUID\n */\n wixUserId?: string;\n /**\n * ID of an app.\n * @format GUID\n */\n appId?: string;\n}\n\nexport enum WebhookIdentityType {\n UNKNOWN = 'UNKNOWN',\n ANONYMOUS_VISITOR = 'ANONYMOUS_VISITOR',\n MEMBER = 'MEMBER',\n WIX_USER = 'WIX_USER',\n APP = 'APP',\n}\n\n/** @enumType */\nexport type WebhookIdentityTypeWithLiterals =\n | WebhookIdentityType\n | 'UNKNOWN'\n | 'ANONYMOUS_VISITOR'\n | 'MEMBER'\n | 'WIX_USER'\n | 'APP';\n\nexport interface AccountDetails {\n /**\n * ID of the account.\n * @format GUID\n */\n accountId?: string | null;\n /**\n * ID of the parent account.\n * @format GUID\n */\n parentAccountId?: string | null;\n /**\n * ID of the site, if applicable.\n * @format GUID\n */\n siteId?: string | null;\n}\n","import * as ambassadorWixDsWixAiGatewayV1Prompt from './ds-wix-ai-gateway-v1-prompt-generators.http.js';\nimport * as ambassadorWixDsWixAiGatewayV1PromptTypes from './ds-wix-ai-gateway-v1-prompt-generators.types.js';\nimport * as ambassadorWixDsWixAiGatewayV1PromptUniversalTypes from './ds-wix-ai-gateway-v1-prompt-generators.universal.js';\n\nexport type __PublicMethodMetaInfo<\n K = string,\n M = unknown,\n T = unknown,\n S = unknown,\n Q = unknown,\n R = unknown\n> = {\n getUrl: (context: any) => string;\n httpMethod: K;\n path: string;\n pathParams: M;\n __requestType: T;\n __originalRequestType: S;\n __responseType: Q;\n __originalResponseType: R;\n};\n\nexport function generateTextByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptStreamed(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptRequest,\n 
ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptStreamed(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-streamed/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByPromptObjectStreamed(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByPromptObjectStreamed(\n payload\n );\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-prompt-object-streamed',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateEmbedding(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateEmbeddingsRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateEmbeddingsRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateEmbeddingsResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateEmbeddingsResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateEmbedding(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-embedding',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n 
ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTextByProjectStreamed(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTextByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedTextChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedTextChunk\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTextByProjectStreamed(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-by-project-streamed/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateModeration(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateModerationRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateModerationRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateModerationResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateModerationResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateModeration(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-moderation',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as 
any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateImageByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateImageByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateImageByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateImageByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-image-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateContentByPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByProjectResponse\n> {\n const payload = { projectId: ':projectId' } as any;\n\n const getRequestOptions =\n 
ambassadorWixDsWixAiGatewayV1Prompt.generateContentByProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateContentByPromptObject(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptObjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateContentByPromptObjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateContentByPromptObjectResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateContentByPromptObject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-content-by-prompt-object',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateTranscription(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTranscriptionRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTranscriptionRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateTranscriptionResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateTranscriptionResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateTranscription(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-transcription',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateAudio(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateAudio(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-audio',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function generateAudioStreamed(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GenerateAudioRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GeneratedAudioChunk,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GeneratedAudioChunk\n> {\n const payload = {} as any;\n\n const 
getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.generateAudioStreamed(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/generate-audio-streamed',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function publishPrompt(): __PublicMethodMetaInfo<\n 'POST',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishPromptResponse\n> {\n const payload = { prompt: { id: ':promptId' } } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.publishPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/prompt/{prompt.id}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getPrompt(): __PublicMethodMetaInfo<\n 'GET',\n { promptId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetPromptRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetPromptResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetPromptResponse\n> {\n const payload = { promptId: ':promptId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getPrompt(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/prompt/{promptId}',\n pathParams: { promptId: 'promptId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function publishProject(): __PublicMethodMetaInfo<\n 'POST',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PublishProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PublishProjectResponse\n> {\n const payload = { project: { id: ':projectId' } } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.publishProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/project/{project.id}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getProject(): __PublicMethodMetaInfo<\n 'GET',\n { projectId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetProjectRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetProjectResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetProjectResponse\n> {\n const payload = { projectId: 
':projectId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getProject(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/project/{projectId}',\n pathParams: { projectId: 'projectId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getStatus(): __PublicMethodMetaInfo<\n 'GET',\n { entityId: string },\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetStatusRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetStatusRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetStatusResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetStatusResponse\n> {\n const payload = { entityId: ':entityId' } as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getStatus(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/status/{entityId}',\n pathParams: { entityId: 'entityId' },\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function getApplicationUsage(): __PublicMethodMetaInfo<\n 'GET',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetApplicationUsageRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetApplicationUsageRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.GetApplicationUsageResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.GetApplicationUsageResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.getApplicationUsage(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'GET',\n path: '/v1/application-usage',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function editImage(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.Wix_ai_gatewayV1EditImageRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.Wix_ai_gatewayV1EditImageRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.Wix_ai_gatewayV1EditImageResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.Wix_ai_gatewayV1EditImageResponse\n> {\n const payload = {} as any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.editImage(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/edit-image',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport function pollImageGenerationResult(): __PublicMethodMetaInfo<\n 'POST',\n {},\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PollImageGenerationResultRequest,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PollImageGenerationResultRequest,\n ambassadorWixDsWixAiGatewayV1PromptUniversalTypes.PollImageGenerationResultResponse,\n ambassadorWixDsWixAiGatewayV1PromptTypes.PollImageGenerationResultResponse\n> {\n const payload = {} as 
any;\n\n const getRequestOptions =\n ambassadorWixDsWixAiGatewayV1Prompt.pollImageGenerationResult(payload);\n\n const getUrl = (context: any): string => {\n const { url } = getRequestOptions(context);\n return url!;\n };\n\n return {\n getUrl,\n httpMethod: 'POST',\n path: '/v1/poll-image-generation-result',\n pathParams: {},\n __requestType: null as any,\n __originalRequestType: null as any,\n __responseType: null as any,\n __originalResponseType: null as any,\n };\n}\n\nexport {\n Prompt as PromptOriginal,\n PromptModelRequestOneOf as PromptModelRequestOneOfOriginal,\n FallbackPromptConfig as FallbackPromptConfigOriginal,\n OpenaiproxyV1CreateChatCompletionRequest as OpenaiproxyV1CreateChatCompletionRequestOriginal,\n OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf as OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOfOriginal,\n CreateChatCompletionRequestFunctionSignature as CreateChatCompletionRequestFunctionSignatureOriginal,\n OpenaiproxyV1Model as OpenaiproxyV1ModelOriginal,\n OpenaiproxyV1ModelWithLiterals as OpenaiproxyV1ModelWithLiteralsOriginal,\n OpenaiproxyV1ChatCompletionMessage as OpenaiproxyV1ChatCompletionMessageOriginal,\n ChatCompletionMessageFunctionWithArgs as ChatCompletionMessageFunctionWithArgsOriginal,\n OpenaiproxyV1ChatCompletionMessageImageUrlContent as OpenaiproxyV1ChatCompletionMessageImageUrlContentOriginal,\n OpenaiproxyV1ChatCompletionMessageMessageRole as OpenaiproxyV1ChatCompletionMessageMessageRoleOriginal,\n OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals as OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n ChatCompletionMessageToolCall as ChatCompletionMessageToolCallOriginal,\n OpenaiproxyV1ChatCompletionMessageContentPart as OpenaiproxyV1ChatCompletionMessageContentPartOriginal,\n OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf as OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOfOriginal,\n V1CreateChatCompletionRequestTool as V1CreateChatCompletionRequestToolOriginal,\n V1FineTuningSpec as V1FineTuningSpecOriginal,\n OpenaiproxyV1CreateChatCompletionRequestResponseFormat as OpenaiproxyV1CreateChatCompletionRequestResponseFormatOriginal,\n TextBisonPredictRequest as TextBisonPredictRequestOriginal,\n TextInstance as TextInstanceOriginal,\n PredictParameters as PredictParametersOriginal,\n TextBisonModel as TextBisonModelOriginal,\n TextBisonModelWithLiterals as TextBisonModelWithLiteralsOriginal,\n ChatBisonPredictRequest as ChatBisonPredictRequestOriginal,\n ChatInstance as ChatInstanceOriginal,\n Example as ExampleOriginal,\n ChatMessage as ChatMessageOriginal,\n ChatBisonModel as ChatBisonModelOriginal,\n ChatBisonModelWithLiterals as ChatBisonModelWithLiteralsOriginal,\n CreateChatCompletionRequest as CreateChatCompletionRequestOriginal,\n CreateChatCompletionRequestFunctionCallOneOf as CreateChatCompletionRequestFunctionCallOneOfOriginal,\n FunctionSignature as FunctionSignatureOriginal,\n V1Model as V1ModelOriginal,\n V1ModelWithLiterals as V1ModelWithLiteralsOriginal,\n V1ChatCompletionMessage as V1ChatCompletionMessageOriginal,\n FunctionWithArgs as FunctionWithArgsOriginal,\n ChatCompletionMessageImageUrlContent as ChatCompletionMessageImageUrlContentOriginal,\n ChatCompletionMessageMessageRole as ChatCompletionMessageMessageRoleOriginal,\n ChatCompletionMessageMessageRoleWithLiterals as ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n ToolCall as ToolCallOriginal,\n ChatCompletionMessageContentPart as ChatCompletionMessageContentPartOriginal,\n 
ChatCompletionMessageContentPartContentValueOneOf as ChatCompletionMessageContentPartContentValueOneOfOriginal,\n CreateChatCompletionRequestTool as CreateChatCompletionRequestToolOriginal,\n CreateChatCompletionRequestResponseFormat as CreateChatCompletionRequestResponseFormatOriginal,\n GenerateContentRequest as GenerateContentRequestOriginal,\n GoogleproxyV1Model as GoogleproxyV1ModelOriginal,\n GoogleproxyV1ModelWithLiterals as GoogleproxyV1ModelWithLiteralsOriginal,\n Content as ContentOriginal,\n ContentRole as ContentRoleOriginal,\n ContentRoleWithLiterals as ContentRoleWithLiteralsOriginal,\n V1ContentPart as V1ContentPartOriginal,\n ContentData as ContentDataOriginal,\n FunctionCall as FunctionCallOriginal,\n FunctionResponse as FunctionResponseOriginal,\n ExecutableCode as ExecutableCodeOriginal,\n Language as LanguageOriginal,\n LanguageWithLiterals as LanguageWithLiteralsOriginal,\n V1CodeExecutionResult as V1CodeExecutionResultOriginal,\n Outcome as OutcomeOriginal,\n OutcomeWithLiterals as OutcomeWithLiteralsOriginal,\n Blob as BlobOriginal,\n MediaResolution as MediaResolutionOriginal,\n MediaResolutionLevel as MediaResolutionLevelOriginal,\n MediaResolutionLevelWithLiterals as MediaResolutionLevelWithLiteralsOriginal,\n SystemInstruction as SystemInstructionOriginal,\n GoogleproxyV1Tool as GoogleproxyV1ToolOriginal,\n DynamicRetrievalConfigMode as DynamicRetrievalConfigModeOriginal,\n DynamicRetrievalConfigModeWithLiterals as DynamicRetrievalConfigModeWithLiteralsOriginal,\n DynamicRetrievalConfig as DynamicRetrievalConfigOriginal,\n Environment as EnvironmentOriginal,\n EnvironmentWithLiterals as EnvironmentWithLiteralsOriginal,\n FunctionDeclaration as FunctionDeclarationOriginal,\n GoogleSearchRetrieval as GoogleSearchRetrievalOriginal,\n CodeExecution as CodeExecutionOriginal,\n GoogleSearch as GoogleSearchOriginal,\n ComputerUse as ComputerUseOriginal,\n SafetySetting as SafetySettingOriginal,\n HarmCategory as HarmCategoryOriginal,\n HarmCategoryWithLiterals as HarmCategoryWithLiteralsOriginal,\n Threshold as ThresholdOriginal,\n ThresholdWithLiterals as ThresholdWithLiteralsOriginal,\n GenerationConfig as GenerationConfigOriginal,\n GenerationThinkingConfig as GenerationThinkingConfigOriginal,\n Modality as ModalityOriginal,\n ModalityWithLiterals as ModalityWithLiteralsOriginal,\n ImageConfig as ImageConfigOriginal,\n ImageOutputOptions as ImageOutputOptionsOriginal,\n PersonGeneration as PersonGenerationOriginal,\n PersonGenerationWithLiterals as PersonGenerationWithLiteralsOriginal,\n V1ToolConfig as V1ToolConfigOriginal,\n FunctionCallingConfig as FunctionCallingConfigOriginal,\n Mode as ModeOriginal,\n ModeWithLiterals as ModeWithLiteralsOriginal,\n FineTuningSpec as FineTuningSpecOriginal,\n InvokeAnthropicClaudeModelRequest as InvokeAnthropicClaudeModelRequestOriginal,\n InputSchema as InputSchemaOriginal,\n CacheControl as CacheControlOriginal,\n Type as TypeOriginal,\n TypeWithLiterals as TypeWithLiteralsOriginal,\n Model as ModelOriginal,\n ModelWithLiterals as ModelWithLiteralsOriginal,\n AnthropicClaudeMessage as AnthropicClaudeMessageOriginal,\n Role as RoleOriginal,\n RoleWithLiterals as RoleWithLiteralsOriginal,\n ContentBlock as ContentBlockOriginal,\n ContentBlockTypeOneOf as ContentBlockTypeOneOfOriginal,\n Text as TextOriginal,\n ImageUrl as ImageUrlOriginal,\n MediaType as MediaTypeOriginal,\n MediaTypeWithLiterals as MediaTypeWithLiteralsOriginal,\n ToolUse as ToolUseOriginal,\n ToolResult as ToolResultOriginal,\n SimpleContentBlock as 
SimpleContentBlockOriginal,\n SimpleContentBlockTypeOneOf as SimpleContentBlockTypeOneOfOriginal,\n Thinking as ThinkingOriginal,\n RedactedThinking as RedactedThinkingOriginal,\n Tool as ToolOriginal,\n ToolChoice as ToolChoiceOriginal,\n ToolChoiceType as ToolChoiceTypeOriginal,\n ToolChoiceTypeWithLiterals as ToolChoiceTypeWithLiteralsOriginal,\n ThinkingConfig as ThinkingConfigOriginal,\n McpServer as McpServerOriginal,\n McpServerType as McpServerTypeOriginal,\n McpServerTypeWithLiterals as McpServerTypeWithLiteralsOriginal,\n ToolConfiguration as ToolConfigurationOriginal,\n V1InvokeAnthropicClaudeModelRequest as V1InvokeAnthropicClaudeModelRequestOriginal,\n GoogleproxyV1InputSchema as GoogleproxyV1InputSchemaOriginal,\n GoogleproxyV1CacheControl as GoogleproxyV1CacheControlOriginal,\n V1CacheControlType as V1CacheControlTypeOriginal,\n V1CacheControlTypeWithLiterals as V1CacheControlTypeWithLiteralsOriginal,\n ClaudeModel as ClaudeModelOriginal,\n ClaudeModelWithLiterals as ClaudeModelWithLiteralsOriginal,\n V1AnthropicClaudeMessage as V1AnthropicClaudeMessageOriginal,\n V1MessageRoleRole as V1MessageRoleRoleOriginal,\n V1MessageRoleRoleWithLiterals as V1MessageRoleRoleWithLiteralsOriginal,\n GoogleproxyV1ContentBlock as GoogleproxyV1ContentBlockOriginal,\n GoogleproxyV1ContentBlockTypeOneOf as GoogleproxyV1ContentBlockTypeOneOfOriginal,\n GoogleproxyV1Text as GoogleproxyV1TextOriginal,\n GoogleproxyV1ImageUrl as GoogleproxyV1ImageUrlOriginal,\n V1ImageMediaTypeMediaType as V1ImageMediaTypeMediaTypeOriginal,\n V1ImageMediaTypeMediaTypeWithLiterals as V1ImageMediaTypeMediaTypeWithLiteralsOriginal,\n GoogleproxyV1ToolUse as GoogleproxyV1ToolUseOriginal,\n GoogleproxyV1ToolResult as GoogleproxyV1ToolResultOriginal,\n V1SimpleContentBlock as V1SimpleContentBlockOriginal,\n V1SimpleContentBlockTypeOneOf as V1SimpleContentBlockTypeOneOfOriginal,\n GoogleproxyV1Thinking as GoogleproxyV1ThinkingOriginal,\n GoogleproxyV1RedactedThinking as GoogleproxyV1RedactedThinkingOriginal,\n InvokeAnthropicClaudeModelRequestTool as InvokeAnthropicClaudeModelRequestToolOriginal,\n GoogleproxyV1ToolChoice as GoogleproxyV1ToolChoiceOriginal,\n GoogleproxyV1ToolChoiceType as GoogleproxyV1ToolChoiceTypeOriginal,\n GoogleproxyV1ToolChoiceTypeWithLiterals as GoogleproxyV1ToolChoiceTypeWithLiteralsOriginal,\n GoogleproxyV1ThinkingConfig as GoogleproxyV1ThinkingConfigOriginal,\n GoogleproxyV1McpServer as GoogleproxyV1McpServerOriginal,\n GoogleproxyV1McpServerType as GoogleproxyV1McpServerTypeOriginal,\n GoogleproxyV1McpServerTypeWithLiterals as GoogleproxyV1McpServerTypeWithLiteralsOriginal,\n V1McpServerToolConfiguration as V1McpServerToolConfigurationOriginal,\n InvokeAnthropicModelRequest as InvokeAnthropicModelRequestOriginal,\n AnthropicModel as AnthropicModelOriginal,\n AnthropicModelWithLiterals as AnthropicModelWithLiteralsOriginal,\n AnthropicMessage as AnthropicMessageOriginal,\n MessageRoleRole as MessageRoleRoleOriginal,\n MessageRoleRoleWithLiterals as MessageRoleRoleWithLiteralsOriginal,\n V1ContentBlock as V1ContentBlockOriginal,\n V1ContentBlockTypeOneOf as V1ContentBlockTypeOneOfOriginal,\n V1Text as V1TextOriginal,\n V1CacheControl as V1CacheControlOriginal,\n CacheControlType as CacheControlTypeOriginal,\n CacheControlTypeWithLiterals as CacheControlTypeWithLiteralsOriginal,\n Citation as CitationOriginal,\n CitationTypeOneOf as CitationTypeOneOfOriginal,\n CharLocationCitation as CharLocationCitationOriginal,\n PageLocationCitation as PageLocationCitationOriginal,\n 
ContentBlockLocationCitation as ContentBlockLocationCitationOriginal,\n WebSearchResultLocationCitation as WebSearchResultLocationCitationOriginal,\n SearchResultLocationCitation as SearchResultLocationCitationOriginal,\n V1ImageUrl as V1ImageUrlOriginal,\n ImageMediaTypeMediaType as ImageMediaTypeMediaTypeOriginal,\n ImageMediaTypeMediaTypeWithLiterals as ImageMediaTypeMediaTypeWithLiteralsOriginal,\n V1ToolUse as V1ToolUseOriginal,\n V1ToolResult as V1ToolResultOriginal,\n ToolResultContentBlock as ToolResultContentBlockOriginal,\n ToolResultContentBlockTypeOneOf as ToolResultContentBlockTypeOneOfOriginal,\n DocumentContent as DocumentContentOriginal,\n DocumentSource as DocumentSourceOriginal,\n CitationsEnabled as CitationsEnabledOriginal,\n ToolResultSearchResult as ToolResultSearchResultOriginal,\n V1Thinking as V1ThinkingOriginal,\n V1RedactedThinking as V1RedactedThinkingOriginal,\n McpToolUse as McpToolUseOriginal,\n ServerToolUse as ServerToolUseOriginal,\n WebSearchToolResult as WebSearchToolResultOriginal,\n WebSearchToolResultContentOneOf as WebSearchToolResultContentOneOfOriginal,\n WebSearchResultList as WebSearchResultListOriginal,\n WebSearchResult as WebSearchResultOriginal,\n WebSearchToolResultError as WebSearchToolResultErrorOriginal,\n CodeExecutionToolResult as CodeExecutionToolResultOriginal,\n CodeExecutionToolResultContentOneOf as CodeExecutionToolResultContentOneOfOriginal,\n CodeExecutionResult as CodeExecutionResultOriginal,\n CodeExecutionToolResultError as CodeExecutionToolResultErrorOriginal,\n ContainerUpload as ContainerUploadOriginal,\n WebFetchToolResult as WebFetchToolResultOriginal,\n WebFetchToolResultContentOneOf as WebFetchToolResultContentOneOfOriginal,\n WebFetchToolResultContentSuccess as WebFetchToolResultContentSuccessOriginal,\n WebFetchToolResultContentError as WebFetchToolResultContentErrorOriginal,\n V1Tool as V1ToolOriginal,\n V1ToolKindOneOf as V1ToolKindOneOfOriginal,\n CustomTool as CustomToolOriginal,\n V1InputSchema as V1InputSchemaOriginal,\n ComputerUseTool as ComputerUseToolOriginal,\n TextEditorTool as TextEditorToolOriginal,\n BashTool as BashToolOriginal,\n WebSearchTool as WebSearchToolOriginal,\n WebSearchUserLocation as WebSearchUserLocationOriginal,\n CodeExecutionTool as CodeExecutionToolOriginal,\n WebFetchTool as WebFetchToolOriginal,\n V1ToolChoice as V1ToolChoiceOriginal,\n V1ToolChoiceType as V1ToolChoiceTypeOriginal,\n V1ToolChoiceTypeWithLiterals as V1ToolChoiceTypeWithLiteralsOriginal,\n V1ThinkingConfig as V1ThinkingConfigOriginal,\n V1McpServer as V1McpServerOriginal,\n V1McpServerType as V1McpServerTypeOriginal,\n V1McpServerTypeWithLiterals as V1McpServerTypeWithLiteralsOriginal,\n McpServerToolConfiguration as McpServerToolConfigurationOriginal,\n RequestMetadata as RequestMetadataOriginal,\n InvokeLlamaModelRequest as InvokeLlamaModelRequestOriginal,\n LlamaModel as LlamaModelOriginal,\n LlamaModelWithLiterals as LlamaModelWithLiteralsOriginal,\n InvokeConverseRequest as InvokeConverseRequestOriginal,\n ConverseModel as ConverseModelOriginal,\n ConverseModelWithLiterals as ConverseModelWithLiteralsOriginal,\n ConverseMessage as ConverseMessageOriginal,\n ConverseContentBlock as ConverseContentBlockOriginal,\n ConverseContentBlockContentOneOf as ConverseContentBlockContentOneOfOriginal,\n ConverseReasoningContent as ConverseReasoningContentOriginal,\n ReasoningText as ReasoningTextOriginal,\n ConverseToolUse as ConverseToolUseOriginal,\n ConverseToolResult as ConverseToolResultOriginal,\n 
ConverseToolResultContent as ConverseToolResultContentOriginal,\n ConverseToolResultContentContentOneOf as ConverseToolResultContentContentOneOfOriginal,\n ConverseInferenceConfig as ConverseInferenceConfigOriginal,\n ToolConfig as ToolConfigOriginal,\n ConverseTool as ConverseToolOriginal,\n ToolSpecification as ToolSpecificationOriginal,\n ConverseInputSchema as ConverseInputSchemaOriginal,\n ConversePerformanceConfig as ConversePerformanceConfigOriginal,\n SystemContentBlock as SystemContentBlockOriginal,\n CreateImageRequest as CreateImageRequestOriginal,\n V1ImageModel as V1ImageModelOriginal,\n V1ImageModelWithLiterals as V1ImageModelWithLiteralsOriginal,\n ImageQuality as ImageQualityOriginal,\n ImageQualityWithLiterals as ImageQualityWithLiteralsOriginal,\n ImageSize as ImageSizeOriginal,\n ImageSizeWithLiterals as ImageSizeWithLiteralsOriginal,\n ImageStyle as ImageStyleOriginal,\n ImageStyleWithLiterals as ImageStyleWithLiteralsOriginal,\n V1TextToImageRequest as V1TextToImageRequestOriginal,\n ImageModel as ImageModelOriginal,\n ImageModelWithLiterals as ImageModelWithLiteralsOriginal,\n TextPrompt as TextPromptOriginal,\n ClipGuidancePreset as ClipGuidancePresetOriginal,\n ClipGuidancePresetWithLiterals as ClipGuidancePresetWithLiteralsOriginal,\n Sampler as SamplerOriginal,\n SamplerWithLiterals as SamplerWithLiteralsOriginal,\n TextToImageRequestStylePreset as TextToImageRequestStylePresetOriginal,\n TextToImageRequestStylePresetWithLiterals as TextToImageRequestStylePresetWithLiteralsOriginal,\n GenerateCoreRequest as GenerateCoreRequestOriginal,\n ImageCoreModel as ImageCoreModelOriginal,\n ImageCoreModelWithLiterals as ImageCoreModelWithLiteralsOriginal,\n GenerateCoreRequestStylePreset as GenerateCoreRequestStylePresetOriginal,\n GenerateCoreRequestStylePresetWithLiterals as GenerateCoreRequestStylePresetWithLiteralsOriginal,\n GenerateStableDiffusionRequest as GenerateStableDiffusionRequestOriginal,\n GenerationMode as GenerationModeOriginal,\n GenerationModeWithLiterals as GenerationModeWithLiteralsOriginal,\n ImageStableDiffusionModel as ImageStableDiffusionModelOriginal,\n ImageStableDiffusionModelWithLiterals as ImageStableDiffusionModelWithLiteralsOriginal,\n GenerateStableDiffusionRequestOutputFormat as GenerateStableDiffusionRequestOutputFormatOriginal,\n GenerateStableDiffusionRequestOutputFormatWithLiterals as GenerateStableDiffusionRequestOutputFormatWithLiteralsOriginal,\n GenerateAnImageRequest as GenerateAnImageRequestOriginal,\n GenerateAnImageModel as GenerateAnImageModelOriginal,\n GenerateAnImageModelWithLiterals as GenerateAnImageModelWithLiteralsOriginal,\n CreatePredictionRequest as CreatePredictionRequestOriginal,\n CreatePredictionRequestInputOneOf as CreatePredictionRequestInputOneOfOriginal,\n CreatePredictionModel as CreatePredictionModelOriginal,\n CreatePredictionModelWithLiterals as CreatePredictionModelWithLiteralsOriginal,\n FluxPulid as FluxPulidOriginal,\n FluxDevControlnet as FluxDevControlnetOriginal,\n ReveEdit as ReveEditOriginal,\n LucatacoFlorence2Large as LucatacoFlorence2LargeOriginal,\n TaskInput as TaskInputOriginal,\n TaskInputWithLiterals as TaskInputWithLiteralsOriginal,\n PerceptronIsaac01 as PerceptronIsaac01Original,\n ResponseType as ResponseTypeOriginal,\n ResponseTypeWithLiterals as ResponseTypeWithLiteralsOriginal,\n PrunaaiZImageTurbo as PrunaaiZImageTurboOriginal,\n QwenImageLayered as QwenImageLayeredOriginal,\n EditImageWithPromptRequest as EditImageWithPromptRequestOriginal,\n EditImageWithPromptRequestModel as 
EditImageWithPromptRequestModelOriginal,\n EditImageWithPromptRequestModelWithLiterals as EditImageWithPromptRequestModelWithLiteralsOriginal,\n StylePreset as StylePresetOriginal,\n StylePresetWithLiterals as StylePresetWithLiteralsOriginal,\n OutpaintDirection as OutpaintDirectionOriginal,\n TextToImageRequest as TextToImageRequestOriginal,\n TextToImageRequestModel as TextToImageRequestModelOriginal,\n TextToImageRequestModelWithLiterals as TextToImageRequestModelWithLiteralsOriginal,\n LoraModelSelect as LoraModelSelectOriginal,\n Inputs as InputsOriginal,\n InvokeMlPlatformLlamaModelRequest as InvokeMlPlatformLlamaModelRequestOriginal,\n InvokeChatCompletionRequest as InvokeChatCompletionRequestOriginal,\n PerplexityModel as PerplexityModelOriginal,\n PerplexityModelWithLiterals as PerplexityModelWithLiteralsOriginal,\n PerplexityMessage as PerplexityMessageOriginal,\n PerplexityMessageMessageRole as PerplexityMessageMessageRoleOriginal,\n PerplexityMessageMessageRoleWithLiterals as PerplexityMessageMessageRoleWithLiteralsOriginal,\n InvokeChatCompletionRequestResponseFormat as InvokeChatCompletionRequestResponseFormatOriginal,\n InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf as InvokeChatCompletionRequestResponseFormatFormatDetailsOneOfOriginal,\n GenerateImageRequest as GenerateImageRequestOriginal,\n ImagenModel as ImagenModelOriginal,\n ImagenModelWithLiterals as ImagenModelWithLiteralsOriginal,\n Instance as InstanceOriginal,\n Parameters as ParametersOriginal,\n OutputOptions as OutputOptionsOriginal,\n GenerateImageMlPlatformRequest as GenerateImageMlPlatformRequestOriginal,\n GenerateImageMlPlatformRequestInputOneOf as GenerateImageMlPlatformRequestInputOneOfOriginal,\n GenerateImageMlPlatformModel as GenerateImageMlPlatformModelOriginal,\n GenerateImageMlPlatformModelWithLiterals as GenerateImageMlPlatformModelWithLiteralsOriginal,\n V1FluxPulid as V1FluxPulidOriginal,\n CreateImageOpenAiRequest as CreateImageOpenAiRequestOriginal,\n OpenAiImageModel as OpenAiImageModelOriginal,\n OpenAiImageModelWithLiterals as OpenAiImageModelWithLiteralsOriginal,\n EditImageOpenAiRequest as EditImageOpenAiRequestOriginal,\n GenerateVideoRequest as GenerateVideoRequestOriginal,\n VideoGenModel as VideoGenModelOriginal,\n VideoGenModelWithLiterals as VideoGenModelWithLiteralsOriginal,\n GenerateVideoInstance as GenerateVideoInstanceOriginal,\n V1ImageInput as V1ImageInputOriginal,\n GenerateVideoParameters as GenerateVideoParametersOriginal,\n V1CreateChatCompletionRequest as V1CreateChatCompletionRequestOriginal,\n ChatCompletionModel as ChatCompletionModelOriginal,\n ChatCompletionModelWithLiterals as ChatCompletionModelWithLiteralsOriginal,\n GoogleproxyV1ChatCompletionMessage as GoogleproxyV1ChatCompletionMessageOriginal,\n V1ChatCompletionMessageImageUrlContent as V1ChatCompletionMessageImageUrlContentOriginal,\n V1ChatCompletionMessageMessageRole as V1ChatCompletionMessageMessageRoleOriginal,\n V1ChatCompletionMessageMessageRoleWithLiterals as V1ChatCompletionMessageMessageRoleWithLiteralsOriginal,\n V1ChatCompletionMessageContentPart as V1ChatCompletionMessageContentPartOriginal,\n V1ChatCompletionMessageContentPartContentValueOneOf as V1ChatCompletionMessageContentPartContentValueOneOfOriginal,\n V1CreateChatCompletionRequestResponseFormat as V1CreateChatCompletionRequestResponseFormatOriginal,\n InvokeMlPlatformOpenAIChatCompletionRawRequest as InvokeMlPlatformOpenAIChatCompletionRawRequestOriginal,\n ChatCompletionMessage as ChatCompletionMessageOriginal,\n 
ImageUrlContent as ImageUrlContentOriginal,\n MessageRole as MessageRoleOriginal,\n MessageRoleWithLiterals as MessageRoleWithLiteralsOriginal,\n ContentPart as ContentPartOriginal,\n ContentPartContentValueOneOf as ContentPartContentValueOneOfOriginal,\n ResponseFormat as ResponseFormatOriginal,\n VideoInferenceRequest as VideoInferenceRequestOriginal,\n OutputFormat as OutputFormatOriginal,\n OutputFormatWithLiterals as OutputFormatWithLiteralsOriginal,\n FrameImage as FrameImageOriginal,\n VideoModel as VideoModelOriginal,\n VideoModelWithLiterals as VideoModelWithLiteralsOriginal,\n V1OpenAiResponsesRequest as V1OpenAiResponsesRequestOriginal,\n V1ResponsesModel as V1ResponsesModelOriginal,\n V1ResponsesModelWithLiterals as V1ResponsesModelWithLiteralsOriginal,\n V1ResponsesInputItem as V1ResponsesInputItemOriginal,\n V1ResponsesInputItemItemOneOf as V1ResponsesInputItemItemOneOfOriginal,\n V1ResponsesInputMessage as V1ResponsesInputMessageOriginal,\n ResponsesInputMessageResponsesMessageRole as ResponsesInputMessageResponsesMessageRoleOriginal,\n ResponsesInputMessageResponsesMessageRoleWithLiterals as ResponsesInputMessageResponsesMessageRoleWithLiteralsOriginal,\n V1ResponsesInputMessageContent as V1ResponsesInputMessageContentOriginal,\n V1ResponsesInputMessageContentContentValueOneOf as V1ResponsesInputMessageContentContentValueOneOfOriginal,\n ResponsesInputMessageContentImageInput as ResponsesInputMessageContentImageInputOriginal,\n ResponsesInputMessageContentFileInput as ResponsesInputMessageContentFileInputOriginal,\n V1ResponsesOutputMessage as V1ResponsesOutputMessageOriginal,\n V1OutputAnnotation as V1OutputAnnotationOriginal,\n V1OutputAnnotationAnnotationTypeOneOf as V1OutputAnnotationAnnotationTypeOneOfOriginal,\n V1UrlCitation as V1UrlCitationOriginal,\n ResponsesOutputMessageOutputContent as ResponsesOutputMessageOutputContentOriginal,\n V1ResponsesWebSearchToolCall as V1ResponsesWebSearchToolCallOriginal,\n ResponsesWebSearchToolCallAction as ResponsesWebSearchToolCallActionOriginal,\n V1ResponsesFunctionToolCall as V1ResponsesFunctionToolCallOriginal,\n V1ResponsesFunctionToolCallOutput as V1ResponsesFunctionToolCallOutputOriginal,\n V1ResponsesReasoningOutput as V1ResponsesReasoningOutputOriginal,\n V1ResponsesReasoningSummaryContent as V1ResponsesReasoningSummaryContentOriginal,\n V1ResponsesReasoningContent as V1ResponsesReasoningContentOriginal,\n V1ResponsesCodeInterpreterToolCall as V1ResponsesCodeInterpreterToolCallOriginal,\n V1ResponsesCodeInterpreterOutput as V1ResponsesCodeInterpreterOutputOriginal,\n V1ResponsesCodeInterpreterOutputOutputTypeOneOf as V1ResponsesCodeInterpreterOutputOutputTypeOneOfOriginal,\n V1ResponsesCodeInterpreterLogsOutput as V1ResponsesCodeInterpreterLogsOutputOriginal,\n V1ResponsesCodeInterpreterImageOutput as V1ResponsesCodeInterpreterImageOutputOriginal,\n V1ResponsesReasoning as V1ResponsesReasoningOriginal,\n V1ResponsesTextFormat as V1ResponsesTextFormatOriginal,\n V1ResponsesTextFormatFormatOneOf as V1ResponsesTextFormatFormatOneOfOriginal,\n ResponsesTextFormatJsonSchema as ResponsesTextFormatJsonSchemaOriginal,\n V1ResponsesToolChoice as V1ResponsesToolChoiceOriginal,\n V1ResponsesTool as V1ResponsesToolOriginal,\n V1ResponsesToolToolTypeOneOf as V1ResponsesToolToolTypeOneOfOriginal,\n V1ResponsesWebSearch as V1ResponsesWebSearchOriginal,\n ResponsesWebSearchUserLocation as ResponsesWebSearchUserLocationOriginal,\n V1ResponsesFunction as V1ResponsesFunctionOriginal,\n V1ResponsesCodeInterpreter as 
V1ResponsesCodeInterpreterOriginal,\n V1ResponsesCodeInterpreterContainer as V1ResponsesCodeInterpreterContainerOriginal,\n V1ResponsesCodeInterpreterContainerContainerTypeOneOf as V1ResponsesCodeInterpreterContainerContainerTypeOneOfOriginal,\n V1ResponsesCodeInterpreterContainerAuto as V1ResponsesCodeInterpreterContainerAutoOriginal,\n OpenAiResponsesRequest as OpenAiResponsesRequestOriginal,\n ResponsesModel as ResponsesModelOriginal,\n ResponsesModelWithLiterals as ResponsesModelWithLiteralsOriginal,\n ResponsesInputItem as ResponsesInputItemOriginal,\n ResponsesInputItemItemOneOf as ResponsesInputItemItemOneOfOriginal,\n ResponsesInputMessage as ResponsesInputMessageOriginal,\n ResponsesMessageRole as ResponsesMessageRoleOriginal,\n ResponsesMessageRoleWithLiterals as ResponsesMessageRoleWithLiteralsOriginal,\n ResponsesInputMessageContent as ResponsesInputMessageContentOriginal,\n ResponsesInputMessageContentContentValueOneOf as ResponsesInputMessageContentContentValueOneOfOriginal,\n ImageInput as ImageInputOriginal,\n FileInput as FileInputOriginal,\n ResponsesOutputMessage as ResponsesOutputMessageOriginal,\n OutputAnnotation as OutputAnnotationOriginal,\n OutputAnnotationAnnotationTypeOneOf as OutputAnnotationAnnotationTypeOneOfOriginal,\n UrlCitation as UrlCitationOriginal,\n OutputContent as OutputContentOriginal,\n ResponsesWebSearchToolCall as ResponsesWebSearchToolCallOriginal,\n Action as ActionOriginal,\n ResponsesFunctionToolCall as ResponsesFunctionToolCallOriginal,\n ResponsesFunctionToolCallOutput as ResponsesFunctionToolCallOutputOriginal,\n ResponsesReasoningOutput as ResponsesReasoningOutputOriginal,\n ResponsesReasoningSummaryContent as ResponsesReasoningSummaryContentOriginal,\n ResponsesReasoningContent as ResponsesReasoningContentOriginal,\n ResponsesCodeInterpreterToolCall as ResponsesCodeInterpreterToolCallOriginal,\n ResponsesCodeInterpreterOutput as ResponsesCodeInterpreterOutputOriginal,\n ResponsesCodeInterpreterOutputOutputTypeOneOf as ResponsesCodeInterpreterOutputOutputTypeOneOfOriginal,\n ResponsesCodeInterpreterLogsOutput as ResponsesCodeInterpreterLogsOutputOriginal,\n ResponsesCodeInterpreterImageOutput as ResponsesCodeInterpreterImageOutputOriginal,\n ResponsesReasoning as ResponsesReasoningOriginal,\n ResponsesTextFormat as ResponsesTextFormatOriginal,\n ResponsesTextFormatFormatOneOf as ResponsesTextFormatFormatOneOfOriginal,\n JsonSchema as JsonSchemaOriginal,\n ResponsesToolChoice as ResponsesToolChoiceOriginal,\n ResponsesTool as ResponsesToolOriginal,\n ResponsesToolToolTypeOneOf as ResponsesToolToolTypeOneOfOriginal,\n ResponsesWebSearch as ResponsesWebSearchOriginal,\n UserLocation as UserLocationOriginal,\n ResponsesFunction as ResponsesFunctionOriginal,\n ResponsesCodeInterpreter as ResponsesCodeInterpreterOriginal,\n ResponsesCodeInterpreterContainer as ResponsesCodeInterpreterContainerOriginal,\n ResponsesCodeInterpreterContainerContainerTypeOneOf as ResponsesCodeInterpreterContainerContainerTypeOneOfOriginal,\n ResponsesCodeInterpreterContainerAuto as ResponsesCodeInterpreterContainerAutoOriginal,\n CreateVideoRequest as CreateVideoRequestOriginal,\n V1VideoModel as V1VideoModelOriginal,\n V1VideoModelWithLiterals as V1VideoModelWithLiteralsOriginal,\n ContentGenerationRequestedEvent as ContentGenerationRequestedEventOriginal,\n UserRequestInfo as UserRequestInfoOriginal,\n ContentGenerationSucceededEvent as ContentGenerationSucceededEventOriginal,\n GenerateContentModelResponse as GenerateContentModelResponseOriginal,\n 
GenerateContentModelResponseResponseOneOf as GenerateContentModelResponseResponseOneOfOriginal,\n GeneratedContent as GeneratedContentOriginal,\n TextContent as TextContentOriginal,\n MediaContent as MediaContentOriginal,\n ThinkingTextContent as ThinkingTextContentOriginal,\n ToolUseContent as ToolUseContentOriginal,\n V1TokenUsage as V1TokenUsageOriginal,\n ResponseMetadata as ResponseMetadataOriginal,\n OpenaiproxyV1CreateChatCompletionResponse as OpenaiproxyV1CreateChatCompletionResponseOriginal,\n CreateChatCompletionResponsePromptTokenDetails as CreateChatCompletionResponsePromptTokenDetailsOriginal,\n CreateChatCompletionResponseCompletionTokenDetails as CreateChatCompletionResponseCompletionTokenDetailsOriginal,\n OpenaiproxyV1CreateChatCompletionResponseChoice as OpenaiproxyV1CreateChatCompletionResponseChoiceOriginal,\n OpenaiproxyV1CreateChatCompletionResponseTokenUsage as OpenaiproxyV1CreateChatCompletionResponseTokenUsageOriginal,\n TextBisonPredictResponse as TextBisonPredictResponseOriginal,\n TextBisonPrediction as TextBisonPredictionOriginal,\n CitationMetadata as CitationMetadataOriginal,\n V1Citation as V1CitationOriginal,\n SafetyAttribute as SafetyAttributeOriginal,\n Metadata as MetadataOriginal,\n TokenMetadata as TokenMetadataOriginal,\n TokenCount as TokenCountOriginal,\n ChatBisonPredictResponse as ChatBisonPredictResponseOriginal,\n ChatBisonPrediction as ChatBisonPredictionOriginal,\n CreateChatCompletionResponse as CreateChatCompletionResponseOriginal,\n PromptTokenDetails as PromptTokenDetailsOriginal,\n CompletionTokenDetails as CompletionTokenDetailsOriginal,\n CreateChatCompletionResponseChoice as CreateChatCompletionResponseChoiceOriginal,\n CreateChatCompletionResponseTokenUsage as CreateChatCompletionResponseTokenUsageOriginal,\n GenerateContentResponse as GenerateContentResponseOriginal,\n Candidate as CandidateOriginal,\n CandidateContent as CandidateContentOriginal,\n CandidateContentPart as CandidateContentPartOriginal,\n FinishReason as FinishReasonOriginal,\n FinishReasonWithLiterals as FinishReasonWithLiteralsOriginal,\n SafetyRating as SafetyRatingOriginal,\n HarmProbability as HarmProbabilityOriginal,\n HarmProbabilityWithLiterals as HarmProbabilityWithLiteralsOriginal,\n CandidateCitationMetadata as CandidateCitationMetadataOriginal,\n PublicationDate as PublicationDateOriginal,\n CandidateCitationMetadataCitation as CandidateCitationMetadataCitationOriginal,\n GroundingMetadata as GroundingMetadataOriginal,\n SearchEntryPoint as SearchEntryPointOriginal,\n GroundingChunk as GroundingChunkOriginal,\n GroundingChunkChunkTypeOneOf as GroundingChunkChunkTypeOneOfOriginal,\n Web as WebOriginal,\n RetrievedContext as RetrievedContextOriginal,\n GroundingSupport as GroundingSupportOriginal,\n Segment as SegmentOriginal,\n RetrievalMetadata as RetrievalMetadataOriginal,\n UsageMetadata as UsageMetadataOriginal,\n ModalityTokenCount as ModalityTokenCountOriginal,\n InvokeAnthropicClaudeModelResponse as InvokeAnthropicClaudeModelResponseOriginal,\n ResponseTypeType as ResponseTypeTypeOriginal,\n ResponseTypeTypeWithLiterals as ResponseTypeTypeWithLiteralsOriginal,\n Usage as UsageOriginal,\n V1InvokeAnthropicClaudeModelResponse as V1InvokeAnthropicClaudeModelResponseOriginal,\n GoogleproxyV1ResponseTypeType as GoogleproxyV1ResponseTypeTypeOriginal,\n GoogleproxyV1ResponseTypeTypeWithLiterals as GoogleproxyV1ResponseTypeTypeWithLiteralsOriginal,\n GoogleproxyV1Usage as GoogleproxyV1UsageOriginal,\n InvokeAnthropicModelResponse as 
InvokeAnthropicModelResponseOriginal,\n V1ResponseTypeType as V1ResponseTypeTypeOriginal,\n V1ResponseTypeTypeWithLiterals as V1ResponseTypeTypeWithLiteralsOriginal,\n V1Usage as V1UsageOriginal,\n UsageCacheCreation as UsageCacheCreationOriginal,\n UsageServerToolUse as UsageServerToolUseOriginal,\n Container as ContainerOriginal,\n InvokeLlamaModelResponse as InvokeLlamaModelResponseOriginal,\n InvokeConverseResponse as InvokeConverseResponseOriginal,\n Output as OutputOriginal,\n InvokeConverseResponseTokenUsage as InvokeConverseResponseTokenUsageOriginal,\n Metrics as MetricsOriginal,\n InvokeMlPlatformLlamaModelResponse as InvokeMlPlatformLlamaModelResponseOriginal,\n InvokeChatCompletionResponse as InvokeChatCompletionResponseOriginal,\n InvokeChatCompletionResponseChoice as InvokeChatCompletionResponseChoiceOriginal,\n PerplexityImageDescriptor as PerplexityImageDescriptorOriginal,\n InvokeChatCompletionResponseUsage as InvokeChatCompletionResponseUsageOriginal,\n CreateImageResponse as CreateImageResponseOriginal,\n V1ImageObject as V1ImageObjectOriginal,\n V1TextToImageResponse as V1TextToImageResponseOriginal,\n ImageObject as ImageObjectOriginal,\n GenerateCoreResponse as GenerateCoreResponseOriginal,\n GenerateStableDiffusionResponse as GenerateStableDiffusionResponseOriginal,\n GenerateAnImageResponse as GenerateAnImageResponseOriginal,\n ResultObject as ResultObjectOriginal,\n CreatePredictionResponse as CreatePredictionResponseOriginal,\n CreatePredictionResponseTokenUsage as CreatePredictionResponseTokenUsageOriginal,\n EditImageWithPromptResponse as EditImageWithPromptResponseOriginal,\n TextToImageResponse as TextToImageResponseOriginal,\n TextToImageTaskResult as TextToImageTaskResultOriginal,\n GenerateImageResponse as GenerateImageResponseOriginal,\n Prediction as PredictionOriginal,\n SafetyAttributes as SafetyAttributesOriginal,\n GenerateVideoResponse as GenerateVideoResponseOriginal,\n GeneratedVideo as GeneratedVideoOriginal,\n GenerateImageMlPlatformResponse as GenerateImageMlPlatformResponseOriginal,\n CreateImageOpenAiResponse as CreateImageOpenAiResponseOriginal,\n ImageUsage as ImageUsageOriginal,\n OpenAiImageTokenDetails as OpenAiImageTokenDetailsOriginal,\n EditImageOpenAiResponse as EditImageOpenAiResponseOriginal,\n V1CreateChatCompletionResponse as V1CreateChatCompletionResponseOriginal,\n V1CreateChatCompletionResponseChoice as V1CreateChatCompletionResponseChoiceOriginal,\n V1CreateChatCompletionResponseTokenUsage as V1CreateChatCompletionResponseTokenUsageOriginal,\n InvokeMlPlatformOpenAIChatCompletionRawResponse as InvokeMlPlatformOpenAIChatCompletionRawResponseOriginal,\n Choice as ChoiceOriginal,\n TokenUsage as TokenUsageOriginal,\n VideoInferenceResponse as VideoInferenceResponseOriginal,\n VideoInferenceTaskResult as VideoInferenceTaskResultOriginal,\n V1OpenAiResponsesResponse as V1OpenAiResponsesResponseOriginal,\n OpenAiResponsesResponseIncompleteDetails as OpenAiResponsesResponseIncompleteDetailsOriginal,\n V1ResponsesOutput as V1ResponsesOutputOriginal,\n V1ResponsesOutputOutputOneOf as V1ResponsesOutputOutputOneOfOriginal,\n V1ResponsesTokenUsage as V1ResponsesTokenUsageOriginal,\n V1ResponsesInputTokensDetails as V1ResponsesInputTokensDetailsOriginal,\n V1ResponsesOutputTokensDetails as V1ResponsesOutputTokensDetailsOriginal,\n OpenAiResponsesResponse as OpenAiResponsesResponseOriginal,\n IncompleteDetails as IncompleteDetailsOriginal,\n ResponsesOutput as ResponsesOutputOriginal,\n ResponsesOutputOutputOneOf as 
ResponsesOutputOutputOneOfOriginal,\n ResponsesTokenUsage as ResponsesTokenUsageOriginal,\n ResponsesInputTokensDetails as ResponsesInputTokensDetailsOriginal,\n ResponsesOutputTokensDetails as ResponsesOutputTokensDetailsOriginal,\n CreateVideoResponse as CreateVideoResponseOriginal,\n VideoJob as VideoJobOriginal,\n ErrorInfo as ErrorInfoOriginal,\n ContentGenerationFailedEvent as ContentGenerationFailedEventOriginal,\n GenerateTextByPromptRequest as GenerateTextByPromptRequestOriginal,\n FallbackProperties as FallbackPropertiesOriginal,\n DynamicRequestConfig as DynamicRequestConfigOriginal,\n GatewayToolDefinition as GatewayToolDefinitionOriginal,\n GatewayToolDefinitionToolOneOf as GatewayToolDefinitionToolOneOfOriginal,\n GatewayToolDefinitionCustomTool as GatewayToolDefinitionCustomToolOriginal,\n BuiltInTool as BuiltInToolOriginal,\n GatewayMessageDefinition as GatewayMessageDefinitionOriginal,\n GatewayMessageDefinitionRole as GatewayMessageDefinitionRoleOriginal,\n GatewayMessageDefinitionRoleWithLiterals as GatewayMessageDefinitionRoleWithLiteralsOriginal,\n GatewayContentBlock as GatewayContentBlockOriginal,\n GatewayContentBlockTypeOneOf as GatewayContentBlockTypeOneOfOriginal,\n ToolResultContent as ToolResultContentOriginal,\n GenerateTextByPromptResponse as GenerateTextByPromptResponseOriginal,\n ModelResponse as ModelResponseOriginal,\n ModelResponseResponseOneOf as ModelResponseResponseOneOfOriginal,\n GenerationRequestedEvent as GenerationRequestedEventOriginal,\n TextGenerationSucceededEvent as TextGenerationSucceededEventOriginal,\n TextGenerationFailedEvent as TextGenerationFailedEventOriginal,\n GeneratedTextChunk as GeneratedTextChunkOriginal,\n GeneratedTextChunkModelChunkOneOf as GeneratedTextChunkModelChunkOneOfOriginal,\n ChatCompletionChunk as ChatCompletionChunkOriginal,\n ChunkDelta as ChunkDeltaOriginal,\n ChunkChoice as ChunkChoiceOriginal,\n V1ChatCompletionChunk as V1ChatCompletionChunkOriginal,\n ChunkChoiceChunkDelta as ChunkChoiceChunkDeltaOriginal,\n ChatCompletionChunkChunkChoice as ChatCompletionChunkChunkChoiceOriginal,\n GoogleproxyV1AnthropicStreamChunk as GoogleproxyV1AnthropicStreamChunkOriginal,\n GoogleproxyV1AnthropicStreamChunkContentOneOf as GoogleproxyV1AnthropicStreamChunkContentOneOfOriginal,\n GoogleproxyV1ContentBlockDelta as GoogleproxyV1ContentBlockDeltaOriginal,\n GoogleproxyV1ContentBlockDeltaDeltaOneOf as GoogleproxyV1ContentBlockDeltaDeltaOneOfOriginal,\n V1AnthropicStreamChunkMessageDelta as V1AnthropicStreamChunkMessageDeltaOriginal,\n AnthropicStreamChunk as AnthropicStreamChunkOriginal,\n AnthropicStreamChunkContentOneOf as AnthropicStreamChunkContentOneOfOriginal,\n ContentBlockDelta as ContentBlockDeltaOriginal,\n ContentBlockDeltaDeltaOneOf as ContentBlockDeltaDeltaOneOfOriginal,\n MessageDelta as MessageDeltaOriginal,\n V1AnthropicStreamChunk as V1AnthropicStreamChunkOriginal,\n V1AnthropicStreamChunkContentOneOf as V1AnthropicStreamChunkContentOneOfOriginal,\n V1ContentBlockDelta as V1ContentBlockDeltaOriginal,\n V1ContentBlockDeltaDeltaOneOf as V1ContentBlockDeltaDeltaOneOfOriginal,\n AnthropicStreamChunkMessageDelta as AnthropicStreamChunkMessageDeltaOriginal,\n GenerateTextByPromptObjectRequest as GenerateTextByPromptObjectRequestOriginal,\n GenerateTextByPromptObjectResponse as GenerateTextByPromptObjectResponseOriginal,\n GenerateEmbeddingsRequest as GenerateEmbeddingsRequestOriginal,\n GenerateEmbeddingsRequestEmbeddingRequestOneOf as GenerateEmbeddingsRequestEmbeddingRequestOneOfOriginal,\n 
V1CreateEmbeddingsRequest as V1CreateEmbeddingsRequestOriginal,\n OpenaiproxyV1EmbeddingModel as OpenaiproxyV1EmbeddingModelOriginal,\n OpenaiproxyV1EmbeddingModelWithLiterals as OpenaiproxyV1EmbeddingModelWithLiteralsOriginal,\n V1EmbeddingEncodingFormat as V1EmbeddingEncodingFormatOriginal,\n V1EmbeddingEncodingFormatWithLiterals as V1EmbeddingEncodingFormatWithLiteralsOriginal,\n CreateEmbeddingsRequest as CreateEmbeddingsRequestOriginal,\n EmbeddingModel as EmbeddingModelOriginal,\n EmbeddingModelWithLiterals as EmbeddingModelWithLiteralsOriginal,\n EmbeddingEncodingFormat as EmbeddingEncodingFormatOriginal,\n EmbeddingEncodingFormatWithLiterals as EmbeddingEncodingFormatWithLiteralsOriginal,\n GetEmbeddingRequest as GetEmbeddingRequestOriginal,\n V1EmbeddingModel as V1EmbeddingModelOriginal,\n V1EmbeddingModelWithLiterals as V1EmbeddingModelWithLiteralsOriginal,\n TextEmbeddingInstance as TextEmbeddingInstanceOriginal,\n TaskType as TaskTypeOriginal,\n TaskTypeWithLiterals as TaskTypeWithLiteralsOriginal,\n TextEmbeddingParameters as TextEmbeddingParametersOriginal,\n GenerateEmbeddingsResponse as GenerateEmbeddingsResponseOriginal,\n GenerateEmbeddingsResponseEmbeddingResponseOneOf as GenerateEmbeddingsResponseEmbeddingResponseOneOfOriginal,\n V1CreateEmbeddingsResponse as V1CreateEmbeddingsResponseOriginal,\n V1EmbeddingInfo as V1EmbeddingInfoOriginal,\n V1EmbeddingInfoEmbeddingResultOneOf as V1EmbeddingInfoEmbeddingResultOneOfOriginal,\n V1FloatEmbedding as V1FloatEmbeddingOriginal,\n CreateEmbeddingsResponseEmbeddingUsage as CreateEmbeddingsResponseEmbeddingUsageOriginal,\n CreateEmbeddingsResponse as CreateEmbeddingsResponseOriginal,\n EmbeddingInfo as EmbeddingInfoOriginal,\n EmbeddingInfoEmbeddingResultOneOf as EmbeddingInfoEmbeddingResultOneOfOriginal,\n FloatEmbedding as FloatEmbeddingOriginal,\n EmbeddingUsage as EmbeddingUsageOriginal,\n GetEmbeddingResponse as GetEmbeddingResponseOriginal,\n EmbeddingPrediction as EmbeddingPredictionOriginal,\n EmbeddingInstance as EmbeddingInstanceOriginal,\n Statistics as StatisticsOriginal,\n GenerateTextByProjectRequest as GenerateTextByProjectRequestOriginal,\n GenerateTextByProjectResponse as GenerateTextByProjectResponseOriginal,\n GenerateModerationRequest as GenerateModerationRequestOriginal,\n GenerateModerationRequestModerationRequestOneOf as GenerateModerationRequestModerationRequestOneOfOriginal,\n CreateModerationRequest as CreateModerationRequestOriginal,\n ImageUrlInput as ImageUrlInputOriginal,\n MultiModalInput as MultiModalInputOriginal,\n MultiModalInputContentValueOneOf as MultiModalInputContentValueOneOfOriginal,\n GenerateModerationResponse as GenerateModerationResponseOriginal,\n GenerateModerationResponseModerationResponseOneOf as GenerateModerationResponseModerationResponseOneOfOriginal,\n CreateModerationResponse as CreateModerationResponseOriginal,\n ModerationResult as ModerationResultOriginal,\n GenerateImageByProjectRequest as GenerateImageByProjectRequestOriginal,\n GenerateImageByProjectResponse as GenerateImageByProjectResponseOriginal,\n ImageModelResponse as ImageModelResponseOriginal,\n ImageModelResponseResponseOneOf as ImageModelResponseResponseOneOfOriginal,\n ImageGenerationRequestedEvent as ImageGenerationRequestedEventOriginal,\n ImageGenerationSucceededEvent as ImageGenerationSucceededEventOriginal,\n ImageGenerationFailedEvent as ImageGenerationFailedEventOriginal,\n GenerateImageByPromptRequest as GenerateImageByPromptRequestOriginal,\n GenerateImageByPromptResponse as 
GenerateImageByPromptResponseOriginal,\n GenerateImageByPromptObjectRequest as GenerateImageByPromptObjectRequestOriginal,\n GenerateImageByPromptObjectResponse as GenerateImageByPromptObjectResponseOriginal,\n GenerateContentByPromptRequest as GenerateContentByPromptRequestOriginal,\n AsyncGenerationConfig as AsyncGenerationConfigOriginal,\n SpiGenerationConfig as SpiGenerationConfigOriginal,\n GenerateContentByPromptResponse as GenerateContentByPromptResponseOriginal,\n GenerateContentByProjectRequest as GenerateContentByProjectRequestOriginal,\n GenerateContentByProjectResponse as GenerateContentByProjectResponseOriginal,\n GenerateContentByPromptObjectRequest as GenerateContentByPromptObjectRequestOriginal,\n GenerateContentByPromptObjectResponse as GenerateContentByPromptObjectResponseOriginal,\n GenerateTranscriptionRequest as GenerateTranscriptionRequestOriginal,\n GenerateTranscriptionRequestTranscriptionRequestOneOf as GenerateTranscriptionRequestTranscriptionRequestOneOfOriginal,\n CreateTranscriptionRequest as CreateTranscriptionRequestOriginal,\n TranscriptionModel as TranscriptionModelOriginal,\n TranscriptionModelWithLiterals as TranscriptionModelWithLiteralsOriginal,\n CreateTranscriptionRequestResponseFormat as CreateTranscriptionRequestResponseFormatOriginal,\n CreateTranscriptionRequestResponseFormatWithLiterals as CreateTranscriptionRequestResponseFormatWithLiteralsOriginal,\n TimestampGranularities as TimestampGranularitiesOriginal,\n TimestampGranularity as TimestampGranularityOriginal,\n TimestampGranularityWithLiterals as TimestampGranularityWithLiteralsOriginal,\n FileContent as FileContentOriginal,\n GenerateTranscriptionResponse as GenerateTranscriptionResponseOriginal,\n GenerateTranscriptionResponseTranscriptionResponseOneOf as GenerateTranscriptionResponseTranscriptionResponseOneOfOriginal,\n CreateTranscriptionResponse as CreateTranscriptionResponseOriginal,\n Word as WordOriginal,\n V1Segment as V1SegmentOriginal,\n GenerateAudioRequest as GenerateAudioRequestOriginal,\n GenerateAudioRequestAudioRequestOneOf as GenerateAudioRequestAudioRequestOneOfOriginal,\n CreateSpeechRequest as CreateSpeechRequestOriginal,\n SpeechModel as SpeechModelOriginal,\n SpeechModelWithLiterals as SpeechModelWithLiteralsOriginal,\n TextToSpeechRequest as TextToSpeechRequestOriginal,\n ElevenLabsTextToSpeechModel as ElevenLabsTextToSpeechModelOriginal,\n ElevenLabsTextToSpeechModelWithLiterals as ElevenLabsTextToSpeechModelWithLiteralsOriginal,\n VoiceSettings as VoiceSettingsOriginal,\n PronunciationDictionaryLocator as PronunciationDictionaryLocatorOriginal,\n GenerateAudioResponse as GenerateAudioResponseOriginal,\n GenerateAudioResponseAudioResponseOneOf as GenerateAudioResponseAudioResponseOneOfOriginal,\n CreateSpeechResponse as CreateSpeechResponseOriginal,\n GeneratedAudioChunk as GeneratedAudioChunkOriginal,\n GeneratedAudioChunkAudioChunkOneOf as GeneratedAudioChunkAudioChunkOneOfOriginal,\n SpeechChunk as SpeechChunkOriginal,\n TextToSpeechChunk as TextToSpeechChunkOriginal,\n AlignmentInfoInChunk as AlignmentInfoInChunkOriginal,\n PublishPromptRequest as PublishPromptRequestOriginal,\n PublishPromptResponse as PublishPromptResponseOriginal,\n GetPromptRequest as GetPromptRequestOriginal,\n GetPromptResponse as GetPromptResponseOriginal,\n PublishProjectRequest as PublishProjectRequestOriginal,\n Project as ProjectOriginal,\n ExperimentalPromptConfig as ExperimentalPromptConfigOriginal,\n PublishProjectResponse as PublishProjectResponseOriginal,\n 
ProjectConfigChangedDomainEvent as ProjectConfigChangedDomainEventOriginal,\n GetProjectRequest as GetProjectRequestOriginal,\n GetProjectResponse as GetProjectResponseOriginal,\n GetStatusRequest as GetStatusRequestOriginal,\n EntityType as EntityTypeOriginal,\n EntityTypeWithLiterals as EntityTypeWithLiteralsOriginal,\n GetStatusResponse as GetStatusResponseOriginal,\n OutageStatus as OutageStatusOriginal,\n OutageStatusWithLiterals as OutageStatusWithLiteralsOriginal,\n GetApplicationUsageRequest as GetApplicationUsageRequestOriginal,\n GetApplicationUsageResponse as GetApplicationUsageResponseOriginal,\n ApplicationBudgetInfo as ApplicationBudgetInfoOriginal,\n UserPerApplicationBudgetInfo as UserPerApplicationBudgetInfoOriginal,\n Wix_ai_gatewayV1EditImageRequest as Wix_ai_gatewayV1EditImageRequestOriginal,\n Wix_ai_gatewayV1EditImageRequestRequestOneOf as Wix_ai_gatewayV1EditImageRequestRequestOneOfOriginal,\n RemoveBackgroundRequest as RemoveBackgroundRequestOriginal,\n ImageEditingRequest as ImageEditingRequestOriginal,\n Guidance as GuidanceOriginal,\n ImageEditingModel as ImageEditingModelOriginal,\n ImageEditingModelWithLiterals as ImageEditingModelWithLiteralsOriginal,\n Background as BackgroundOriginal,\n Expand as ExpandOriginal,\n Export as ExportOriginal,\n Lighting as LightingOriginal,\n Margin as MarginOriginal,\n Padding as PaddingOriginal,\n Segmentation as SegmentationOriginal,\n Shadow as ShadowOriginal,\n TextRemoval as TextRemovalOriginal,\n V1EditImageRequest as V1EditImageRequestOriginal,\n V1EditImageModel as V1EditImageModelOriginal,\n V1EditImageModelWithLiterals as V1EditImageModelWithLiteralsOriginal,\n EditImageRequest as EditImageRequestOriginal,\n EditImageModel as EditImageModelOriginal,\n EditImageModelWithLiterals as EditImageModelWithLiteralsOriginal,\n Recraft_proxyV1EditImageRequest as Recraft_proxyV1EditImageRequestOriginal,\n EditAction as EditActionOriginal,\n EditActionWithLiterals as EditActionWithLiteralsOriginal,\n Wix_ai_gatewayV1EditImageResponse as Wix_ai_gatewayV1EditImageResponseOriginal,\n Wix_ai_gatewayV1EditImageResponseResponseOneOf as Wix_ai_gatewayV1EditImageResponseResponseOneOfOriginal,\n RemoveBackgroundResponse as RemoveBackgroundResponseOriginal,\n ImageEditingResponse as ImageEditingResponseOriginal,\n V1EditImageResponse as V1EditImageResponseOriginal,\n EditImageResponse as EditImageResponseOriginal,\n EditImageInput as EditImageInputOriginal,\n PredictionMetrics as PredictionMetricsOriginal,\n PredictionUrls as PredictionUrlsOriginal,\n Recraft_proxyV1EditImageResponse as Recraft_proxyV1EditImageResponseOriginal,\n PollImageGenerationResultRequest as PollImageGenerationResultRequestOriginal,\n PollImageGenerationResultRequestRequestOneOf as PollImageGenerationResultRequestRequestOneOfOriginal,\n V1GetResultRequest as V1GetResultRequestOriginal,\n GetResultRequest as GetResultRequestOriginal,\n GetTaskResultRequest as GetTaskResultRequestOriginal,\n GetVideoResultRequest as GetVideoResultRequestOriginal,\n PollImageGenerationResultResponse as PollImageGenerationResultResponseOriginal,\n PollImageGenerationResultResponseResponseOneOf as PollImageGenerationResultResponseResponseOneOfOriginal,\n V1GetResultResponse as V1GetResultResponseOriginal,\n GetResultResponse as GetResultResponseOriginal,\n GetTaskResultResponse as GetTaskResultResponseOriginal,\n GetTaskResultResponseResponseOneOf as GetTaskResultResponseResponseOneOfOriginal,\n GetVideoResultResponse as GetVideoResultResponseOriginal,\n DomainEvent as 
DomainEventOriginal,\n DomainEventBodyOneOf as DomainEventBodyOneOfOriginal,\n EntityCreatedEvent as EntityCreatedEventOriginal,\n RestoreInfo as RestoreInfoOriginal,\n EntityUpdatedEvent as EntityUpdatedEventOriginal,\n EntityDeletedEvent as EntityDeletedEventOriginal,\n ActionEvent as ActionEventOriginal,\n MessageEnvelope as MessageEnvelopeOriginal,\n IdentificationData as IdentificationDataOriginal,\n IdentificationDataIdOneOf as IdentificationDataIdOneOfOriginal,\n WebhookIdentityType as WebhookIdentityTypeOriginal,\n WebhookIdentityTypeWithLiterals as WebhookIdentityTypeWithLiteralsOriginal,\n AccountDetails as AccountDetailsOriginal,\n} from './ds-wix-ai-gateway-v1-prompt-generators.types.js';\n"],"mappings":";AAAA,SAAS,yBAAyB;AAClC,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,oCAAoC;AAC7C,SAAS,0CAA0C;AACnD,SAAS,sBAAsB;AAC/B,SAAS,kBAAkB;AAI3B,SAAS,0CACP,MACA;AACA,QAAM,mBAAmB;AAAA,IACvB,oBAAoB;AAAA,MAClB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,YAAY;AAAA,MACV;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,cAAc;AAAA,MACZ;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,yBAAyB;AAAA,MACvB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,wBAAwB;AAAA,MACtB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,MACA;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,IACA,mBAAmB;AAAA,MACjB;AAAA,QACE,SAAS;AAAA,QACT,UAAU;AAAA,MACZ;AAAA,IACF;AAAA,EACF;AAEA,SAAO,WAAW,OAAO,OAAO,MAAM,EAAE,iBAAiB,CAAC,CAAC;AAC7D;AAEA,IAAM,eAAe;AAOd,SAAS,qBACd,SAC4B;AAC5B,WAAS,uBAAuB,EAAE,KAAK,GAAQ;AAC7C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;
AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,6BACd,SAC4B;AAC5B,WAAS,+BAA+B,EAAE,KAAK,GAAQ;AACrD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,2BACd,SAC4B;AAC5B,WAAS,6BAA6B,EAAE,KAAK,GAAQ;AACnD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD
,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,mCACd,SAC4B;AAC5B,WAAS,qCAAqC,EAAE,KAAK,GAAQ;AAC3D,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,
MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,kBAAkB,SAA6C;AAC7E,WAAS,oBAAoB,EAAE,KAAK,GAAQ;AAC1C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YA
C7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,mBACd,SAC4B;AAC5B,WAAS,qBAAqB,EAAE,KAAK,GAAQ;AAC3C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,uBACd,SAC4B;AAC5B,WAAS,yBAAyB,EAAE,KAAK,GAAQ;AAC/C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YA
CA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YACh
E;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,4BACd,SAC4B;AAC5B,WAAS,8BAA8B,EAAE,KAAK,GAAQ;AACpD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA
,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,wBACd,SAC4B;AAC5B,WAAS,0BAA0B,EAAE,KAAK,GAAQ;AAChD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cA
CE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,yBACd,SAC4B;AAC5B,WAAS,2BAA2B,EAAE,KAAK,GAAQ;AACjD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAA
M,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,8BACd,SAC4B;AAC5B,WAAS,gCAAgC,EAAE,KAAK,GAAQ;AACtD,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,YACvD,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA
,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,iDAAiD;AAAA,YACzD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,wDAAwD;AAAA,YAChE;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,sDAAsD;AAAA,YAC9D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,YACR;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO,CAAC,EAAE,MAAM,yCAAyC,CAAC;AAAA,MAC5D;AAAA,MACA;AAAA,QACE,aAAa;AAAA,QACb,OAAO,CAAC,EAAE,MAAM,mDAAmD,CAAC;AAAA,MACtE;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,uCAAuC;AAAA,YAC/C,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,wCAAwC;AAAA,YAChD,EAAE,MAAM,6CAA6C;AAAA,YACrD,EAAE,MAAM,2CAA2C;AAAA,UACrD;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,mDAAmD;AAAA,YAC3D,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,wDAAwD;AAAA,YAChE,EAAE,MAAM,oDAAoD;AAAA,UAC9D;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,cAAc,SAA6C;AACzE,WAAS,gBAAgB,EAAE,KAAK,GAAQ;AACtC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,sBACd,SAC4B;AAC5B,WAAS,wBAAwB,EAAE,KAAK,GAAQ;AAC9C,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QAC
E,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,kCAAkC;AAAA,UAC1C,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,wDAAwD;AAAA,UAChE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,4BAA4B;AAAA,YACpC,EAAE,MAAM,oCAAoC;AAAA,UAC9C;AAAA,QACF;AAAA,QACA;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,YACA;AAAA,cACE,MAAM;AAAA,cACN,YAAY;AAAA,YACd;AAAA,UACF;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,cAAc,SAA6C;AACzE,WAAS,gBAAgB,EAAE,KAAK,GAAQ;AACtC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,sDAAsD;AAAA,UAC9D,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,kDAAkD;AAAA,UAC1D,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,oDAAoD;AAAA,UAC5D;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,4DAA4D;AAAA,UACpE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,2DAA2D;AAAA,UACnE;AAAA,YACE,MAAM;AAAA,UACR;AAAA,UACA,EAAE,MAAM,uDAAuD;AAAA,UAC/D,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,2DAA2D;AAAA,UACnE,EAAE,MAAM,4DAA4D;AAAA,UACpE,EAAE,MAAM,gDAAgD;AAAA,UACxD,EAAE,MAAM,yCAAyC;AAAA,UACjD,EAAE,MAAM,oDAAoD;AAAA,UAC5D,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qCAAqC;AAAA,UAC7C,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,uCAAuC;AAAA,UAC/C,EAAE,MAAM,gCAAgC;AAAA,UACxC,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,iDAAiD;AAAA,UACzD,EAAE,MAAM,0CAA0C;AAAA,UAClD,EAAE,MAAM,qDAAqD;AAAA,UAC7D,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,0DAA0D;AAAA,UAClE,EAAE,MAAM,+CAA+C;AAAA,QACzD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAOO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,SAAS,IAAI;AAAA,MACvC,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,qDAAqD;AAAA,YAC7
D,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,yCAAyC;AAAA,YACjD,EAAE,MAAM,oDAAoD;AAAA,YAC5D,EAAE,MAAM,qDAAqD;AAAA,YAC7D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qCAAqC;AAAA,YAC7C,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,2CAA2C;AAAA,YACnD,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,oDAAoD;AAAA,YAC5D;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,4CAA4C;AAAA,YACpD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,uDAAuD;AAAA,YAC/D,EAAE,MAAM,gDAAgD;AAAA,YACxD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,gDAAgD;AAAA,YACxD,EAAE,MAAM,yCAAyC;AAAA,YACjD,EAAE,MAAM,oDAAoD;AAAA,YAC5D,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qCAAqC;AAAA,YAC7C,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,uCAAuC;AAAA,YAC/C,EAAE,MAAM,gCAAgC;AAAA,YACxC,EAAE,MAAM,4CAA4C;AAAA,YACpD,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,0CAA0C;AAAA,YAClD,EAAE,MAAM,qDAAqD;AAAA,YAC7D,EAAE,MAAM,8CAA8C;AAAA,YACtD,EAAE,MAAM,8CAA8C;AAAA,YACtD;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA;AAAA,cACE,MAAM;AAAA,YACR;AAAA,YACA,EAAE,MAAM,+CAA+C;AAAA,UACzD;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAMO,SAAS,eAAe,SAA6C;AAC1E,WAAS,iBAAiB,EAAE,KAAK,GAAQ;AACvC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,WAAW,SAA6C;AACtE,WAAS,aAAa,EAAE,KAAK,GAAQ;AACnC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,oBACd,SAC4B;AAC5B,WAAS,sBAAsB,EAAE,KAAK,GAAQ;AAC5C,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,QAAQ,kBAAkB,OAAO;AAAA,IACnC;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,UAAU,SAA6C;AACrE,WAAS,YAAY,EAAE,KAAK,GAAQ;AAClC,UAAM,iBAAiB,eAAe,SAAS;AAAA,MAC7C;AAAA,QACE,aAAa;AAAA,QACb,OAAO;AAAA,UACL,EAAE,MAAM,yDAAyD;AAAA,UACjE,EAAE,MAAM,8CAA8C;AAAA,UACtD,EAAE,MAAM,6CAA6C;AAAA,UACrD,EAAE,MAAM,2CAA2C;AAAA,UACnD,EAAE,MAAM,4CAA4C;AAAA,UACpD,EAAE,MAAM,0CAA0C;AAAA,QACpD;AAAA,MACF;AAAA,IACF,CAAC;AACD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WAAW;AAAA,MACX,aAAa;
AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,MACN,mBAAmB,CAACA,aAClB,eAAeA,UAAS;AAAA,QACtB;AAAA,UACE,aAAa;AAAA,UACb,OAAO;AAAA,YACL,EAAE,MAAM,sDAAsD;AAAA,YAC9D,EAAE,MAAM,kDAAkD;AAAA,YAC1D,EAAE,MAAM,iDAAiD;AAAA,YACzD,EAAE,MAAM,+CAA+C;AAAA,UACzD;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACL;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;AAGO,SAAS,0BACd,SAC4B;AAC5B,WAAS,4BAA4B,EAAE,KAAK,GAAQ;AAClD,UAAM,WAAW;AAAA,MACf,YAAY;AAAA,MACZ,QAAQ;AAAA,MACR,WACE;AAAA,MACF,aAAa;AAAA,MACb,kBAAkB;AAAA,QAChB,wBAAwB;AAAA,MAC1B;AAAA,MACA,KAAK,0CAA0C;AAAA,QAC7C,WAAW;AAAA,QACX,MAAM;AAAA,QACN;AAAA,MACF,CAAC;AAAA,MACD,MAAM;AAAA,IACR;AAEA,WAAO;AAAA,EACT;AAEA,SAAO;AACT;;;ACr4FO,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,WAAQ;AACR,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,yBAAsB;AACtB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,uBAAoB;AACpB,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,wBAAqB;AACrB,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,mCAAgC;AAzCtB,SAAAA;AAAA,GAAA;AAgKL,IAAK,gDAAL,kBAAKC,mDAAL;AACL,EAAAA,+CAAA,aAAU;AACV,EAAAA,+CAAA,UAAO;AACP,EAAAA,+CAAA,eAAY;AACZ,EAAAA,+CAAA,YAAS;AACT,EAAAA,+CAAA,cAAW;AACX,EAAAA,+CAAA,UAAO;AAKP,EAAAA,+CAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAuLL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AAwEL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,8BAA2B;AAC3B,EAAAA,gBAAA,gBAAa;AACb,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,oBAAiB;AACjB,EAAAA,gBAAA,wBAAqB;AANX,SAAAA;AAAA,GAAA;AA0KL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,mBAAgB;AAChB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AAEpB,EAAAA,SAAA,4BAAyB;AACzB,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,6BAA0B;AAC1B,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,wBAAqB;AACrB,EAAAA,SAAA,mCAAgC;AAdtB,SAAAA;AAAA,GAAA;AAyGL,IAAK,mCAAL,kBAAKC,sCAAL;AACL,EAAAA,kCAAA,aAAU;AACV,EAAAA,kCAAA,UAAO;AACP,EAAAA,kCAAA,eAAY;AACZ,EAAAA,kCAAA,YAAS;AACT,EAAAA,kCAAA,cAAW;AACX,EAAAA,kCAAA,UAAO;AAKP,EAAAA,kCAAA,eAAY;AAXF,SAAAA;AAAA,GAAA;AAmHL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,mBAAgB;AAChB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,sBAAmB;AACnB,EAAAA,oBAAA,2BAAwB;AACxB,EAAAA,oBAAA,4BAAyB;AACzB,EAAAA,oBAAA,6BAA0B;AAC1B,EAAAA,oBAAA,oBAAiB;AACjB,EAAAA,oBAAA,0BAAuB;AACvB,EAAAA,oBAAA,sBAAmB;AAfT,SAAAA;AAAA,GAAA;AAqDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,WAAQ;AAHE,SAAAA;AAAA,GAAA;AA+FL,IAAK,WAAL,kBAAKC,cAAL;AAEL,EAAAA,UAAA,0BAAuB;AAEvB,EAAAA,UAAA,YAAS
;AAJC,SAAAA;AAAA,GAAA;AAoBL,IAAK,UAAL,kBAAKC,aAAL;AAEL,EAAAA,SAAA,yBAAsB;AAEtB,EAAAA,SAAA,gBAAa;AAEb,EAAAA,SAAA,oBAAiB;AAEjB,EAAAA,SAAA,+BAA4B;AARlB,SAAAA;AAAA,GAAA;AA4CL,IAAK,uBAAL,kBAAKC,0BAAL;AAEL,EAAAA,sBAAA,kCAA+B;AAE/B,EAAAA,sBAAA,0BAAuB;AAEvB,EAAAA,sBAAA,6BAA0B;AAE1B,EAAAA,sBAAA,2BAAwB;AARd,SAAAA;AAAA,GAAA;AAsDL,IAAK,6BAAL,kBAAKC,gCAAL;AAEL,EAAAA,4BAAA,sBAAmB;AAEnB,EAAAA,4BAAA,kBAAe;AAJL,SAAAA;AAAA,GAAA;AAoBL,IAAK,cAAL,kBAAKC,iBAAL;AAEL,EAAAA,aAAA,6BAA0B;AAE1B,EAAAA,aAAA,yBAAsB;AAJZ,SAAAA;AAAA,GAAA;AAgEL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,sBAAmB;AACnB,EAAAA,cAAA,qCAAkC;AAClC,EAAAA,cAAA,+BAA4B;AAC5B,EAAAA,cAAA,8BAA2B;AAC3B,EAAAA,cAAA,qCAAkC;AALxB,SAAAA;AAAA,GAAA;AAiBL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,uBAAoB;AACpB,EAAAA,WAAA,gBAAa;AACb,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,yBAAsB;AACtB,EAAAA,WAAA,qBAAkB;AALR,SAAAA;AAAA,GAAA;AAkJL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,sBAAmB;AAEnB,EAAAA,UAAA,UAAO;AAEP,EAAAA,UAAA,WAAQ;AAER,EAAAA,UAAA,WAAQ;AAPE,SAAAA;AAAA,GAAA;AAyCL,IAAK,mBAAL,kBAAKC,sBAAL;AAEL,EAAAA,kBAAA,mCAAgC;AAEhC,EAAAA,kBAAA,eAAY;AAEZ,EAAAA,kBAAA,iBAAc;AAEd,EAAAA,kBAAA,gBAAa;AARH,SAAAA;AAAA,GAAA;AAoCL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AAEV,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,SAAM;AAEN,EAAAA,MAAA,UAAO;AAKP,EAAAA,MAAA,eAAY;AAfF,SAAAA;AAAA,GAAA;AAwIL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAQL,IAAK,QAAL,kBAAKC,WAAL;AACL,EAAAA,OAAA,aAAU;AAEV,EAAAA,OAAA,yBAAsB;AAEtB,EAAAA,OAAA,wBAAqB;AAErB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAEvB,EAAAA,OAAA,2BAAwB;AACxB,EAAAA,OAAA,yBAAsB;AACtB,EAAAA,OAAA,uBAAoB;AAEpB,EAAAA,OAAA,2BAAwB;AAExB,EAAAA,OAAA,0BAAuB;AAnBb,SAAAA;AAAA,GAAA;AA+CL,IAAK,OAAL,kBAAKC,UAAL;AACL,EAAAA,MAAA,aAAU;AACV,EAAAA,MAAA,UAAO;AACP,EAAAA,MAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAkFL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,aAAU;AAEV,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AAEZ,EAAAA,WAAA,gBAAa;AAEb,EAAAA,WAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AAsJL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,aAAU;AACV,EAAAA,gBAAA,UAAO;AACP,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,aAAU;AACV,EAAAA,eAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAwHL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAWL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,wBAAqB;AACrB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,yBAAsB;AACtB,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,2BAAwB;AACxB,EAAAA,aAAA,0BAAuB;AAXb,SAAAA;AAAA,GAAA;AAuCL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,aAAU;AACV,EAAAA,mBAAA,UAAO;AACP,EAAAA,mBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAuFL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,aAAU;AAEV,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA4JL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,aAAU;AACV,EAAAA,6BAAA,UAAO;AACP,EAAAA,6BAAA,SAAM;AACN,EAAAA,6BAAA,UAAO;AAJG,SAAAA;AAAA,GAAA;AA+CL,IAAK,6BAAL,kBAAKC,gCAAL;AACL,EAAAA,4BAAA,aAAU;AACV,EAAAA,4BAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAqGL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,wBAAqB;AACrB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,yBAAsB;AACtB,EAAAA,gBAAA,2BAAwB;AACxB,EAAAA,gBAAA,0BAAuB;AAVb,SAAAA;AAAA,GAAA;AAqCL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,UAAO;AACP,EAAAA,iBAAA,eAAY;AAHF,SAAAA;AAAA,GAAA;AAqHL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,E
AAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAoLL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,aAAU;AAEV,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AAEZ,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,eAAY;AATF,SAAAA;AAAA,GAAA;AA4tBL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,SAAM;AACN,EAAAA,kBAAA,UAAO;AACP,EAAAA,kBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkDL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,aAAU;AACV,EAAAA,iBAAA,SAAM;AAFI,SAAAA;AAAA,GAAA;AAwDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AAEtB,EAAAA,YAAA,6BAA0B;AAE1B,EAAAA,YAAA,8BAA2B;AAE3B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,gCAA6B;AAE7B,EAAAA,YAAA,+BAA4B;AAE5B,EAAAA,YAAA,+BAA4B;AAblB,SAAAA;AAAA,GAAA;AAwDL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,4BAAyB;AAEzB,EAAAA,eAAA,0BAAuB;AAEvB,EAAAA,eAAA,gBAAa;AALH,SAAAA;AAAA,GAAA;AA2OL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oCAAiC;AACjC,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAaL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AACxB,EAAAA,cAAA,cAAW;AACX,EAAAA,cAAA,QAAK;AAHK,SAAAA;AAAA,GAAA;AAaL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,wBAAqB;AACrB,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,kBAAe;AACf,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AACjB,EAAAA,WAAA,oBAAiB;AANP,SAAAA;AAAA,GAAA;AAmBL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,WAAQ;AACR,EAAAA,YAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AAgDL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,uCAAoC;AAEpC,EAAAA,YAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAsBL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,sCAAmC;AACnC,EAAAA,oBAAA,eAAY;AACZ,EAAAA,oBAAA,gBAAa;AACb,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,UAAO;AACP,EAAAA,oBAAA,YAAS;AACT,EAAAA,oBAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AAuBL,IAAK,UAAL,kBAAKC,aAAL;AACL,EAAAA,SAAA,yBAAsB;AACtB,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,UAAO;AACP,EAAAA,SAAA,gBAAa;AACb,EAAAA,SAAA,0BAAuB;AACvB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,aAAU;AACV,EAAAA,SAAA,uBAAoB;AACpB,EAAAA,SAAA,YAAS;AACT,EAAAA,SAAA,WAAQ;AAXE,SAAAA;AAAA,GAAA;AA6BL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,8BAA2B;AAC3B,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,WAAQ;AACR,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,gBAAa;AACb,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,iBAAc;AACd,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,cAAW;AACX,EAAAA,+BAAA,uBAAoB;AACpB,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,eAAY;AACZ,EAAAA,+BAAA,kBAAe;AACf,EAAAA,+BAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAwFL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,4CAAyC;AACzC,EAAAA,gBAAA,uBAAoB;AAFV,SAAAA;AAAA,GAAA;AAWL,IAAK,iCAAL,kBAAKC,oCAAL;AACL,EAAAA,gCAAA,8BAA2B;AAC3B,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,WAAQ;AACR,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,gBAAa;AACb,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,iBAAc;AACd,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,cAAW;AACX,EAAAA,gCAAA,uBAAoB;AACpB,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,aAAU;AACV,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,eAAY;AACZ,EAAAA,gCAAA,kBAAe;AACf,EAAAA,gCAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AA0FL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,mBAAgB;AAChB,EAAAA,gBAAA,oBAAiB;AAHP,SAAAA;AAAA,GAAA;AAaL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,wCAAqC;AAErC,EAAAA,2BAAA,eAAY;AAEZ,EAAAA,2BAAA,qBAAkB;AAElB,EAAAA,2BAAA,gBAAa;AAEb,EAAAA,2BAAA,iBAAc;AAEd,EAAAA,2BAAA,uBAAoB;AAEpB,EAAAA,2BAAA,kBAAe;AAbL,SAAAA;AAAA,GAAA;AA2BL,IAAK,6CAAL,kBAAKC,gDAAL;AACL,EAAAA,4CAAA,+BAA4B;AAC5B,EAAAA,4CAAA,UAAO;AACP,EAAAA,4CAAA,SAAM;AAHI,SAAAA;AAAA,GAAA;AAyHL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,iCAA8B;AAC9B,EAAAA,sBAAA,wBAAqB;AACrB,EAAAA,sBAAA,gBAAa;A
ACb,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,qBAAkB;AANR,SAAAA;AAAA,GAAA;AA8DL,IAAK,wBAAL,kBAAKC,2BAAL;AAEL,EAAAA,uBAAA,qCAAkC;AAElC,EAAAA,uBAAA,gBAAa;AAEb,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,eAAY;AAEZ,EAAAA,uBAAA,+BAA4B;AAE5B,EAAAA,uBAAA,yBAAsB;AAEtB,EAAAA,uBAAA,2BAAwB;AAExB,EAAAA,uBAAA,wBAAqB;AAhBX,SAAAA;AAAA,GAAA;AAsOL,IAAK,YAAL,kBAAKC,eAAL;AACL,EAAAA,WAAA,6BAA0B;AAC1B,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,aAAU;AACV,EAAAA,WAAA,sBAAmB;AACnB,EAAAA,WAAA,2BAAwB;AACxB,EAAAA,WAAA,iCAA8B;AAC9B,EAAAA,WAAA,qBAAkB;AAClB,EAAAA,WAAA,0BAAuB;AACvB,EAAAA,WAAA,SAAM;AACN,EAAAA,WAAA,qBAAkB;AAVR,SAAAA;AAAA,GAAA;AA6CL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,gCAA6B;AAC7B,EAAAA,cAAA,UAAO;AACP,EAAAA,cAAA,SAAM;AACN,EAAAA,cAAA,WAAQ;AACR,EAAAA,cAAA,aAAU;AALA,SAAAA;AAAA,GAAA;AAiML,IAAK,kCAAL,kBAAKC,qCAAL;AACL,EAAAA,iCAAA,mDAAgD;AAChD,EAAAA,iCAAA,aAAU;AACV,EAAAA,iCAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAaL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,8BAA2B;AAC3B,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,gBAAa;AACb,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,iBAAc;AACd,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,cAAW;AACX,EAAAA,aAAA,uBAAoB;AACpB,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,kBAAe;AACf,EAAAA,aAAA,cAAW;AAlBD,SAAAA;AAAA,GAAA;AAmKL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,kBAAe;AAEf,EAAAA,yBAAA,uBAAoB;AAEpB,EAAAA,yBAAA,wBAAqB;AAErB,EAAAA,yBAAA,qBAAkB;AAElB,EAAAA,yBAAA,gBAAa;AAEb,EAAAA,yBAAA,mBAAgB;AAEhB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,oBAAiB;AAEjB,EAAAA,yBAAA,mBAAgB;AA7BN,SAAAA;AAAA,GAAA;AA0LL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,8BAA2B;AAC3B,EAAAA,iBAAA,WAAQ;AACR,EAAAA,iBAAA,eAAY;AACZ,EAAAA,iBAAA,qBAAkB;AAClB,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,yBAAsB;AANZ,SAAAA;AAAA,GAAA;AAgCL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAyDL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,6BAA0B;AAC1B,EAAAA,aAAA,kCAA+B;AAC/B,EAAAA,aAAA,mCAAgC;AANtB,SAAAA;AAAA,GAAA;AA2FL,IAAK,+BAAL,kBAAKC,kCAAL;AAEL,EAAAA,8BAAA,qCAAkC;AAElC,EAAAA,8BAAA,gBAAa;AAJH,SAAAA;AAAA,GAAA;AA+IL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,kCAA+B;AAC/B,EAAAA,kBAAA,kBAAe;AACf,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,qBAAkB;AAClB,EAAAA,kBAAA,qBAAkB;AAClB,EAAAA,kBAAA,mBAAgB;AAPN,SAAAA;AAAA,GAAA;AA+GL,IAAK,gBAAL,kBAAKC,mBAAL;AACL,EAAAA,eAAA,6BAA0B;AAC1B,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,0BAAuB;AACvB,EAAAA,eAAA,+BAA4B;AAJlB,SAAAA;AAAA,GAAA;AAmJL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,mCAAgC;AAKhC,EAAAA,qBAAA,yCAAsC;AAKtC,EAAAA,qBAAA,6CAA0C;AAXhC,SAAAA;AAAA,GAAA;AA+CL,IAAK,qCAAL,kBAAKC,wCAAL;AACL,EAAAA,oCAAA,aAAU;AACV,EAAAA,oCAAA,UAAO;AACP,EAAAA,oCAAA,eAAY;AACZ,EAAAA,oCAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA2IL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,aAAU;AACV,EAAAA,aAAA,UAAO;AACP,EAAAA,aAAA,eAAY;AACZ,EAAAA,aAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AA8IL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,SAAM;AAEN,EAAAA,cAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AA8BL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,2BAAwB;AACxB,EAAAA,YAAA,mBAAgB;AALN,SAAAA;AAAA,GAAA;AA2FL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,gCAA6B;AAC7B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,k
BAAA,qCAAkC;AAClC,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,iCAA8B;AAC9B,EAAAA,kBAAA,iBAAc;AACd,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,mBAAgB;AAChB,EAAAA,kBAAA,wBAAqB;AACrB,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,yBAAsB;AACtB,EAAAA,kBAAA,yBAAsB;AACtB,EAAAA,kBAAA,uBAAoB;AACpB,EAAAA,kBAAA,wBAAqB;AAfX,SAAAA;AAAA,GAAA;AA0FL,IAAK,4CAAL,kBAAKC,+CAAL;AACL,EAAAA,2CAAA,sBAAmB;AACnB,EAAAA,2CAAA,UAAO;AACP,EAAAA,2CAAA,YAAS;AACT,EAAAA,2CAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAopBL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,uBAAoB;AACpB,EAAAA,gBAAA,gCAA6B;AAC7B,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,qCAAkC;AAClC,EAAAA,gBAAA,wBAAqB;AALX,SAAAA;AAAA,GAAA;AAsEL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,sBAAmB;AACnB,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,YAAS;AACT,EAAAA,sBAAA,eAAY;AAJF,SAAAA;AAAA,GAAA;AAomBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,yBAAsB;AACtB,EAAAA,cAAA,YAAS;AACT,EAAAA,cAAA,gBAAa;AAHH,SAAAA;AAAA,GAAA;AA8pBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,2BAAwB;AAExB,EAAAA,cAAA,iBAAc;AAEd,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,gBAAa;AAKb,EAAAA,cAAA,YAAS;AAET,EAAAA,cAAA,gBAAa;AAEb,EAAAA,cAAA,WAAQ;AAER,EAAAA,cAAA,cAAW;AAEX,EAAAA,cAAA,eAAY;AAEZ,EAAAA,cAAA,wBAAqB;AAErB,EAAAA,cAAA,UAAO;AAEP,EAAAA,cAAA,6BAA0B;AAE1B,EAAAA,cAAA,kBAAe;AAEf,EAAAA,cAAA,0BAAuB;AAEvB,EAAAA,cAAA,yBAAsB;AAhCZ,SAAAA;AAAA,GAAA;AA4EL,IAAK,kBAAL,kBAAKC,qBAAL;AACL,EAAAA,iBAAA,yBAAsB;AACtB,EAAAA,iBAAA,gBAAa;AACb,EAAAA,iBAAA,SAAM;AACN,EAAAA,iBAAA,YAAS;AACT,EAAAA,iBAAA,UAAO;AALG,SAAAA;AAAA,GAAA;AAkRL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,aAAU;AACV,EAAAA,kBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAyEL,IAAK,gCAAL,kBAAKC,mCAAL;AACL,EAAAA,+BAAA,aAAU;AACV,EAAAA,+BAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAsEL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,aAAU;AACV,EAAAA,oBAAA,aAAU;AAFA,SAAAA;AAAA,GAAA;AAipCL,IAAK,+BAAL,kBAAKC,kCAAL;AACL,EAAAA,8BAAA,aAAU;AACV,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AACZ,EAAAA,8BAAA,YAAS;AACT,EAAAA,8BAAA,UAAO;AACP,EAAAA,8BAAA,eAAY;AANF,SAAAA;AAAA,GAAA;AA+tBL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,6BAA0B;AAC1B,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,4BAAyB;AAJf,SAAAA;AAAA,GAAA;AAeL,IAAK,4BAAL,kBAAKC,+BAAL;AACL,EAAAA,2BAAA,6BAA0B;AAE1B,EAAAA,2BAAA,WAAQ;AACR,EAAAA,2BAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAwCL,IAAK,iBAAL,kBAAKC,oBAAL;AACL,EAAAA,gBAAA,6BAA0B;AAC1B,EAAAA,gBAAA,SAAM;AACN,EAAAA,gBAAA,4BAAyB;AACzB,EAAAA,gBAAA,4BAAyB;AAJf,SAAAA;AAAA,GAAA;AAeL,IAAK,0BAAL,kBAAKC,6BAAL;AACL,EAAAA,yBAAA,6BAA0B;AAC1B,EAAAA,yBAAA,WAAQ;AACR,EAAAA,yBAAA,YAAS;AAHC,SAAAA;AAAA,GAAA;AA6BL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,6BAA0B;AAC1B,EAAAA,kBAAA,qCAAkC;AAClC,EAAAA,kBAAA,wBAAqB;AAErB,EAAAA,kBAAA,0BAAuB;AACvB,EAAAA,kBAAA,0BAAuB;AANb,SAAAA;AAAA,GAAA;AA2CL,IAAK,WAAL,kBAAKC,cAAL;AACL,EAAAA,UAAA,uBAAoB;AACpB,EAAAA,UAAA,qBAAkB;AAClB,EAAAA,UAAA,wBAAqB;AACrB,EAAAA,UAAA,yBAAsB;AACtB,EAAAA,UAAA,oBAAiB;AACjB,EAAAA,UAAA,gBAAa;AACb,EAAAA,UAAA,wBAAqB;AACrB,EAAAA,UAAA,uBAAoB;AACpB,EAAAA,UAAA,0BAAuB;AATb,SAAAA;AAAA,GAAA;AA0vBL,IAAK,qBAAL,kBAAKC,wBAAL;AACL,EAAAA,oBAAA,iCAA8B;AAC9B,EAAAA,oBAAA,eAAY;AAFF,SAAAA;AAAA,GAAA;AAWL,IAAK,2CAAL,kBAAKC,8CAAL;AACL,EAAAA,0CAAA,6BAA0B;AAC1B,EAAAA,0CAAA,UAAO;AACP,EAAAA,0CAAA,UAAO;AACP,EAAAA,0CAAA,SAAM;AACN,EAAAA,0CAAA,kBAAe;AACf,EAAAA,0CAAA,SAAM;AANI,SAAAA;AAAA,GAAA;AA2BL,IAAK,uBAAL,kBAAKC,0BAAL;AACL,EAAAA,sBAAA,mCAAgC;AAChC,EAAAA,sBAAA,UAAO;AACP,EAAAA,sBAAA,aAAU;AAHA,SAAAA;AAAA,GAAA;AAmJL,IAAK,cAAL,kBAAKC,iBAAL;AACL,EAAAA,aAAA,0BAAuB;AACvB,EAAAA,aAAA,WAAQ;AACR,EAAAA,aAAA,cAAW;AAHD,SAAAA;AAAA,GAAA;AAsFL,IAAK,8BAAL,kBAAKC,iCAAL;AACL,EAAAA,6BAAA,8CAA2C;AAC3C,EAAAA,6BAAA,4BAAyB;AACzB,EAAAA,6BAAA,uBAAoB;AACpB,EAAAA,6BAAA,qBAAkB;AAJR,SAAAA;AAAA,GAAA;AA4PL,IA
AK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,YAAS;AACT,EAAAA,YAAA,aAAU;AACV,EAAAA,YAAA,YAAS;AAJC,SAAAA;AAAA,GAAA;AAsBL,IAAK,eAAL,kBAAKC,kBAAL;AACL,EAAAA,cAAA,oBAAiB;AACjB,EAAAA,cAAA,aAAU;AACV,EAAAA,cAAA,YAAS;AAHC,SAAAA;AAAA,GAAA;AAkPL,IAAK,oBAAL,kBAAKC,uBAAL;AACL,EAAAA,mBAAA,qCAAkC;AAClC,EAAAA,mBAAA,sCAAmC;AACnC,EAAAA,mBAAA,sCAAmC;AAHzB,SAAAA;AAAA,GAAA;AAsQL,IAAK,mBAAL,kBAAKC,sBAAL;AACL,EAAAA,kBAAA,sCAAmC;AACnC,EAAAA,kBAAA,WAAQ;AAFE,SAAAA;AAAA,GAAA;AA+BL,IAAK,iBAAL,kBAAKC,oBAAL;AAEL,EAAAA,gBAAA,8BAA2B;AAE3B,EAAAA,gBAAA,iBAAc;AAEd,EAAAA,gBAAA,uBAAoB;AANV,SAAAA;AAAA,GAAA;AAoCL,IAAK,aAAL,kBAAKC,gBAAL;AACL,EAAAA,YAAA,yBAAsB;AACtB,EAAAA,YAAA,eAAY;AACZ,EAAAA,YAAA,uBAAoB;AACpB,EAAAA,YAAA,mBAAgB;AAChB,EAAAA,YAAA,sBAAmB;AACnB,EAAAA,YAAA,kBAAe;AANL,SAAAA;AAAA,GAAA;AAqeL,IAAK,sBAAL,kBAAKC,yBAAL;AACL,EAAAA,qBAAA,aAAU;AACV,EAAAA,qBAAA,uBAAoB;AACpB,EAAAA,qBAAA,YAAS;AACT,EAAAA,qBAAA,cAAW;AACX,EAAAA,qBAAA,SAAM;AALI,SAAAA;AAAA,GAAA;;;ACr0aL,SAASC,wBAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,qBAAqB,OAAO;AAElE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,gCAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,6BAA6B,OAAO;AAE1E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,8BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,2BAA2B,OAAO;AAExE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,sCAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC;AAAA,IAClC;AAAA,EACF;AAEF,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,qBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,kBAAkB,OAAO;AAE/D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iCAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,8BAA8B,OAAO;AAE3E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,sBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,mBAAmB,OAAO;AAEhE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,0BAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,
oBACgC,uBAAuB,OAAO;AAEpE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,+BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,4BAA4B,OAAO;AAEzE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,2BAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,wBAAwB,OAAO;AAErE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,4BAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,yBAAyB,OAAO;AAEtE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iCAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,8BAA8B,OAAO;AAE3E,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,cAAc,OAAO;AAE3D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,yBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,sBAAsB,OAAO;AAEnE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,iBAOd;AACA,QAAM,UAAU,EAAE,QAAQ,EAAE,IAAI,YAAY,EAAE;AAE9C,QAAM,oBACgC,cAAc,OAAO;AAE3D,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,kBAOd;AACA,QAAM,UAAU,EAAE,SAAS,EAAE,IAAI,aAAa,EAAE;AAEhD,QAAM,oBACgC,eAAe,OAAO;AAE5D,QAAM,S
AAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,cAOd;AACA,QAAM,UAAU,EAAE,WAAW,aAAa;AAE1C,QAAM,oBACgC,WAAW,OAAO;AAExD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,WAAW,YAAY;AAAA,IACrC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,EAAE,UAAU,YAAY;AAExC,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,EAAE,UAAU,WAAW;AAAA,IACnC,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,uBAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,oBAAoB,OAAO;AAEjE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,aAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,UAAU,OAAO;AAEvD,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;AAEO,SAASC,6BAOd;AACA,QAAM,UAAU,CAAC;AAEjB,QAAM,oBACgC,0BAA0B,OAAO;AAEvE,QAAM,SAAS,CAAC,YAAyB;AACvC,UAAM,EAAE,IAAI,IAAI,kBAAkB,OAAO;AACzC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL;AAAA,IACA,YAAY;AAAA,IACZ,MAAM;AAAA,IACN,YAAY,CAAC;AAAA,IACb,eAAe;AAAA,IACf,uBAAuB;AAAA,IACvB,gBAAgB;AAAA,IAChB,wBAAwB;AAAA,EAC1B;AACF;","names":["payload","OpenaiproxyV1Model","OpenaiproxyV1ChatCompletionMessageMessageRole","TextBisonModel","ChatBisonModel","V1Model","ChatCompletionMessageMessageRole","GoogleproxyV1Model","ContentRole","Language","Outcome","MediaResolutionLevel","DynamicRetrievalConfigMode","Environment","HarmCategory","Threshold","Modality","PersonGeneration","Mode","Type","Model","Role","MediaType","ToolChoiceType","McpServerType","V1CacheControlType","ClaudeModel","V1MessageRoleRole","V1ImageMediaTypeMediaType","GoogleproxyV1ToolChoiceType","GoogleproxyV1McpServerType","AnthropicModel","MessageRoleRole","CacheControlType","ImageMediaTypeMediaType","V1ToolChoiceType","V1McpServerType","LlamaModel","ConverseModel","V1ImageModel","ImageQuality","ImageSize","ImageStyle","ImageModel","ClipGuidancePreset","Sampler","TextToImageRequestStylePreset","ImageCoreModel","GenerateCoreRequestStylePreset","GenerationMode","ImageStableDiffusionModel","GenerateStableDiffusionRequestOutputFormat","GenerateAnImageModel","CreatePredictionModel","TaskInput","ResponseType","EditImageWithPromptRequestModel","StylePreset","TextToImageRequestModel","PerplexityModel","PerplexityMessageMessageRole","ImagenModel","GenerateImageMlPlatformModel","OpenAiImageModel","VideoGenModel","ChatCompletionModel","V1ChatCompletionMessageMessageRole","MessageRole","OutputFormat","VideoModel","V1ResponsesModel","ResponsesInputMessageResponsesMessageRole","ResponsesModel","ResponsesMessageRole","V1VideoModel","FinishReason","HarmProbability","ResponseTypeType","GoogleproxyV1ResponseTypeType","V1ResponseTypeType","GatewayMessageDefinitionRole","OpenaiproxyV1EmbeddingModel","V1EmbeddingEncodingFormat","EmbeddingModel","EmbeddingEncodingFormat","V1EmbeddingModel","TaskType","Tra
nscriptionModel","CreateTranscriptionRequestResponseFormat","TimestampGranularity","SpeechModel","ElevenLabsTextToSpeechModel","EntityType","OutageStatus","ImageEditingModel","V1EditImageModel","EditImageModel","EditAction","WebhookIdentityType","generateTextByPrompt","generateTextByPromptStreamed","generateTextByPromptObject","generateTextByPromptObjectStreamed","generateEmbedding","generateTextByProject","generateTextByProjectStreamed","generateModeration","generateImageByProject","generateImageByPrompt","generateImageByPromptObject","generateContentByPrompt","generateContentByProject","generateContentByPromptObject","generateTranscription","generateAudio","generateAudioStreamed","publishPrompt","getPrompt","publishProject","getProject","getStatus","getApplicationUsage","editImage","pollImageGenerationResult"]}