@wix/auto_sdk_ai-gateway_generators 1.0.71 → 1.0.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/build/cjs/index.d.ts +936 -381
  2. package/build/cjs/index.js +25 -11
  3. package/build/cjs/index.js.map +1 -1
  4. package/build/cjs/index.typings.d.ts +1 -1
  5. package/build/cjs/index.typings.js +25 -11
  6. package/build/cjs/index.typings.js.map +1 -1
  7. package/build/cjs/meta.d.ts +927 -380
  8. package/build/cjs/meta.js +21 -9
  9. package/build/cjs/meta.js.map +1 -1
  10. package/build/es/index.d.mts +936 -381
  11. package/build/es/index.mjs +25 -11
  12. package/build/es/index.mjs.map +1 -1
  13. package/build/es/index.typings.d.mts +1 -1
  14. package/build/es/index.typings.mjs +25 -11
  15. package/build/es/index.typings.mjs.map +1 -1
  16. package/build/es/meta.d.mts +927 -380
  17. package/build/es/meta.mjs +21 -9
  18. package/build/es/meta.mjs.map +1 -1
  19. package/build/internal/cjs/index.d.ts +14 -14
  20. package/build/internal/cjs/index.js +25 -11
  21. package/build/internal/cjs/index.js.map +1 -1
  22. package/build/internal/cjs/index.typings.d.ts +949 -394
  23. package/build/internal/cjs/index.typings.js +25 -11
  24. package/build/internal/cjs/index.typings.js.map +1 -1
  25. package/build/internal/cjs/meta.d.ts +927 -380
  26. package/build/internal/cjs/meta.js +21 -9
  27. package/build/internal/cjs/meta.js.map +1 -1
  28. package/build/internal/es/index.d.mts +14 -14
  29. package/build/internal/es/index.mjs +25 -11
  30. package/build/internal/es/index.mjs.map +1 -1
  31. package/build/internal/es/index.typings.d.mts +949 -394
  32. package/build/internal/es/index.typings.mjs +25 -11
  33. package/build/internal/es/index.typings.mjs.map +1 -1
  34. package/build/internal/es/meta.d.mts +927 -380
  35. package/build/internal/es/meta.mjs +21 -9
  36. package/build/internal/es/meta.mjs.map +1 -1
  37. package/package.json +4 -4
@@ -40,7 +40,7 @@ interface Prompt extends PromptModelRequestOneOf {
40
40
  /** Perplexity chat completion request */
41
41
  perplexityChatCompletionRequest?: InvokeChatCompletionRequest;
42
42
  /** Google AI - generate image request */
43
- googleGenerateImageRequest?: GenerateImageRequest;
43
+ googleGenerateImageRequest?: V1GenerateImageRequest;
44
44
  /** ML platform - generate image request */
45
45
  mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;
46
46
  /** OpenAI image creation response. */
@@ -63,6 +63,10 @@ interface Prompt extends PromptModelRequestOneOf {
63
63
  openAiCreateVideoRequest?: CreateVideoRequest;
64
64
  /** Fireworks - OpenAI payload */
65
65
  fireworksInvokeCustomOpenAiRequest?: InvokeCustomOpenAiModelRequest;
66
+ /** Bytedance - Create content task request (video generation) */
67
+ bytedanceCreateContentTaskRequest?: CreateContentTaskRequest;
68
+ /** Bytedance - Generate image request (Seedream models) */
69
+ bytedanceGenerateImageRequest?: GenerateImageRequest;
66
70
  /**
67
71
  * Prompt id.
68
72
  * @format GUID
@@ -126,7 +130,7 @@ interface PromptModelRequestOneOf {
126
130
  /** Perplexity chat completion request */
127
131
  perplexityChatCompletionRequest?: InvokeChatCompletionRequest;
128
132
  /** Google AI - generate image request */
129
- googleGenerateImageRequest?: GenerateImageRequest;
133
+ googleGenerateImageRequest?: V1GenerateImageRequest;
130
134
  /** ML platform - generate image request */
131
135
  mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;
132
136
  /** OpenAI image creation response. */
@@ -149,6 +153,10 @@ interface PromptModelRequestOneOf {
149
153
  openAiCreateVideoRequest?: CreateVideoRequest;
150
154
  /** Fireworks - OpenAI payload */
151
155
  fireworksInvokeCustomOpenAiRequest?: InvokeCustomOpenAiModelRequest;
156
+ /** Bytedance - Create content task request (video generation) */
157
+ bytedanceCreateContentTaskRequest?: CreateContentTaskRequest;
158
+ /** Bytedance - Generate image request (Seedream models) */
159
+ bytedanceGenerateImageRequest?: GenerateImageRequest;
152
160
  }
153
161
  interface FallbackPromptConfig {
154
162
  /**
@@ -362,10 +370,11 @@ declare enum OpenaiproxyV1Model {
362
370
  GPT_5_MINI_2025_08_07 = "GPT_5_MINI_2025_08_07",
363
371
  GPT_5_NANO_2025_08_07 = "GPT_5_NANO_2025_08_07",
364
372
  GPT_5_2_2025_12_11_COMPLETION = "GPT_5_2_2025_12_11_COMPLETION",
365
- GPT_5_1_2025_11_13_COMPLETION = "GPT_5_1_2025_11_13_COMPLETION"
373
+ GPT_5_1_2025_11_13_COMPLETION = "GPT_5_1_2025_11_13_COMPLETION",
374
+ GPT_5_4_2026_03_05_COMPLETION = "GPT_5_4_2026_03_05_COMPLETION"
366
375
  }
367
376
  /** @enumType */
368
- type OpenaiproxyV1ModelWithLiterals = OpenaiproxyV1Model | 'UNKNOWN' | 'GPT_3_5_TURBO' | 'GPT_3_5_TURBO_0301' | 'GPT_4' | 'GPT_4_0314' | 'GPT_4_32K' | 'GPT_4_32K_0314' | 'GPT_3_5_TURBO_0613' | 'GPT_3_5_TURBO_16K' | 'GPT_3_5_TURBO_16K_0613' | 'GPT_4_0613' | 'GPT_4_32K_0613' | 'GPT_3_5_TURBO_1106' | 'GPT_4_1106_PREVIEW' | 'GPT_4_VISION_PREVIEW' | 'GPT_4_TURBO_PREVIEW' | 'GPT_4_0125_PREVIEW' | 'GPT_3_5_TURBO_0125' | 'GPT_4_TURBO_2024_04_09' | 'GPT_4O_2024_05_13' | 'GPT_4O_MINI_2024_07_18' | 'GPT_4O_2024_08_06' | 'O1_PREVIEW' | 'O1_PREVIEW_2024_09_12' | 'O1_MINI' | 'O1_MINI_2024_09_12' | 'GPT_4O_2024_11_20' | 'O1_2024_12_17' | 'O3_MINI_2025_01_31' | 'GPT_4_OLD' | 'GPT_4_1_2025_04_14' | 'GPT_4_1_MINI_2025_04_14' | 'GPT_4_1_NANO_2025_04_14' | 'O3_2025_04_16' | 'O4_MINI_2025_04_16' | 'GPT_EXP' | 'GPT_EXP_2' | 'GPT_5_2025_08_07' | 'GPT_5_MINI_2025_08_07' | 'GPT_5_NANO_2025_08_07' | 'GPT_5_2_2025_12_11_COMPLETION' | 'GPT_5_1_2025_11_13_COMPLETION';
377
+ type OpenaiproxyV1ModelWithLiterals = OpenaiproxyV1Model | 'UNKNOWN' | 'GPT_3_5_TURBO' | 'GPT_3_5_TURBO_0301' | 'GPT_4' | 'GPT_4_0314' | 'GPT_4_32K' | 'GPT_4_32K_0314' | 'GPT_3_5_TURBO_0613' | 'GPT_3_5_TURBO_16K' | 'GPT_3_5_TURBO_16K_0613' | 'GPT_4_0613' | 'GPT_4_32K_0613' | 'GPT_3_5_TURBO_1106' | 'GPT_4_1106_PREVIEW' | 'GPT_4_VISION_PREVIEW' | 'GPT_4_TURBO_PREVIEW' | 'GPT_4_0125_PREVIEW' | 'GPT_3_5_TURBO_0125' | 'GPT_4_TURBO_2024_04_09' | 'GPT_4O_2024_05_13' | 'GPT_4O_MINI_2024_07_18' | 'GPT_4O_2024_08_06' | 'O1_PREVIEW' | 'O1_PREVIEW_2024_09_12' | 'O1_MINI' | 'O1_MINI_2024_09_12' | 'GPT_4O_2024_11_20' | 'O1_2024_12_17' | 'O3_MINI_2025_01_31' | 'GPT_4_OLD' | 'GPT_4_1_2025_04_14' | 'GPT_4_1_MINI_2025_04_14' | 'GPT_4_1_NANO_2025_04_14' | 'O3_2025_04_16' | 'O4_MINI_2025_04_16' | 'GPT_EXP' | 'GPT_EXP_2' | 'GPT_5_2025_08_07' | 'GPT_5_MINI_2025_08_07' | 'GPT_5_NANO_2025_08_07' | 'GPT_5_2_2025_12_11_COMPLETION' | 'GPT_5_1_2025_11_13_COMPLETION' | 'GPT_5_4_2026_03_05_COMPLETION';
369
378
  interface OpenaiproxyV1ChatCompletionMessage {
370
379
  /** The role of the message author. */
371
380
  role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;
@@ -1012,10 +1021,11 @@ declare enum GoogleproxyV1Model {
1012
1021
  GEMINI_2_5_COMPUTER_USE = "GEMINI_2_5_COMPUTER_USE",
1013
1022
  GEMINI_3_0_PRO = "GEMINI_3_0_PRO",
1014
1023
  GEMINI_3_0_PRO_IMAGE = "GEMINI_3_0_PRO_IMAGE",
1015
- GEMINI_3_0_FLASH = "GEMINI_3_0_FLASH"
1024
+ GEMINI_3_0_FLASH = "GEMINI_3_0_FLASH",
1025
+ GEMINI_3_1_PRO = "GEMINI_3_1_PRO"
1016
1026
  }
1017
1027
  /** @enumType */
1018
- type GoogleproxyV1ModelWithLiterals = GoogleproxyV1Model | 'UNKNOWN_MODEL' | 'GEMINI_1_0_PRO' | 'GEMINI_1_0_PRO_VISION' | 'GEMINI_1_5_PRO' | 'GEMINI_1_5_FLASH' | 'GEMINI_2_0_FLASH' | 'GEMINI_2_0_FLASH_LITE' | 'GEMINI_2_5_PRO' | 'GEMINI_2_5_FLASH' | 'GEMINI_2_5_FLASH_LITE' | 'GEMINI_2_5_FLASH_IMAGE' | 'GEMINI_2_5_COMPUTER_USE' | 'GEMINI_3_0_PRO' | 'GEMINI_3_0_PRO_IMAGE' | 'GEMINI_3_0_FLASH';
1028
+ type GoogleproxyV1ModelWithLiterals = GoogleproxyV1Model | 'UNKNOWN_MODEL' | 'GEMINI_1_0_PRO' | 'GEMINI_1_0_PRO_VISION' | 'GEMINI_1_5_PRO' | 'GEMINI_1_5_FLASH' | 'GEMINI_2_0_FLASH' | 'GEMINI_2_0_FLASH_LITE' | 'GEMINI_2_5_PRO' | 'GEMINI_2_5_FLASH' | 'GEMINI_2_5_FLASH_LITE' | 'GEMINI_2_5_FLASH_IMAGE' | 'GEMINI_2_5_COMPUTER_USE' | 'GEMINI_3_0_PRO' | 'GEMINI_3_0_PRO_IMAGE' | 'GEMINI_3_0_FLASH' | 'GEMINI_3_1_PRO';
1019
1029
  interface Content {
1020
1030
  /**
1021
1031
  * The role in a conversation associated with the content.
@@ -1663,10 +1673,12 @@ declare enum Model {
1663
1673
  CLAUDE_4_5_SONNET_1_0 = "CLAUDE_4_5_SONNET_1_0",
1664
1674
  /** us.anthropic.claude-haiku-4-5-20251001-v1:0 */
1665
1675
  CLAUDE_4_5_HAIKU_1_0 = "CLAUDE_4_5_HAIKU_1_0",
1666
- CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0"
1676
+ CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0",
1677
+ CLAUDE_4_6_OPUS_1_0 = "CLAUDE_4_6_OPUS_1_0",
1678
+ CLAUDE_4_6_SONNET_1_0 = "CLAUDE_4_6_SONNET_1_0"
1667
1679
  }
1668
1680
  /** @enumType */
1669
- type ModelWithLiterals = Model | 'UNKNOWN' | 'CLAUDE_3_SONNET_1_0' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_5_HAIKU_1_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0';
1681
+ type ModelWithLiterals = Model | 'UNKNOWN' | 'CLAUDE_3_SONNET_1_0' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_5_HAIKU_1_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0' | 'CLAUDE_4_6_OPUS_1_0' | 'CLAUDE_4_6_SONNET_1_0';
1670
1682
  interface AnthropicClaudeMessage {
1671
1683
  /** The role of the message author. */
1672
1684
  role?: RoleWithLiterals;
@@ -1694,7 +1706,7 @@ interface ContentBlock extends ContentBlockTypeOneOf {
1694
1706
  /** Text content. */
1695
1707
  textContent?: Text;
1696
1708
  /** Image image = 2; // Image content. */
1697
- imageUrl?: ImageUrl;
1709
+ imageUrl?: V1ImageUrl;
1698
1710
  /** Tool use content, describes which tool should be used and with which parameters. */
1699
1711
  toolUse?: ToolUse;
1700
1712
  /** Tool result content, describes the result of tool invocation. */
@@ -1719,7 +1731,7 @@ interface ContentBlockTypeOneOf {
1719
1731
  /** Text content. */
1720
1732
  textContent?: Text;
1721
1733
  /** Image image = 2; // Image content. */
1722
- imageUrl?: ImageUrl;
1734
+ imageUrl?: V1ImageUrl;
1723
1735
  /** Tool use content, describes which tool should be used and with which parameters. */
1724
1736
  toolUse?: ToolUse;
1725
1737
  /** Tool result content, describes the result of tool invocation. */
@@ -1741,7 +1753,7 @@ interface Text {
1741
1753
  /** Enables prompt caching: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching. */
1742
1754
  cacheControl?: CacheControl;
1743
1755
  }
1744
- interface ImageUrl {
1756
+ interface V1ImageUrl {
1745
1757
  /**
1746
1758
  * The URL must be a valid wix mp or wix static URL.
1747
1759
  * @maxLength 100000
@@ -1808,7 +1820,7 @@ interface SimpleContentBlock extends SimpleContentBlockTypeOneOf {
1808
1820
  /** Text content. */
1809
1821
  textContent?: Text;
1810
1822
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
1811
- imageUrl?: ImageUrl;
1823
+ imageUrl?: V1ImageUrl;
1812
1824
  }
1813
1825
  /** @oneof */
1814
1826
  interface SimpleContentBlockTypeOneOf {
@@ -1822,7 +1834,7 @@ interface SimpleContentBlockTypeOneOf {
1822
1834
  /** Text content. */
1823
1835
  textContent?: Text;
1824
1836
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
1825
- imageUrl?: ImageUrl;
1837
+ imageUrl?: V1ImageUrl;
1826
1838
  }
1827
1839
  interface Thinking {
1828
1840
  /**
@@ -2056,10 +2068,12 @@ declare enum ClaudeModel {
2056
2068
  CLAUDE_4_OPUS_1_0 = "CLAUDE_4_OPUS_1_0",
2057
2069
  CLAUDE_4_5_SONNET_1_0 = "CLAUDE_4_5_SONNET_1_0",
2058
2070
  CLAUDE_4_5_HAIKU_1_0 = "CLAUDE_4_5_HAIKU_1_0",
2059
- CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0"
2071
+ CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0",
2072
+ CLAUDE_4_6_OPUS_1_0 = "CLAUDE_4_6_OPUS_1_0",
2073
+ CLAUDE_4_6_SONNET_1_0 = "CLAUDE_4_6_SONNET_1_0"
2060
2074
  }
2061
2075
  /** @enumType */
2062
- type ClaudeModelWithLiterals = ClaudeModel | 'UNKNOWN_CLAUDE_MODEL' | 'CLAUDE_3_SONNET_1_0' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_OPUS_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0';
2076
+ type ClaudeModelWithLiterals = ClaudeModel | 'UNKNOWN_CLAUDE_MODEL' | 'CLAUDE_3_SONNET_1_0' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_OPUS_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0' | 'CLAUDE_4_6_OPUS_1_0' | 'CLAUDE_4_6_SONNET_1_0';
2063
2077
  interface V1AnthropicClaudeMessage {
2064
2078
  /** The role of the message author. */
2065
2079
  role?: V1MessageRoleRoleWithLiterals;
@@ -2428,10 +2442,12 @@ declare enum AnthropicModel {
2428
2442
  CLAUDE_4_1_OPUS_1_0 = "CLAUDE_4_1_OPUS_1_0",
2429
2443
  CLAUDE_4_5_SONNET_1_0 = "CLAUDE_4_5_SONNET_1_0",
2430
2444
  CLAUDE_4_5_HAIKU_1_0 = "CLAUDE_4_5_HAIKU_1_0",
2431
- CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0"
2445
+ CLAUDE_4_5_OPUS_1_0 = "CLAUDE_4_5_OPUS_1_0",
2446
+ CLAUDE_4_6_OPUS_1_0 = "CLAUDE_4_6_OPUS_1_0",
2447
+ CLAUDE_4_6_SONNET_1_0 = "CLAUDE_4_6_SONNET_1_0"
2432
2448
  }
2433
2449
  /** @enumType */
2434
- type AnthropicModelWithLiterals = AnthropicModel | 'UNKNOWN_ANTHROPIC_MODEL' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_1_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0';
2450
+ type AnthropicModelWithLiterals = AnthropicModel | 'UNKNOWN_ANTHROPIC_MODEL' | 'CLAUDE_3_HAIKU_1_0' | 'CLAUDE_3_5_SONNET_1_0' | 'CLAUDE_3_5_SONNET_2_0' | 'CLAUDE_3_7_SONNET_1_0' | 'CLAUDE_4_SONNET_1_0' | 'CLAUDE_4_OPUS_1_0' | 'CLAUDE_4_1_OPUS_1_0' | 'CLAUDE_4_5_SONNET_1_0' | 'CLAUDE_4_5_HAIKU_1_0' | 'CLAUDE_4_5_OPUS_1_0' | 'CLAUDE_4_6_OPUS_1_0' | 'CLAUDE_4_6_SONNET_1_0';
2435
2451
  interface AnthropicMessage {
2436
2452
  /** The role of the message author. */
2437
2453
  role?: MessageRoleRoleWithLiterals;
@@ -2453,7 +2469,7 @@ interface V1ContentBlock extends V1ContentBlockTypeOneOf {
2453
2469
  /** Text content. */
2454
2470
  textContent?: V1Text;
2455
2471
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
2456
- image?: V1ImageUrl;
2472
+ image?: AnthropicV1ImageUrl;
2457
2473
  /** Tool use content, describes which tool should be used and with which parameters. */
2458
2474
  toolUse?: V1ToolUse;
2459
2475
  /** Tool result content, describes the result of tool invocation. */
@@ -2490,7 +2506,7 @@ interface V1ContentBlockTypeOneOf {
2490
2506
  /** Text content. */
2491
2507
  textContent?: V1Text;
2492
2508
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
2493
- image?: V1ImageUrl;
2509
+ image?: AnthropicV1ImageUrl;
2494
2510
  /** Tool use content, describes which tool should be used and with which parameters. */
2495
2511
  toolUse?: V1ToolUse;
2496
2512
  /** Tool result content, describes the result of tool invocation. */
@@ -2704,7 +2720,7 @@ interface SearchResultLocationCitation {
2704
2720
  */
2705
2721
  citedText?: string | null;
2706
2722
  }
2707
- interface V1ImageUrl {
2723
+ interface AnthropicV1ImageUrl {
2708
2724
  /**
2709
2725
  * The URL must be a valid wix mp or wix static URL.
2710
2726
  * @maxLength 100000
@@ -2764,7 +2780,7 @@ interface ToolResultContentBlock extends ToolResultContentBlockTypeOneOf {
2764
2780
  /** Text content. */
2765
2781
  text?: V1Text;
2766
2782
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
2767
- image?: V1ImageUrl;
2783
+ image?: AnthropicV1ImageUrl;
2768
2784
  /** Document content block. */
2769
2785
  document?: DocumentContent;
2770
2786
  /** Search result block with snippets/citations. */
@@ -2775,7 +2791,7 @@ interface ToolResultContentBlockTypeOneOf {
2775
2791
  /** Text content. */
2776
2792
  text?: V1Text;
2777
2793
  /** Image content, represented as URL. Will be downloaded and passed on as base64. */
2778
- image?: V1ImageUrl;
2794
+ image?: AnthropicV1ImageUrl;
2779
2795
  /** Document content block. */
2780
2796
  document?: DocumentContent;
2781
2797
  /** Search result block with snippets/citations. */
@@ -3721,7 +3737,7 @@ interface SystemContentBlock {
3721
3737
  interface CreateImageRequest {
3722
3738
  /**
3723
3739
  * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
3724
- * @maxLength 4000
3740
+ * @maxLength 50000
3725
3741
  */
3726
3742
  prompt?: string | null;
3727
3743
  /** The model to use for image generation. */
@@ -4989,7 +5005,7 @@ interface InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf {
4989
5005
  regex?: string;
4990
5006
  }
4991
5007
  /** mimics https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api */
4992
- interface GenerateImageRequest {
5008
+ interface V1GenerateImageRequest {
4993
5009
  /** ID of the model to use. */
4994
5010
  model?: ImagenModelWithLiterals;
4995
5011
  /**
@@ -5162,8 +5178,9 @@ interface V1FluxPulid {
5162
5178
  }
5163
5179
  interface CreateImageOpenAiRequest {
5164
5180
  /**
5165
- * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
5166
- * @maxLength 4000
5181
+ * A text description of the desired image(s). The maximum length is 32000 characters for the GPT image models,
5182
+ * 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
5183
+ * @maxLength 50000
5167
5184
  */
5168
5185
  prompt?: string | null;
5169
5186
  /** The model to use for image generation. */
@@ -5226,8 +5243,9 @@ declare enum OpenAiImageModel {
5226
5243
  type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1' | 'GPT_IMAGE_EXP' | 'GPT_IMAGE_EXP_2' | 'GPT_IMAGE_EXP_3' | 'GPT_IMAGE_1_5';
5227
5244
  interface EditImageOpenAiRequest {
5228
5245
  /**
5229
- * A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
5230
- * @maxLength 4000
5246
+ * A text description of the desired image(s). The maximum length is 32000 characters for the GPT image models,
5247
+ * 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
5248
+ * @maxLength 50000
5231
5249
  */
5232
5250
  prompt?: string | null;
5233
5251
  /** The model to use for image generation. */
@@ -5899,10 +5917,14 @@ declare enum V1ResponsesModel {
5899
5917
  GPT_EXP_RESPONSES_2 = "GPT_EXP_RESPONSES_2",
5900
5918
  GPT_EXP_RESPONSES_3 = "GPT_EXP_RESPONSES_3",
5901
5919
  GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX",
5902
- GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
5920
+ GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11",
5921
+ GPT_5_2_CODEX = "GPT_5_2_CODEX",
5922
+ GPT_5_3_CODEX = "GPT_5_3_CODEX",
5923
+ GPT_5_4_2026_03_05 = "GPT_5_4_2026_03_05",
5924
+ GPT_5_4_PRO_2026_03_05 = "GPT_5_4_PRO_2026_03_05"
5903
5925
  }
5904
5926
  /** @enumType */
5905
- type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX' | 'GPT_5_2_2025_12_11';
5927
+ type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX' | 'GPT_5_2_2025_12_11' | 'GPT_5_2_CODEX' | 'GPT_5_3_CODEX' | 'GPT_5_4_2026_03_05' | 'GPT_5_4_PRO_2026_03_05';
5906
5928
  interface V1ResponsesInputItem extends V1ResponsesInputItemItemOneOf {
5907
5929
  /**
5908
5930
  * A message input to the model with a role indicating instruction following hierarchy.
@@ -7501,6 +7523,242 @@ interface InvokeCustomOpenAiModelRequestResponseFormat {
7501
7523
  /** The schema object describes the output object for the model. Currently, only the JSON Schema Object is supported. */
7502
7524
  schema?: Record<string, any> | null;
7503
7525
  }
7526
+ interface CreateContentTaskRequest {
7527
+ /**
7528
+ * The model to use for content creation.
7529
+ * @maxLength 255
7530
+ */
7531
+ model?: string;
7532
+ /**
7533
+ * The context items to create the content from.
7534
+ * @maxSize 50
7535
+ */
7536
+ content?: ContentItem[];
7537
+ /**
7538
+ * Returns the last frame image of the generated video. Default: false
7539
+ * Useful for generating multiple consecutive videos using the last frame as the first frame of the next video.
7540
+ */
7541
+ returnLastFrame?: boolean | null;
7542
+ /**
7543
+ * Service tier for processing the request. Default: default
7544
+ * can be one of:
7545
+ * default: Online inference mode (lower RPM, lower latency)
7546
+ * flex: Offline inference mode (higher TPD quota, 50% price, higher latency)
7547
+ * @maxLength 255
7548
+ */
7549
+ serviceTier?: string | null;
7550
+ /**
7551
+ * Whether the generated video includes audio synchronized with the visuals. Default: true
7552
+ * Only supported by Seedance 1.5 pro
7553
+ */
7554
+ generateAudio?: boolean | null;
7555
+ /**
7556
+ * Whether to enable Draft sample mode. Default: false
7557
+ * Only supported by Seedance 1.5 pro
7558
+ * true: Generate a 480p preview video (lower cost)
7559
+ * false: Generate a standard video
7560
+ */
7561
+ draft?: boolean | null;
7562
+ /**
7563
+ * The resolution of the output video. Default: 720p for Seedance 1.5 pro/lite, 1080p for pro/pro-fast
7564
+ * Valid values: 480p, 720p, 1080p
7565
+ * @maxLength 255
7566
+ */
7567
+ resolution?: string | null;
7568
+ /**
7569
+ * The aspect ratio of the output video. Default varies by model and scenario.
7570
+ * Valid values: 16:9, 4:3, 1:1, 3:4, 9:16, 21:9, adaptive
7571
+ * @maxLength 255
7572
+ */
7573
+ ratio?: string | null;
7574
+ /**
7575
+ * The duration of the output video in seconds. Default: 5
7576
+ * Value range: 2-12s
7577
+ * Seedance 1.5 pro: Use -1 to let the model decide duration (4-12s)
7578
+ * set max to 120 for future changes
7579
+ * Choose either duration or frames; frames takes priority.
7580
+ * @min -1
7581
+ * @max 120
7582
+ */
7583
+ duration?: number | null;
7584
+ /**
7585
+ * Number of frames for the output video. Not supported by Seedance 1.5 pro.
7586
+ * Valid range: integers within [29, 289] that conform to format 25 + 4n (where n is positive integer)
7587
+ * Choose either duration or frames; frames takes priority.
7588
+ * Set max to 2890 for future changes
7589
+ * @min 29
7590
+ * @max 2890
7591
+ */
7592
+ frames?: number | null;
7593
+ /**
7594
+ * The seed controls randomness of output. Default: -1 (random)
7595
+ * Valid values: integers within [-1, 2^32-1]
7596
+ * @min -1
7597
+ * @max 4294967295
7598
+ */
7599
+ seed?: number | null;
7600
+ /**
7601
+ * Specifies whether to fix the camera. Default: false
7602
+ * Not supported for reference-image-to-video
7603
+ */
7604
+ cameraFixed?: boolean | null;
7605
+ /** Specifies whether to add watermarks to the output video. Default: false */
7606
+ watermark?: boolean | null;
7607
+ /**
7608
+ * Skip polling flag - if set to false, will poll until video generation is complete
7609
+ * If not set or true, returns immediately with task ID for manual polling
7610
+ */
7611
+ skipPolling?: boolean | null;
7612
+ }
7613
+ interface ContentItem {
7614
+ /**
7615
+ * The type of the input content
7616
+ * Can be one of 'text', 'image_url', 'draft_task'
7617
+ * @maxLength 255
7618
+ */
7619
+ type?: string | null;
7620
+ /**
7621
+ * The input text information for the model (required when type=TEXT)
7622
+ * Describes the video to be generated. Can include text prompt and optional parameters.
7623
+ * @maxLength 100000
7624
+ */
7625
+ text?: string | null;
7626
+ /** The input image object for the model (required when type=IMAGE_URL) */
7627
+ imageUrl?: ImageUrl;
7628
+ /**
7629
+ * The location or purpose of the image (required under certain conditions when type=IMAGE_URL)
7630
+ * Used for Image-to-Video scenarios
7631
+ * available values are:
7632
+ * 'last_frame', 'first_frame', 'reference_image'
7633
+ * @maxLength 255
7634
+ */
7635
+ role?: string | null;
7636
+ /**
7637
+ * The draft task object for generating official video from a draft.
7638
+ * Only supported by Seedance 1.5 Pro.
7639
+ * The platform will reuse the inputs from the draft video to generate the official video.
7640
+ */
7641
+ draftTask?: DraftTask;
7642
+ }
7643
+ interface ImageUrl {
7644
+ /**
7645
+ * The image information, can be an image URL or Base64-encoded content
7646
+ * URL: Must be accessible
7647
+ * Base64: Format must be data:image/<format>;base64,<content> (e.g., data:image/png;base64,...)
7648
+ * Requirements: JPEG/PNG/WebP/BMP/TIFF/GIF (Seedance 1.5 Pro also supports HEIC/HEIF)
7649
+ * Aspect ratio: 0.4 to 2.5, shorter side > 300px, longer side < 6000px, size < 30MB
7650
+ * @maxLength 100000
7651
+ */
7652
+ url?: string | null;
7653
+ }
7654
+ interface DraftTask {
7655
+ /**
7656
+ * The draft video task ID.
7657
+ * The platform will automatically reuse the user inputs applied by the draft video
7658
+ * (including model, content.text, content.image_url, generate_audio, seed, ratio, duration, frames, camera_fixed)
7659
+ * to generate the official video.
7660
+ * @maxLength 1000
7661
+ */
7662
+ _id?: string | null;
7663
+ }
7664
+ interface GenerateImageRequest {
7665
+ /**
7666
+ * The model to use for image generation.
7667
+ * Accepts a model ID (e.g. "seedream-5.0-lite", "seedream-4.5", "seedream-4.0") or an Endpoint ID.
7668
+ * @maxLength 255
7669
+ */
7670
+ model?: string;
7671
+ /**
7672
+ * Text prompt describing the image to generate.
7673
+ * Recommended: keep under 600 English words. Excessively long prompts may cause the model
7674
+ * to overlook details and focus only on major elements.
7675
+ * @maxLength 100000
7676
+ */
7677
+ prompt?: string;
7678
+ /**
7679
+ * Reference image(s) as URL or base64-encoded string. Max 14 reference images.
7680
+ *
7681
+ * Input formats:
7682
+ * - Image URL: must be publicly accessible.
7683
+ * - Base64: format must be "data:image/<format>;base64,<content>" (e.g. "data:image/png;base64,...").
7684
+ *
7685
+ * Image requirements:
7686
+ * - Formats: JPEG, PNG, WEBP, BMP, TIFF, GIF
7687
+ * - Aspect ratio (width/height): between [1/16, 16]
7688
+ * - Width and height: > 14px
7689
+ * - Size: up to 10 MB per image
7690
+ * - Total pixels: no more than 6000x6000 = 36,000,000 per image
7691
+ * @maxLength 3825
7692
+ * @maxSize 15
7693
+ */
7694
+ image?: string[];
7695
+ /**
7696
+ * Output image dimensions. Two methods available (cannot be combined):
7697
+ *
7698
+ * Method 1 - Resolution shorthand (let the model determine width/height from prompt context):
7699
+ * seedream-5.0-lite: "2K", "3K"
7700
+ * seedream-4.5: "2K", "4K"
7701
+ * seedream-4.0: "1K", "2K", "4K"
7702
+ *
7703
+ * Method 2 - Explicit pixel dimensions (e.g. "2048x2048"):
7704
+ * Both total pixel range AND aspect ratio [1/16, 16] must be satisfied.
7705
+ * seedream-5.0-lite: total pixels [3,686,400 .. 10,404,496], default "2048x2048"
7706
+ * seedream-4.5: total pixels [3,686,400 .. 16,777,216], default "2048x2048"
7707
+ * seedream-4.0: total pixels [921,600 .. 16,777,216], default "2048x2048"
7708
+ *
7709
+ * Common recommended dimensions (2K):
7710
+ * 1:1 = 2048x2048, 4:3 = 2304x1728, 3:4 = 1728x2304,
7711
+ * 16:9 = 2848x1600, 9:16 = 1600x2848, 21:9 = 3136x1344
7712
+ * @maxLength 255
7713
+ */
7714
+ size?: string | null;
7715
+ /**
7716
+ * How the generated images are returned. Default: "url".
7717
+ * "url": returns a download link valid for 24 hours after generation.
7718
+ * "b64_json": returns the image data as a Base64-encoded string in JSON.
7719
+ * @maxLength 255
7720
+ */
7721
+ responseFormat?: string | null;
7722
+ /** Whether to add an "AI generated" watermark on the bottom-right corner. Default: true. */
7723
+ watermark?: boolean | null;
7724
+ /**
7725
+ * Whether to enable batch (sequential) image generation. Default: "disabled".
7726
+ * "auto": the model decides whether to return multiple images and how many, based on the prompt.
7727
+ * "disabled": only one image is generated.
7728
+ * @maxLength 255
7729
+ */
7730
+ sequentialImageGeneration?: string | null;
7731
+ /** Configuration for batch image generation. Only effective when sequential_image_generation = "auto". */
7732
+ sequentialImageGenerationOptions?: SequentialImageGenerationOptions;
7733
+ /**
7734
+ * Output image file format. Default: "jpeg".
7735
+ * "jpeg": supported by all models (default, cannot be changed on seedream-4.5/4.0).
7736
+ * "png": only supported by seedream-5.0-lite.
7737
+ * @maxLength 255
7738
+ */
7739
+ outputFormat?: string | null;
7740
+ /**
7741
+ * Configuration for prompt optimization.
7742
+ * Supported by seedream-5.0-lite, seedream-4.5 (standard mode only), and seedream-4.0.
7743
+ */
7744
+ optimizePromptOptions?: OptimizePromptOptions;
7745
+ }
7746
+ interface SequentialImageGenerationOptions {
7747
+ /**
7748
+ * Maximum number of images to generate in this request. Default: 15. Range: [1, 15].
7749
+ * Actual count is constrained by: number of input reference images + generated images <= 15.
7750
+ */
7751
+ maxImages?: number | null;
7752
+ }
7753
+ interface OptimizePromptOptions {
7754
+ /**
7755
+ * Prompt optimization mode. Default: "standard".
7756
+ * "standard": higher quality, longer generation time.
7757
+ * "fast": faster but lower quality.
7758
+ * @maxLength 255
7759
+ */
7760
+ mode?: string | null;
7761
+ }
7504
7762
  interface ContentGenerationRequestedEvent {
7505
7763
  /** Prompt that the generation was requested for. */
7506
7764
  prompt?: Prompt;
@@ -7515,6 +7773,8 @@ interface ContentGenerationRequestedEvent {
7515
7773
  * @format GUID
7516
7774
  */
7517
7775
  eventChainId?: string;
7776
+ /** Dynamic request configuration, including dynamic tools and messages. */
7777
+ dynamicRequestConfig?: DynamicRequestConfig;
7518
7778
  }
7519
7779
  interface UserRequestInfo {
7520
7780
  /**
@@ -7540,185 +7800,97 @@ interface UserRequestInfo {
7540
7800
  */
7541
7801
  costAttributionOverrideId?: string | null;
7542
7802
  }
7543
- interface ContentGenerationSucceededEvent {
7544
- /** Model response object that describes the content generation result. */
7545
- response?: GenerateContentModelResponse;
7546
- /** Prompt's final form that was used to issue a GenerateContent request. */
7547
- materializedPrompt?: Prompt;
7803
+ interface DynamicRequestConfig {
7548
7804
  /**
7549
- * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.
7550
- * @format GUID
7805
+ * List of GatewayToolDefinition's, used to overwrite tools in the prompt.
7806
+ * @maxSize 1000
7551
7807
  */
7552
- eventChainId?: string;
7808
+ gatewayToolDefinitions?: GatewayToolDefinition[];
7809
+ /**
7810
+ * List of GatewayMessageDefinition's, which will be converted to model-specific format and appended to the messages saved in the prompt.
7811
+ * @maxSize 10000
7812
+ */
7813
+ gatewayMessageDefinitions?: GatewayMessageDefinition[];
7553
7814
  }
7554
- interface GenerateContentModelResponse extends GenerateContentModelResponseResponseOneOf {
7555
- /** OpenAI chat completion response. */
7556
- openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;
7557
- /** Google bison text completion response. */
7558
- googleTextBisonResponse?: TextBisonPredictResponse;
7559
- /** Google bison chat completion response. */
7560
- googleChatBisonResponse?: ChatBisonPredictResponse;
7561
- /** Azure OpenAI chat completion response. */
7562
- azureChatCompletionResponse?: CreateChatCompletionResponse;
7563
- /** Google Gemini generate content response. */
7564
- googleGeminiGenerateContentResponse?: GenerateContentResponse;
7565
- /** Anthropic Claude via Amazon Bedrock generate content response. */
7566
- anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;
7567
- /** Anthropic Claude via Google vertex generate content response. */
7568
- googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;
7569
- /** Native Anthropic API proxy generate content response. */
7570
- invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;
7571
- /** Llama via Amazon Bedrock text completion response. */
7572
- llamaModelResponse?: InvokeLlamaModelResponse;
7573
- /** Invoke Amazon Converse API response. */
7574
- amazonConverseResponse?: InvokeConverseResponse;
7575
- /** Llama via ML Platform text completion response. */
7576
- mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;
7577
- /** Perplexity chat completion response. */
7578
- perplexityChatCompletionResponse?: InvokeChatCompletionResponse;
7579
- /** OpenAI image generation response. */
7580
- openAiCreateImageResponse?: CreateImageResponse;
7581
- /** Stability AI text to image response. */
7582
- stabilityAiTextToImageResponse?: V1TextToImageResponse;
7583
- /** Stability AI generate core response. */
7584
- stabilityAiGenerateCoreResponse?: GenerateCoreResponse;
7585
- /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */
7586
- stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;
7587
- /** Black Forest Labs image generation response. */
7588
- blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;
7589
- /** Replicate image generation response. */
7590
- replicateCreatePredictionResponse?: CreatePredictionResponse;
7591
- /** Stability AI - Edit Image with prompt response. */
7592
- stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;
7593
- /** Runware AI - Flux TextToImage response. */
7594
- runwareTextToImageResponse?: TextToImageResponse;
7595
- /** Google AI - Generate Image with Imagen Model response. */
7596
- googleGenerateImageResponse?: GenerateImageResponse;
7597
- /** Google AI - Generate Video response. */
7598
- googleGenerateVideoResponse?: GenerateVideoResponse;
7599
- /** ML generate image response. */
7600
- mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
7601
- /** OpenAI image creation response. */
7602
- openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;
7603
- /** OpenAI image edit response. */
7604
- openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;
7605
- /** Google create chat completion response. */
7606
- googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;
7607
- /** ML Platform - invoke an OpenAI-type interface with a JSON string */
7608
- mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;
7609
- /** Runware Video inference response */
7610
- runwareVideoInferenceResponse?: VideoInferenceResponse;
7611
- /** Open AI Responses API response */
7612
- openAiResponsesResponse?: V1OpenAiResponsesResponse;
7613
- /** Open AI Responses API response via Azure */
7614
- azureOpenAiResponsesResponse?: OpenAiResponsesResponse;
7615
- /** OpenAI video generation response */
7616
- openAiCreateVideoResponse?: CreateVideoResponse;
7617
- /** Fireworks - OpenAI payload */
7618
- fireworksInvokeCustomOpenAiResponse?: InvokeCustomOpenAiModelResponse;
7619
- /** Extracted generated content data from the model's response. */
7620
- generatedContent?: GeneratedContent;
7621
- /** Extracted cost of the request in microcents. */
7622
- cost?: string | null;
7623
- /** Token usage information. */
7624
- tokenUsage?: V1TokenUsage;
7625
- /** Metadata about the response, such as finish reason. */
7626
- responseMetadata?: ResponseMetadata;
7815
+ interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {
7816
+ /** Custom tool */
7817
+ customTool?: GatewayToolDefinitionCustomTool;
7818
+ /** Built-in tool */
7819
+ builtInTool?: BuiltInTool;
7627
7820
  }
7628
7821
  /** @oneof */
7629
- interface GenerateContentModelResponseResponseOneOf {
7630
- /** OpenAI chat completion response. */
7631
- openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;
7632
- /** Google bison text completion response. */
7633
- googleTextBisonResponse?: TextBisonPredictResponse;
7634
- /** Google bison chat completion response. */
7635
- googleChatBisonResponse?: ChatBisonPredictResponse;
7636
- /** Azure OpenAI chat completion response. */
7637
- azureChatCompletionResponse?: CreateChatCompletionResponse;
7638
- /** Google Gemini generate content response. */
7639
- googleGeminiGenerateContentResponse?: GenerateContentResponse;
7640
- /** Anthropic Claude via Amazon Bedrock generate content response. */
7641
- anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;
7642
- /** Anthropic Claude via Google vertex generate content response. */
7643
- googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;
7644
- /** Native Anthropic API proxy generate content response. */
7645
- invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;
7646
- /** Llama via Amazon Bedrock text completion response. */
7647
- llamaModelResponse?: InvokeLlamaModelResponse;
7648
- /** Invoke Amazon Converse API response. */
7649
- amazonConverseResponse?: InvokeConverseResponse;
7650
- /** Llama via ML Platform text completion response. */
7651
- mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;
7652
- /** Perplexity chat completion response. */
7653
- perplexityChatCompletionResponse?: InvokeChatCompletionResponse;
7654
- /** OpenAI image generation response. */
7655
- openAiCreateImageResponse?: CreateImageResponse;
7656
- /** Stability AI text to image response. */
7657
- stabilityAiTextToImageResponse?: V1TextToImageResponse;
7658
- /** Stability AI generate core response. */
7659
- stabilityAiGenerateCoreResponse?: GenerateCoreResponse;
7660
- /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */
7661
- stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;
7662
- /** Black Forest Labs image generation response. */
7663
- blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;
7664
- /** Replicate image generation response. */
7665
- replicateCreatePredictionResponse?: CreatePredictionResponse;
7666
- /** Stability AI - Edit Image with prompt response. */
7667
- stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;
7668
- /** Runware AI - Flux TextToImage response. */
7669
- runwareTextToImageResponse?: TextToImageResponse;
7670
- /** Google AI - Generate Image with Imagen Model response. */
7671
- googleGenerateImageResponse?: GenerateImageResponse;
7672
- /** Google AI - Generate Video response. */
7673
- googleGenerateVideoResponse?: GenerateVideoResponse;
7674
- /** ML generate image response. */
7675
- mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
7676
- /** OpenAI image creation response. */
7677
- openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;
7678
- /** OpenAI image edit response. */
7679
- openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;
7680
- /** Google create chat completion response. */
7681
- googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;
7682
- /** ML Platform - invoke an OpenAI-type interface with a JSON string */
7683
- mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;
7684
- /** Runware Video inference response */
7685
- runwareVideoInferenceResponse?: VideoInferenceResponse;
7686
- /** Open AI Responses API response */
7687
- openAiResponsesResponse?: V1OpenAiResponsesResponse;
7688
- /** Open AI Responses API response via Azure */
7689
- azureOpenAiResponsesResponse?: OpenAiResponsesResponse;
7690
- /** OpenAI video generation response */
7691
- openAiCreateVideoResponse?: CreateVideoResponse;
7692
- /** Fireworks - OpenAI payload */
7693
- fireworksInvokeCustomOpenAiResponse?: InvokeCustomOpenAiModelResponse;
7822
+ interface GatewayToolDefinitionToolOneOf {
7823
+ /** Custom tool */
7824
+ customTool?: GatewayToolDefinitionCustomTool;
7825
+ /** Built-in tool */
7826
+ builtInTool?: BuiltInTool;
7694
7827
  }
7695
- /** Model generation result, at least one of the fields should be present */
7696
- interface GeneratedContent {
7697
- /**
7698
- * Zero or more textual results. Only present when the model returned a text.
7699
- * @maxSize 1000
7700
- */
7701
- texts?: TextContent[];
7828
+ interface GatewayToolDefinitionCustomTool {
7702
7829
  /**
7703
- * Zero or more images. Only present when the model returned an image.
7704
- * @maxSize 1000
7830
+ * The name of the tool to be called.
7831
+ * @maxLength 64
7705
7832
  */
7706
- images?: MediaContent[];
7833
+ name?: string | null;
7707
7834
  /**
7708
- * Zero or more videos. Only present when the model returned a video.
7709
- * @maxSize 1000
7835
+ * The description of what the tool does.
7836
+ * @maxLength 100000
7710
7837
  */
7711
- videos?: MediaContent[];
7838
+ description?: string | null;
7839
+ /** The parameters the tool accepts, described as a JSON Schema object. */
7840
+ parameters?: Record<string, any> | null;
7841
+ }
7842
+ interface BuiltInTool {
7712
7843
  /**
7713
- * Zero or more thinking texts. Only present when the model returned a thought.
7714
- * @maxSize 1000
7844
+ * The name of the tool to be called.
7845
+ * @maxLength 64
7715
7846
  */
7716
- thinkingTexts?: ThinkingTextContent[];
7847
+ name?: string | null;
7848
+ /** Optional parameters specific to the built-in tool. */
7849
+ parameters?: Record<string, any> | null;
7850
+ }
7851
+ interface GatewayMessageDefinition {
7852
+ /** The role of the message author. */
7853
+ role?: GatewayMessageDefinitionRoleWithLiterals;
7717
7854
  /**
7718
- * Zero or more tool call requests. Only present when the model requested to call a tool.
7719
- * @maxSize 1000
7855
+ * The content of the message.
7856
+ * @maxSize 4096
7720
7857
  */
7721
- tools?: ToolUseContent[];
7858
+ content?: GatewayContentBlock[];
7859
+ }
7860
+ declare enum GatewayMessageDefinitionRole {
7861
+ UNKNOWN = "UNKNOWN",
7862
+ USER = "USER",
7863
+ ASSISTANT = "ASSISTANT",
7864
+ SYSTEM = "SYSTEM",
7865
+ TOOL = "TOOL",
7866
+ DEVELOPER = "DEVELOPER"
7867
+ }
7868
+ /** @enumType */
7869
+ type GatewayMessageDefinitionRoleWithLiterals = GatewayMessageDefinitionRole | 'UNKNOWN' | 'USER' | 'ASSISTANT' | 'SYSTEM' | 'TOOL' | 'DEVELOPER';
7870
+ interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {
7871
+ /** Text content. */
7872
+ text?: TextContent;
7873
+ /** Media content, represented as URL. */
7874
+ media?: MediaContent;
7875
+ /** Tool use content, describes which tool should be used and with which parameters. */
7876
+ toolUse?: ToolUseContent;
7877
+ /** Tool result content, describes the result of tool invocation. */
7878
+ toolResult?: ToolResultContent;
7879
+ /** Represents model's internal thought process. */
7880
+ thinking?: ThinkingTextContent;
7881
+ }
7882
+ /** @oneof */
7883
+ interface GatewayContentBlockTypeOneOf {
7884
+ /** Text content. */
7885
+ text?: TextContent;
7886
+ /** Media content, represented as URL. */
7887
+ media?: MediaContent;
7888
+ /** Tool use content, describes which tool should be used and with which parameters. */
7889
+ toolUse?: ToolUseContent;
7890
+ /** Tool result content, describes the result of tool invocation. */
7891
+ toolResult?: ToolResultContent;
7892
+ /** Represents model's internal thought process. */
7893
+ thinking?: ThinkingTextContent;
7722
7894
  }
7723
7895
  interface TextContent {
7724
7896
  /**
@@ -7749,37 +7921,239 @@ interface MediaContent {
7749
7921
  */
7750
7922
  thoughtSignature?: string | null;
7751
7923
  }
7752
- interface ThinkingTextContent {
7924
+ interface ToolUseContent {
7753
7925
  /**
7754
- * The thought text of the model thinking
7755
- * @maxLength 1000000
7926
+ * Tool use id
7927
+ * @maxLength 100
7756
7928
  */
7757
- thoughtText?: string | null;
7929
+ _id?: string | null;
7930
+ /**
7931
+ * Tool use name
7932
+ * @maxLength 1000
7933
+ */
7934
+ name?: string;
7935
+ /** Tool use input */
7936
+ input?: Record<string, any> | null;
7758
7937
  /**
7759
7938
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
7760
7939
  * @maxLength 10000000
7761
7940
  */
7762
7941
  thoughtSignature?: string | null;
7763
7942
  }
7764
- interface ToolUseContent {
7943
+ interface ToolResultContent {
7765
7944
  /**
7766
7945
  * Tool use id
7767
7946
  * @maxLength 100
7768
7947
  */
7769
- _id?: string | null;
7948
+ toolUseId?: string | null;
7949
+ /** Tool result is error. */
7950
+ error?: boolean | null;
7770
7951
  /**
7771
- * Tool use name
7772
- * @maxLength 1000
7952
+ * Tool result content.
7953
+ * @maxSize 4096
7773
7954
  */
7774
- name?: string;
7775
- /** Tool use input */
7776
- input?: Record<string, any> | null;
7955
+ content?: GatewayContentBlock[];
7956
+ }
7957
+ interface ThinkingTextContent {
7958
+ /**
7959
+ * The thought text of the model thinking
7960
+ * @maxLength 1000000
7961
+ */
7962
+ thoughtText?: string | null;
7777
7963
  /**
7778
7964
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
7779
7965
  * @maxLength 10000000
7780
7966
  */
7781
7967
  thoughtSignature?: string | null;
7782
7968
  }
7969
+ interface ContentGenerationSucceededEvent {
7970
+ /** Model response object that describes the content generation result. */
7971
+ response?: GenerateContentModelResponse;
7972
+ /** Prompt's final form that was used to issue a GenerateContent request. */
7973
+ materializedPrompt?: Prompt;
7974
+ /**
7975
+ * Event chain identifier id. Uniquely generated for each Generate* endpoint call, and stays consistent throughout its lifecycle. Used for correspondence between request events and response events.
7976
+ * @format GUID
7977
+ */
7978
+ eventChainId?: string;
7979
+ }
7980
+ interface GenerateContentModelResponse extends GenerateContentModelResponseResponseOneOf {
7981
+ /** OpenAI chat completion response. */
7982
+ openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;
7983
+ /** Google bison text completion response. */
7984
+ googleTextBisonResponse?: TextBisonPredictResponse;
7985
+ /** Google bison chat completion response. */
7986
+ googleChatBisonResponse?: ChatBisonPredictResponse;
7987
+ /** Azure OpenAI chat completion response. */
7988
+ azureChatCompletionResponse?: CreateChatCompletionResponse;
7989
+ /** Google Gemini generate content response. */
7990
+ googleGeminiGenerateContentResponse?: GenerateContentResponse;
7991
+ /** Anthropic Claude via Amazon Bedrock generate content response. */
7992
+ anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;
7993
+ /** Anthropic Claude via Google vertex generate content response. */
7994
+ googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;
7995
+ /** Native Anthropic API proxy generate content response. */
7996
+ invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;
7997
+ /** Llama via Amazon Bedrock text completion response. */
7998
+ llamaModelResponse?: InvokeLlamaModelResponse;
7999
+ /** Invoke Amazon Converse API response. */
8000
+ amazonConverseResponse?: InvokeConverseResponse;
8001
+ /** Llama via ML Platform text completion response. */
8002
+ mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;
8003
+ /** Perplexity chat completion response. */
8004
+ perplexityChatCompletionResponse?: InvokeChatCompletionResponse;
8005
+ /** OpenAI image generation response. */
8006
+ openAiCreateImageResponse?: CreateImageResponse;
8007
+ /** Stability AI text to image response. */
8008
+ stabilityAiTextToImageResponse?: V1TextToImageResponse;
8009
+ /** Stability AI generate core response. */
8010
+ stabilityAiGenerateCoreResponse?: GenerateCoreResponse;
8011
+ /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */
8012
+ stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;
8013
+ /** Black Forest Labs image generation response. */
8014
+ blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;
8015
+ /** Replicate image generation response. */
8016
+ replicateCreatePredictionResponse?: CreatePredictionResponse;
8017
+ /** Stability AI - Edit Image with prompt response. */
8018
+ stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;
8019
+ /** Runware AI - Flux TextToImage response. */
8020
+ runwareTextToImageResponse?: TextToImageResponse;
8021
+ /** Google AI - Generate Image with Imagen Model response. */
8022
+ googleGenerateImageResponse?: V1GenerateImageResponse;
8023
+ /** Google AI - Generate Video response. */
8024
+ googleGenerateVideoResponse?: GenerateVideoResponse;
8025
+ /** ML generate image response. */
8026
+ mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
8027
+ /** OpenAI image creation response. */
8028
+ openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;
8029
+ /** OpenAI image edit response. */
8030
+ openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;
8031
+ /** Google create chat completion response. */
8032
+ googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;
8033
+ /** ML Platform - invoke an OpenAI-type interface with a JSON string */
8034
+ mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;
8035
+ /** Runware Video inference response */
8036
+ runwareVideoInferenceResponse?: VideoInferenceResponse;
8037
+ /** Open AI Responses API response */
8038
+ openAiResponsesResponse?: V1OpenAiResponsesResponse;
8039
+ /** Open AI Responses API response via Azure */
8040
+ azureOpenAiResponsesResponse?: OpenAiResponsesResponse;
8041
+ /** OpenAI video generation response */
8042
+ openAiCreateVideoResponse?: CreateVideoResponse;
8043
+ /** Fireworks - OpenAI payload */
8044
+ fireworksInvokeCustomOpenAiResponse?: InvokeCustomOpenAiModelResponse;
8045
+ /** Bytedance - Create content task response (video generation) */
8046
+ bytedanceCreateContentTaskResponse?: CreateContentTaskResponse;
8047
+ /** Bytedance - Generate image response (Seedream models) */
8048
+ bytedanceGenerateImageResponse?: GenerateImageResponse;
8049
+ /** Extracted generated content data from the model's response. */
8050
+ generatedContent?: GeneratedContent;
8051
+ /** Extracted cost of the request in microcents. */
8052
+ cost?: string | null;
8053
+ /** Token usage information. */
8054
+ tokenUsage?: V1TokenUsage;
8055
+ /** Metadata about the response, such as finish reason. */
8056
+ responseMetadata?: ResponseMetadata;
8057
+ }
8058
+ /** @oneof */
8059
+ interface GenerateContentModelResponseResponseOneOf {
8060
+ /** OpenAI chat completion response. */
8061
+ openAiChatCompletionResponse?: OpenaiproxyV1CreateChatCompletionResponse;
8062
+ /** Google bison text completion response. */
8063
+ googleTextBisonResponse?: TextBisonPredictResponse;
8064
+ /** Google bison chat completion response. */
8065
+ googleChatBisonResponse?: ChatBisonPredictResponse;
8066
+ /** Azure OpenAI chat completion response. */
8067
+ azureChatCompletionResponse?: CreateChatCompletionResponse;
8068
+ /** Google Gemini generate content response. */
8069
+ googleGeminiGenerateContentResponse?: GenerateContentResponse;
8070
+ /** Anthropic Claude via Amazon Bedrock generate content response. */
8071
+ anthropicClaudeResponse?: InvokeAnthropicClaudeModelResponse;
8072
+ /** Anthropic Claude via Google vertex generate content response. */
8073
+ googleAnthropicClaudeResponse?: V1InvokeAnthropicClaudeModelResponse;
8074
+ /** Native Anthropic API proxy generate content response. */
8075
+ invokeAnthropicModelResponse?: InvokeAnthropicModelResponse;
8076
+ /** Llama via Amazon Bedrock text completion response. */
8077
+ llamaModelResponse?: InvokeLlamaModelResponse;
8078
+ /** Invoke Amazon Converse API response. */
8079
+ amazonConverseResponse?: InvokeConverseResponse;
8080
+ /** Llama via ML Platform text completion response. */
8081
+ mlPlatformLlamaModelResponse?: InvokeMlPlatformLlamaModelResponse;
8082
+ /** Perplexity chat completion response. */
8083
+ perplexityChatCompletionResponse?: InvokeChatCompletionResponse;
8084
+ /** OpenAI image generation response. */
8085
+ openAiCreateImageResponse?: CreateImageResponse;
8086
+ /** Stability AI text to image response. */
8087
+ stabilityAiTextToImageResponse?: V1TextToImageResponse;
8088
+ /** Stability AI generate core response. */
8089
+ stabilityAiGenerateCoreResponse?: GenerateCoreResponse;
8090
+ /** Stability AI - Stable Diffusion 3.0 & 3.5 response. */
8091
+ stabilityAiStableDiffusionResponse?: GenerateStableDiffusionResponse;
8092
+ /** Black Forest Labs image generation response. */
8093
+ blackForestLabsGenerateImageResponse?: GenerateAnImageResponse;
8094
+ /** Replicate image generation response. */
8095
+ replicateCreatePredictionResponse?: CreatePredictionResponse;
8096
+ /** Stability AI - Edit Image with prompt response. */
8097
+ stabilityAiEditImageWithPromptResponse?: EditImageWithPromptResponse;
8098
+ /** Runware AI - Flux TextToImage response. */
8099
+ runwareTextToImageResponse?: TextToImageResponse;
8100
+ /** Google AI - Generate Image with Imagen Model response. */
8101
+ googleGenerateImageResponse?: V1GenerateImageResponse;
8102
+ /** Google AI - Generate Video response. */
8103
+ googleGenerateVideoResponse?: GenerateVideoResponse;
8104
+ /** ML generate image response. */
8105
+ mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
8106
+ /** OpenAI image creation response. */
8107
+ openAiCreateOpenAiImageResponse?: CreateImageOpenAiResponse;
8108
+ /** OpenAI image edit response. */
8109
+ openAiEditOpenAiImageResponse?: EditImageOpenAiResponse;
8110
+ /** Google create chat completion response. */
8111
+ googleCreateChatCompletionResponse?: V1CreateChatCompletionResponse;
8112
+ /** ML Platform - invoke an OpenAI-type interface with a JSON string */
8113
+ mlPlatformOpenAiRawResponse?: InvokeMlPlatformOpenAIChatCompletionRawResponse;
8114
+ /** Runware Video inference response */
8115
+ runwareVideoInferenceResponse?: VideoInferenceResponse;
8116
+ /** Open AI Responses API response */
8117
+ openAiResponsesResponse?: V1OpenAiResponsesResponse;
8118
+ /** Open AI Responses API response via Azure */
8119
+ azureOpenAiResponsesResponse?: OpenAiResponsesResponse;
8120
+ /** OpenAI video generation response */
8121
+ openAiCreateVideoResponse?: CreateVideoResponse;
8122
+ /** Fireworks - OpenAI payload */
8123
+ fireworksInvokeCustomOpenAiResponse?: InvokeCustomOpenAiModelResponse;
8124
+ /** Bytedance - Create content task response (video generation) */
8125
+ bytedanceCreateContentTaskResponse?: CreateContentTaskResponse;
8126
+ /** Bytedance - Generate image response (Seedream models) */
8127
+ bytedanceGenerateImageResponse?: GenerateImageResponse;
8128
+ }
8129
+ /** Model generation result, at least one of the fields should be present */
8130
+ interface GeneratedContent {
8131
+ /**
8132
+ * Zero or more textual results. Only present when the model returned a text.
8133
+ * @maxSize 1000
8134
+ */
8135
+ texts?: TextContent[];
8136
+ /**
8137
+ * Zero or more images. Only present when the model returned an image.
8138
+ * @maxSize 1000
8139
+ */
8140
+ images?: MediaContent[];
8141
+ /**
8142
+ * Zero or more videos. Only present when the model returned a video.
8143
+ * @maxSize 1000
8144
+ */
8145
+ videos?: MediaContent[];
8146
+ /**
8147
+ * Zero or more thinking texts. Only present when the model returned a thought.
8148
+ * @maxSize 1000
8149
+ */
8150
+ thinkingTexts?: ThinkingTextContent[];
8151
+ /**
8152
+ * Zero or more tool call requests. Only present when the model requested to call a tool.
8153
+ * @maxSize 1000
8154
+ */
8155
+ tools?: ToolUseContent[];
8156
+ }
7783
8157
  interface V1TokenUsage {
7784
8158
  /** Number of input tokens used in the request. */
7785
8159
  inputTokens?: number | null;
@@ -8092,6 +8466,13 @@ interface Candidate {
8092
8466
  * @maxLength 100000
8093
8467
  */
8094
8468
  finishMessage?: string | null;
8469
+ /**
8470
+ * The raw finish reason string as returned by Google's API.
8471
+ * Preserves the original value even when it cannot be mapped to the FinishReason enum,
8472
+ * ensuring forward compatibility when Google adds new finish reason values.
8473
+ * @maxLength 1000
8474
+ */
8475
+ finishReasonStr?: string | null;
8095
8476
  }
8096
8477
  interface CandidateContent {
8097
8478
  /**
@@ -8997,7 +9378,7 @@ interface TextToImageTaskResult {
8997
9378
  */
8998
9379
  seed?: string | null;
8999
9380
  }
9000
- interface GenerateImageResponse {
9381
+ interface V1GenerateImageResponse {
9001
9382
  /**
9002
9383
  * Array of generated image results, one for each requested sampleCount
9003
9384
  * @maxSize 8
@@ -9104,9 +9485,9 @@ interface CreateImageOpenAiResponse {
9104
9485
  /** Cost of the request in micro cents. */
9105
9486
  microcentsSpent?: string | null;
9106
9487
  /** Usage information from the API response */
9107
- usage?: ImageUsage;
9488
+ usage?: V1ImageUsage;
9108
9489
  }
9109
- interface ImageUsage {
9490
+ interface V1ImageUsage {
9110
9491
  /** Number of tokens in the input */
9111
9492
  inputTokens?: number | null;
9112
9493
  /** Details about input tokens */
@@ -9135,7 +9516,7 @@ interface EditImageOpenAiResponse {
9135
9516
  /** Cost of the request in micro cents. */
9136
9517
  microcentsSpent?: string | null;
9137
9518
  /** Usage information from the API response */
9138
- usage?: ImageUsage;
9519
+ usage?: V1ImageUsage;
9139
9520
  }
9140
9521
  interface V1CreateChatCompletionResponse {
9141
9522
  /**
@@ -9576,75 +9957,255 @@ interface ErrorInfo {
9576
9957
  */
9577
9958
  message?: string | null;
9578
9959
  }
9579
- interface InvokeCustomOpenAiModelResponse {
9960
+ interface InvokeCustomOpenAiModelResponse {
9961
+ /**
9962
+ * A unique identifier for the chat completion.
9963
+ * @maxLength 100
9964
+ */
9965
+ responseId?: string | null;
9966
+ /**
9967
+ * Description of the response object. Will be equal to "chat.completion" for chat completion.
9968
+ * @maxLength 100
9969
+ */
9970
+ object?: string | null;
9971
+ /** Timestamp for when the response was created. */
9972
+ created?: number | null;
9973
+ /**
9974
+ * Model that produced the completion.
9975
+ * @maxLength 10000
9976
+ */
9977
+ modelId?: string;
9978
+ /** A list of chat completion choices. Can be more than one if n is greater than 1. */
9979
+ choices?: InvokeCustomOpenAiModelResponseChoice[];
9980
+ /** TokenUsage object describing the tokens usage per request. */
9981
+ usage?: InvokeCustomOpenAiModelResponseTokenUsage;
9982
+ /** Cost of the request in microcents. */
9983
+ microcentsSpent?: string | null;
9984
+ /**
9985
+ * This fingerprint represents the backend configuration that the model runs with.
9986
+ * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
9987
+ * @maxLength 10000
9988
+ */
9989
+ systemFingerprint?: string | null;
9990
+ }
9991
+ interface InvokeCustomOpenAiModelResponsePromptTokenDetails {
9992
+ /** Audio input tokens present in the prompt. */
9993
+ audioTokens?: number | null;
9994
+ /** Cached tokens present in the prompt. */
9995
+ cachedTokens?: number | null;
9996
+ }
9997
+ interface InvokeCustomOpenAiModelResponseCompletionTokenDetails {
9998
+ /** Reasoning tokens present in the completion. */
9999
+ reasoningTokens?: number | null;
10000
+ /** Audio tokens present in the completion. */
10001
+ audioTokens?: number | null;
10002
+ /** Accepted prediction tokens. */
10003
+ acceptedPredictionTokens?: number | null;
10004
+ /** Rejected prediction tokens. */
10005
+ rejectedPredictionTokens?: number | null;
10006
+ }
10007
+ interface InvokeCustomOpenAiModelResponseChoice {
10008
+ /** Index of this Choice in choices array. */
10009
+ index?: number | null;
10010
+ /** ChatCompletionMessage object that defines the message. */
10011
+ message?: Fireworks_proxyV1ChatCompletionMessage;
10012
+ /**
10013
+ * Reason why the message generation was stopped.
10014
+ * @maxLength 100
10015
+ */
10016
+ finishReason?: string | null;
10017
+ }
10018
+ interface InvokeCustomOpenAiModelResponseTokenUsage {
10019
+ /** Number of LLM tokens required to encode input. */
10020
+ inputTokens?: number | null;
10021
+ /** Number of LLM tokens required to encode output. */
10022
+ outputTokens?: number | null;
10023
+ /** Total number of LLM tokens used for the request. */
10024
+ totalTokens?: number | null;
10025
+ /** Breakdown of tokens used in the prompt. */
10026
+ promptTokenDetails?: InvokeCustomOpenAiModelResponsePromptTokenDetails;
10027
+ /** Breakdown of tokens used in the completion. */
10028
+ completionTokenDetails?: InvokeCustomOpenAiModelResponseCompletionTokenDetails;
10029
+ }
10030
+ interface CreateContentTaskResponse {
10031
+ task?: CommonContentTaskResponse;
10032
+ }
10033
+ interface CommonContentTaskResponse {
10034
+ /**
10035
+ * The ID of the video generation task.
10036
+ * @format GUID
10037
+ */
10038
+ _id?: string | null;
10039
+ /**
10040
+ * The name and version of the model used by the task.
10041
+ * @maxLength 255
10042
+ */
10043
+ model?: string | null;
10044
+ /**
10045
+ * The status of the task.
10046
+ * Valid values: queued, running, cancelled, succeeded, failed, expired.
10047
+ * @maxLength 255
10048
+ */
10049
+ status?: string | null;
10050
+ /** Error information. Null if the task succeeds. */
10051
+ error?: TaskError;
10052
+ /** The time when the task was created (Unix timestamp in seconds). */
10053
+ createdAt?: string | null;
10054
+ /** The time when the task was last updated (Unix timestamp in seconds). */
10055
+ updatedAt?: string | null;
10056
+ /** The output after the video generation task is completed. */
10057
+ content?: TaskContent;
10058
+ /**
10059
+ * The seed value used for this request.
10060
+ * @min -1
10061
+ * @max 4294967295
10062
+ */
10063
+ seed?: number | null;
10064
+ /**
10065
+ * The resolution of the generated video.
10066
+ * @maxLength 255
10067
+ */
10068
+ resolution?: string | null;
10069
+ /**
10070
+ * The width-to-height ratio of the generated video.
10071
+ * @maxLength 255
10072
+ */
10073
+ ratio?: string | null;
10074
+ /**
10075
+ * The length of the generated video in seconds.
10076
+ * Only one of duration and frames is returned.
10077
+ * @min -1
10078
+ * @max 120
10079
+ */
10080
+ duration?: number | null;
10081
+ /**
10082
+ * Number of frames for the generated video.
10083
+ * Only one of duration and frames is returned.
10084
+ * @min 29
10085
+ * @max 2890
10086
+ */
10087
+ frames?: number | null;
10088
+ /** The frame rate of the generated video. */
10089
+ framespersecond?: number | null;
10090
+ /**
10091
+ * Whether the generated video includes audio synchronized with the visuals.
10092
+ * Only supported by Seedance 1.5 pro.
10093
+ */
10094
+ generateAudio?: boolean | null;
10095
+ /**
10096
+ * Whether the generated video is a Draft video.
10097
+ * Only returned by Seedance 1.5 Pro.
10098
+ */
10099
+ draft?: boolean | null;
10100
+ /**
10101
+ * Draft video task ID. Returned when generating official video from a draft.
10102
+ * @maxLength 1000
10103
+ */
10104
+ draftTaskId?: string | null;
10105
+ /**
10106
+ * The service tier actually used to process the task.
10107
+ * @maxLength 255
10108
+ */
10109
+ serviceTier?: string | null;
10110
+ /**
10111
+ * The expiration threshold for the task, in seconds.
10112
+ * @min 3600
10113
+ * @max 259200
10114
+ */
10115
+ executionExpiresAfter?: number | null;
10116
+ /** The token usage for the request. */
10117
+ usage?: TaskUsage;
10118
+ /** The cost of the request in microcents. */
10119
+ microcentsSpent?: string | null;
10120
+ }
10121
+ interface TaskError {
10122
+ /**
10123
+ * The error code.
10124
+ * @maxLength 255
10125
+ */
10126
+ code?: string | null;
10127
+ /**
10128
+ * The error message.
10129
+ * @maxLength 255
10130
+ */
10131
+ message?: string | null;
10132
+ }
10133
+ interface TaskContent {
10134
+ /**
10135
+ * The URL of the output video. Valid for 24 hours.
10136
+ * @maxLength 255
10137
+ */
10138
+ videoUrl?: string | null;
10139
+ /**
10140
+ * URL of the last frame of the generated video. Valid for 24 hours.
10141
+ * Returned only if return_last_frame was set to true in the creation request.
10142
+ * @maxLength 255
10143
+ */
10144
+ lastFrameUrl?: string | null;
10145
+ }
10146
+ interface TaskUsage {
10147
+ /** The number of tokens consumed for the video output. */
10148
+ completionTokens?: number | null;
10149
+ /** Total tokens for this request (input tokens are always 0). */
10150
+ totalTokens?: number | null;
10151
+ }
10152
+ interface GenerateImageResponse {
9580
10153
  /**
9581
- * A unique identifier for the chat completion.
9582
- * @maxLength 100
10154
+ * The model ID used for generation (model name-version).
10155
+ * @maxLength 255
9583
10156
  */
9584
- responseId?: string | null;
10157
+ model?: string | null;
10158
+ /** Unix timestamp (seconds) of the creation time of the request. */
10159
+ created?: string | null;
9585
10160
  /**
9586
- * Description of the response object. Will be equal to "chat.completion" for chat completion.
9587
- * @maxLength 100
10161
+ * Generated image data. May contain multiple entries for batch generation.
10162
+ * When batch-generating, if an image fails due to content moderation the remaining
10163
+ * images continue; if it fails due to an internal error (500), subsequent images are skipped.
9588
10164
  */
9589
- object?: string | null;
9590
- /** Timestamp for when the response was created. */
9591
- created?: number | null;
10165
+ data?: ImageData[];
10166
+ /** Token usage information for this request. */
10167
+ usage?: ImageUsage;
10168
+ /** Request-level error information, if any. */
10169
+ error?: ImageError;
10170
+ /** The cost of the request in microcents (set by proxy, not vendor). */
10171
+ microcentsSpent?: string | null;
10172
+ }
10173
+ interface ImageData {
9592
10174
  /**
9593
- * Model that produced the completion.
10175
+ * URL of the generated image. Returned when response_format = "url".
10176
+ * The link expires 24 hours after generation.
9594
10177
  * @maxLength 10000
9595
10178
  */
9596
- modelId?: string;
9597
- /** A list of chat completion choices. Can be more than one if n is greater than 1. */
9598
- choices?: InvokeCustomOpenAiModelResponseChoice[];
9599
- /** TokenUsage object describing the tokens usage per request. */
9600
- usage?: InvokeCustomOpenAiModelResponseTokenUsage;
9601
- /** Cost of the request in microcents. */
9602
- microcentsSpent?: string | null;
10179
+ url?: string | null;
10180
+ /** Base64-encoded image data. Returned when response_format = "b64_json". */
10181
+ b64Json?: string | null;
9603
10182
  /**
9604
- * This fingerprint represents the backend configuration that the model runs with.
9605
- * Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
9606
- * @maxLength 10000
10183
+ * The width and height of the generated image in pixels, format "<width>x<height>" (e.g. "2048x2048").
10184
+ * @maxLength 255
9607
10185
  */
9608
- systemFingerprint?: string | null;
9609
- }
9610
- interface InvokeCustomOpenAiModelResponsePromptTokenDetails {
9611
- /** Audio input tokens present in the prompt. */
9612
- audioTokens?: number | null;
9613
- /** Cached tokens present in the prompt. */
9614
- cachedTokens?: number | null;
9615
- }
9616
- interface InvokeCustomOpenAiModelResponseCompletionTokenDetails {
9617
- /** Reasoning tokens present in the completion. */
9618
- reasoningTokens?: number | null;
9619
- /** Audio tokens present in the completion. */
9620
- audioTokens?: number | null;
9621
- /** Accepted prediction tokens. */
9622
- acceptedPredictionTokens?: number | null;
9623
- /** Rejected prediction tokens. */
9624
- rejectedPredictionTokens?: number | null;
10186
+ size?: string | null;
10187
+ /** Per-image error information for a failed generation within a batch. */
10188
+ error?: ImageError;
9625
10189
  }
9626
- interface InvokeCustomOpenAiModelResponseChoice {
9627
- /** Index of this Choice in choices array. */
9628
- index?: number | null;
9629
- /** ChatCompletionMessage object that defines the message. */
9630
- message?: Fireworks_proxyV1ChatCompletionMessage;
10190
+ interface ImageError {
9631
10191
  /**
9632
- * Reason why the message generation was stopped.
9633
- * @maxLength 100
10192
+ * Error code. See ByteDance error codes documentation.
10193
+ * @maxLength 255
9634
10194
  */
9635
- finishReason?: string | null;
10195
+ code?: string | null;
10196
+ /**
10197
+ * Human-readable error message.
10198
+ * @maxLength 1000
10199
+ */
10200
+ message?: string | null;
9636
10201
  }
9637
- interface InvokeCustomOpenAiModelResponseTokenUsage {
9638
- /** Number of LLM tokens required to encode input. */
9639
- inputTokens?: number | null;
9640
- /** Number of LLM tokens required to encode output. */
10202
+ interface ImageUsage {
10203
+ /** Number of images successfully generated, excluding failures. Billing is based on this count. */
10204
+ generatedImages?: number | null;
10205
+ /** Number of output tokens consumed. Calculated as sum(image_width * image_height) / 256, rounded. */
9641
10206
  outputTokens?: number | null;
9642
- /** Total number of LLM tokens used for the request. */
10207
+ /** Total tokens consumed by this request. Same as output_tokens (input tokens are not counted). */
9643
10208
  totalTokens?: number | null;
9644
- /** Breakdown of tokens used in the prompt. */
9645
- promptTokenDetails?: InvokeCustomOpenAiModelResponsePromptTokenDetails;
9646
- /** Breakdown of tokens used in the completion. */
9647
- completionTokenDetails?: InvokeCustomOpenAiModelResponseCompletionTokenDetails;
9648
10209
  }
9649
10210
  interface ContentGenerationFailedEvent {
9650
10211
  /**
@@ -9691,112 +10252,6 @@ interface FallbackProperties {
9691
10252
  /** FallbackPromptConfig object that describes optional second Prompt that can be invoked in case main invocation fails. */
9692
10253
  fallbackPromptConfig?: FallbackPromptConfig;
9693
10254
  }
9694
- interface DynamicRequestConfig {
9695
- /**
9696
- * List of GatewayToolDefinition's, used to overwrite tools in the prompt.
9697
- * @maxSize 100
9698
- */
9699
- gatewayToolDefinitions?: GatewayToolDefinition[];
9700
- /**
9701
- * List of GatewayMessageDefinition's, which will be converted to model-specific format and appended to the messages saved in the prompt.
9702
- * @maxSize 100
9703
- */
9704
- gatewayMessageDefinitions?: GatewayMessageDefinition[];
9705
- }
9706
- interface GatewayToolDefinition extends GatewayToolDefinitionToolOneOf {
9707
- /** Custom tool */
9708
- customTool?: GatewayToolDefinitionCustomTool;
9709
- /** Built-in tool */
9710
- builtInTool?: BuiltInTool;
9711
- }
9712
- /** @oneof */
9713
- interface GatewayToolDefinitionToolOneOf {
9714
- /** Custom tool */
9715
- customTool?: GatewayToolDefinitionCustomTool;
9716
- /** Built-in tool */
9717
- builtInTool?: BuiltInTool;
9718
- }
9719
- interface GatewayToolDefinitionCustomTool {
9720
- /**
9721
- * The name of the tool to be called.
9722
- * @maxLength 64
9723
- */
9724
- name?: string | null;
9725
- /**
9726
- * The description of what the tool does.
9727
- * @maxLength 100000
9728
- */
9729
- description?: string | null;
9730
- /** The parameters the tool accepts, described as a JSON Schema object. */
9731
- parameters?: Record<string, any> | null;
9732
- }
9733
- interface BuiltInTool {
9734
- /**
9735
- * The name of the tool to be called.
9736
- * @maxLength 64
9737
- */
9738
- name?: string | null;
9739
- /** Optional parameters specific to the built-in tool. */
9740
- parameters?: Record<string, any> | null;
9741
- }
9742
- interface GatewayMessageDefinition {
9743
- /** The role of the message author. */
9744
- role?: GatewayMessageDefinitionRoleWithLiterals;
9745
- /**
9746
- * The content of the message.
9747
- * @maxSize 4096
9748
- */
9749
- content?: GatewayContentBlock[];
9750
- }
9751
- declare enum GatewayMessageDefinitionRole {
9752
- UNKNOWN = "UNKNOWN",
9753
- USER = "USER",
9754
- ASSISTANT = "ASSISTANT",
9755
- SYSTEM = "SYSTEM",
9756
- TOOL = "TOOL",
9757
- DEVELOPER = "DEVELOPER"
9758
- }
9759
- /** @enumType */
9760
- type GatewayMessageDefinitionRoleWithLiterals = GatewayMessageDefinitionRole | 'UNKNOWN' | 'USER' | 'ASSISTANT' | 'SYSTEM' | 'TOOL' | 'DEVELOPER';
9761
- interface GatewayContentBlock extends GatewayContentBlockTypeOneOf {
9762
- /** Text content. */
9763
- text?: TextContent;
9764
- /** Media content, represented as URL. */
9765
- media?: MediaContent;
9766
- /** Tool use content, describes which tool should be used and with which parameters. */
9767
- toolUse?: ToolUseContent;
9768
- /** Tool result content, describes the result of tool invocation. */
9769
- toolResult?: ToolResultContent;
9770
- /** Represents model's internal thought process. */
9771
- thinking?: ThinkingTextContent;
9772
- }
9773
- /** @oneof */
9774
- interface GatewayContentBlockTypeOneOf {
9775
- /** Text content. */
9776
- text?: TextContent;
9777
- /** Media content, represented as URL. */
9778
- media?: MediaContent;
9779
- /** Tool use content, describes which tool should be used and with which parameters. */
9780
- toolUse?: ToolUseContent;
9781
- /** Tool result content, describes the result of tool invocation. */
9782
- toolResult?: ToolResultContent;
9783
- /** Represents model's internal thought process. */
9784
- thinking?: ThinkingTextContent;
9785
- }
9786
- interface ToolResultContent {
9787
- /**
9788
- * Tool use id
9789
- * @maxLength 100
9790
- */
9791
- toolUseId?: string | null;
9792
- /** Tool result is error. */
9793
- error?: boolean | null;
9794
- /**
9795
- * Tool result content.
9796
- * @maxSize 4096
9797
- */
9798
- content?: GatewayContentBlock[];
9799
- }
9800
10255
  interface GenerateTextByPromptResponse {
9801
10256
  /** ModelResponse object that describes the text generation result. */
9802
10257
  response?: ModelResponse;
@@ -9895,6 +10350,8 @@ interface GenerationRequestedEvent {
9895
10350
  * @format GUID
9896
10351
  */
9897
10352
  eventChainId?: string;
10353
+ /** Dynamic request configuration, including dynamic tools and messages. */
10354
+ dynamicRequestConfig?: DynamicRequestConfig;
9898
10355
  }
9899
10356
  interface TextGenerationSucceededEvent {
9900
10357
  /** ModelResponse object that describes the text generation result. */
@@ -9923,7 +10380,7 @@ interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {
9923
10380
  /** Azure OpenAI chat completion chunk. */
9924
10381
  azureChatCompletionChunk?: ChatCompletionChunk;
9925
10382
  /** OpenAI chat completion chunk. */
9926
- openaiChatCompletionChunk?: V1ChatCompletionChunk;
10383
+ openaiChatCompletionChunk?: OpenaiproxyV1ChatCompletionChunk;
9927
10384
  /** Anthropic (via Google proxy) chat completion chunk. */
9928
10385
  googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;
9929
10386
  /** Google Gemini GenerateContentResponse chunk. */
@@ -9932,6 +10389,8 @@ interface GeneratedTextChunk extends GeneratedTextChunkModelChunkOneOf {
9932
10389
  amazonAnthropicStreamChunk?: AnthropicStreamChunk;
9933
10390
  /** Native Anthropic API proxy stream chunk. */
9934
10391
  anthropicStreamChunk?: V1AnthropicStreamChunk;
10392
+ /** Fireworks chat completion chunk. */
10393
+ fireworksChatCompletionChunk?: V1ChatCompletionChunk;
9935
10394
  /**
9936
10395
  * Extracted text content from the chunk.
9937
10396
  * @maxLength 100
@@ -9948,7 +10407,7 @@ interface GeneratedTextChunkModelChunkOneOf {
9948
10407
  /** Azure OpenAI chat completion chunk. */
9949
10408
  azureChatCompletionChunk?: ChatCompletionChunk;
9950
10409
  /** OpenAI chat completion chunk. */
9951
- openaiChatCompletionChunk?: V1ChatCompletionChunk;
10410
+ openaiChatCompletionChunk?: OpenaiproxyV1ChatCompletionChunk;
9952
10411
  /** Anthropic (via Google proxy) chat completion chunk. */
9953
10412
  googleAnthropicStreamChunk?: GoogleproxyV1AnthropicStreamChunk;
9954
10413
  /** Google Gemini GenerateContentResponse chunk. */
@@ -9957,6 +10416,8 @@ interface GeneratedTextChunkModelChunkOneOf {
9957
10416
  amazonAnthropicStreamChunk?: AnthropicStreamChunk;
9958
10417
  /** Native Anthropic API proxy stream chunk. */
9959
10418
  anthropicStreamChunk?: V1AnthropicStreamChunk;
10419
+ /** Fireworks chat completion chunk. */
10420
+ fireworksChatCompletionChunk?: V1ChatCompletionChunk;
9960
10421
  }
9961
10422
  interface ChatCompletionChunk {
9962
10423
  /**
@@ -10024,7 +10485,7 @@ interface ChunkChoice {
10024
10485
  /** The index of the choice in the list of choices. */
10025
10486
  index?: number | null;
10026
10487
  }
10027
- interface V1ChatCompletionChunk {
10488
+ interface OpenaiproxyV1ChatCompletionChunk {
10028
10489
  /**
10029
10490
  * A unique identifier for the chat completion. Each chunk has the same ID.
10030
10491
  * @maxLength 100
@@ -10034,7 +10495,7 @@ interface V1ChatCompletionChunk {
10034
10495
  * A list of chat completion choices. Can contain more than one elements if n is greater than 1.
10035
10496
  * Can also be empty for the last chunk if you set stream_options: {"include_usage": true}.
10036
10497
  */
10037
- choices?: ChatCompletionChunkChunkChoice[];
10498
+ choices?: V1ChatCompletionChunkChunkChoice[];
10038
10499
  /**
10039
10500
  * The Unix timestamp (in seconds) of when the chat completion was created.
10040
10501
  * Each chunk has the same timestamp.
@@ -10061,7 +10522,7 @@ interface V1ChatCompletionChunk {
10061
10522
  /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */
10062
10523
  microcentsSpent?: string | null;
10063
10524
  }
10064
- interface ChunkChoiceChunkDelta {
10525
+ interface ChatCompletionChunkChunkChoiceChunkDelta {
10065
10526
  /**
10066
10527
  * The contents of the chunk message.
10067
10528
  * @maxLength 1000
@@ -10075,9 +10536,9 @@ interface ChunkChoiceChunkDelta {
10075
10536
  */
10076
10537
  toolCalls?: V1ChatCompletionMessageToolCall[];
10077
10538
  }
10078
- interface ChatCompletionChunkChunkChoice {
10539
+ interface V1ChatCompletionChunkChunkChoice {
10079
10540
  /** A chat completion delta generated by streamed model responses */
10080
- delta?: ChunkChoiceChunkDelta;
10541
+ delta?: ChatCompletionChunkChunkChoiceChunkDelta;
10081
10542
  /**
10082
10543
  * The reason the model stopped generating tokens. This will be
10083
10544
  * "stop" if the model hit a natural stop point or a provided stop sequence,
@@ -10362,6 +10823,72 @@ interface AnthropicStreamChunkMessageDelta {
10362
10823
  /** Cost of the request so far, in microcents. */
10363
10824
  microcentsSpent?: string | null;
10364
10825
  }
10826
+ interface V1ChatCompletionChunk {
10827
+ /**
10828
+ * A unique identifier for the chat completion. Each chunk has the same ID.
10829
+ * @maxLength 100
10830
+ */
10831
+ responseId?: string | null;
10832
+ /**
10833
+ * A list of chat completion choices. Can contain more than one elements if n is greater than 1.
10834
+ * Can also be empty for the last chunk if you set stream_options: {"include_usage": true}.
10835
+ */
10836
+ choices?: ChatCompletionChunkChunkChoice[];
10837
+ /**
10838
+ * The Unix timestamp (in seconds) of when the chat completion was created.
10839
+ * Each chunk has the same timestamp.
10840
+ */
10841
+ created?: number | null;
10842
+ /** Model that produced the completion. */
10843
+ modelId?: string;
10844
+ /**
10845
+ * This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the
10846
+ * seed request parameter to understand when backend changes have been made that might impact determinism.
10847
+ * @maxLength 10000
10848
+ */
10849
+ systemFingerprint?: string | null;
10850
+ /**
10851
+ * The object type, which is always chat.completion.chunk.
10852
+ * @maxLength 100
10853
+ */
10854
+ object?: string | null;
10855
+ /**
10856
+ * An optional field that will only be present when you set stream_options: {"include_usage": true} in your request.
10857
+ * When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request.
10858
+ */
10859
+ usage?: InvokeCustomOpenAiModelResponseTokenUsage;
10860
+ /** Cost of the entire request in micro cents. Calculated manually and is present only in the last chunk. */
10861
+ microcentsSpent?: string | null;
10862
+ }
10863
+ interface ChunkChoiceChunkDelta {
10864
+ /**
10865
+ * The contents of the chunk message.
10866
+ * @maxLength 1000
10867
+ */
10868
+ content?: string | null;
10869
+ /** The role of the author of this message. */
10870
+ role?: Fireworks_proxyV1ChatCompletionMessageMessageRoleWithLiterals;
10871
+ /**
10872
+ * Tool call requested by the model. Function arguments can be partial jsons and have to be assembled manually.
10873
+ * @maxSize 100
10874
+ */
10875
+ toolCalls?: ChatCompletionMessageToolCall[];
10876
+ }
10877
+ interface ChatCompletionChunkChunkChoice {
10878
+ /** A chat completion delta generated by streamed model responses */
10879
+ delta?: ChunkChoiceChunkDelta;
10880
+ /**
10881
+ * The reason the model stopped generating tokens. This will be
10882
+ * "stop" if the model hit a natural stop point or a provided stop sequence,
10883
+ * "length" if the maximum number of tokens specified in the request was reached,
10884
+ * "content_filter" if content was omitted due to a flag from our content filters,
10885
+ * "tool_calls" if the model called a tool
10886
+ * @maxLength 100
10887
+ */
10888
+ finishReason?: string | null;
10889
+ /** The index of the choice in the list of choices. */
10890
+ index?: number | null;
10891
+ }
10365
10892
  interface GenerateTextByPromptObjectRequest {
10366
10893
  /** Prompt object that describes the text generation request. */
10367
10894
  prompt?: Prompt;
@@ -10927,7 +11454,7 @@ interface ImageModelResponse extends ImageModelResponseResponseOneOf {
10927
11454
  /** Runware AI - Flux TextToImage response. */
10928
11455
  runwareTextToImageResponse?: TextToImageResponse;
10929
11456
  /** Google AI - Generate Image with Imagen Model response. */
10930
- googleGenerateImageResponse?: GenerateImageResponse;
11457
+ googleGenerateImageResponse?: V1GenerateImageResponse;
10931
11458
  /** ML generate image response. */
10932
11459
  mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
10933
11460
  /** OpenAI image creation response. */
@@ -10956,7 +11483,7 @@ interface ImageModelResponseResponseOneOf {
10956
11483
  /** Runware AI - Flux TextToImage response. */
10957
11484
  runwareTextToImageResponse?: TextToImageResponse;
10958
11485
  /** Google AI - Generate Image with Imagen Model response. */
10959
- googleGenerateImageResponse?: GenerateImageResponse;
11486
+ googleGenerateImageResponse?: V1GenerateImageResponse;
10960
11487
  /** ML generate image response. */
10961
11488
  mlPlatformGenerateImageResponse?: GenerateImageMlPlatformResponse;
10962
11489
  /** OpenAI image creation response. */
@@ -10978,6 +11505,8 @@ interface ImageGenerationRequestedEvent {
10978
11505
  * @format GUID
10979
11506
  */
10980
11507
  eventChainId?: string;
11508
+ /** Dynamic request configuration, including dynamic tools and messages. */
11509
+ dynamicRequestConfig?: DynamicRequestConfig;
10981
11510
  }
10982
11511
  interface ImageGenerationSucceededEvent {
10983
11512
  /** ModelResponse object that describes the image generation result. */
@@ -12574,6 +13103,8 @@ interface PollImageGenerationResultRequest extends PollImageGenerationResultRequ
12574
13103
  runwareGetTaskResultRequest?: GetTaskResultRequest;
12575
13104
  /** OpenAI getVideoResult request */
12576
13105
  openAiGetVideoResultRequest?: GetVideoResultRequest;
13106
+ /** Bytedance getContentTask request */
13107
+ bytedanceGetContentTaskRequest?: GetContentTaskRequest;
12577
13108
  /** Contains additional information for the request. */
12578
13109
  userRequestInfo?: UserRequestInfo;
12579
13110
  }
@@ -12587,6 +13118,8 @@ interface PollImageGenerationResultRequestRequestOneOf {
12587
13118
  runwareGetTaskResultRequest?: GetTaskResultRequest;
12588
13119
  /** OpenAI getVideoResult request */
12589
13120
  openAiGetVideoResultRequest?: GetVideoResultRequest;
13121
+ /** Bytedance getContentTask request */
13122
+ bytedanceGetContentTaskRequest?: GetContentTaskRequest;
12590
13123
  }
12591
13124
  interface V1GetResultRequest {
12592
13125
  /**
@@ -12616,6 +13149,13 @@ interface GetVideoResultRequest {
12616
13149
  */
12617
13150
  _id?: string;
12618
13151
  }
13152
+ interface GetContentTaskRequest {
13153
+ /**
13154
+ * The ID of the video generation task to query.
13155
+ * @maxLength 255
13156
+ */
13157
+ taskId?: string;
13158
+ }
12619
13159
  interface PollImageGenerationResultResponse extends PollImageGenerationResultResponseResponseOneOf {
12620
13160
  /** replicate proxy getResult response */
12621
13161
  replicateGetResultResponse?: V1GetResultResponse;
@@ -12625,6 +13165,8 @@ interface PollImageGenerationResultResponse extends PollImageGenerationResultRes
12625
13165
  runwareGetTaskResultResponse?: GetTaskResultResponse;
12626
13166
  /** OpenAI getVideoResult response */
12627
13167
  openAiGetVideoResultResponse?: GetVideoResultResponse;
13168
+ /** Bytedance getContentTask response */
13169
+ bytedanceGetContentTaskResponse?: GetContentTaskResponse;
12628
13170
  }
12629
13171
  /** @oneof */
12630
13172
  interface PollImageGenerationResultResponseResponseOneOf {
@@ -12636,6 +13178,8 @@ interface PollImageGenerationResultResponseResponseOneOf {
12636
13178
  runwareGetTaskResultResponse?: GetTaskResultResponse;
12637
13179
  /** OpenAI getVideoResult response */
12638
13180
  openAiGetVideoResultResponse?: GetVideoResultResponse;
13181
+ /** Bytedance getContentTask response */
13182
+ bytedanceGetContentTaskResponse?: GetContentTaskResponse;
12639
13183
  }
12640
13184
  interface V1GetResultResponse {
12641
13185
  /**
@@ -12698,6 +13242,9 @@ interface GetTaskResultResponseResponseOneOf {
12698
13242
  interface GetVideoResultResponse {
12699
13243
  videoJob?: VideoJob;
12700
13244
  }
13245
+ interface GetContentTaskResponse {
13246
+ task?: CommonContentTaskResponse;
13247
+ }
12701
13248
  interface DomainEvent extends DomainEventBodyOneOf {
12702
13249
  createdEvent?: EntityCreatedEvent;
12703
13250
  updatedEvent?: EntityUpdatedEvent;
@@ -13184,7 +13731,7 @@ interface PublishPromptOptions {
13184
13731
  /** Perplexity chat completion request */
13185
13732
  perplexityChatCompletionRequest?: InvokeChatCompletionRequest;
13186
13733
  /** Google AI - generate image request */
13187
- googleGenerateImageRequest?: GenerateImageRequest;
13734
+ googleGenerateImageRequest?: V1GenerateImageRequest;
13188
13735
  /** ML platform - generate image request */
13189
13736
  mlPlatformGenerateImageRequest?: GenerateImageMlPlatformRequest;
13190
13737
  /** OpenAI image creation response. */
@@ -13207,6 +13754,10 @@ interface PublishPromptOptions {
13207
13754
  openAiCreateVideoRequest?: CreateVideoRequest;
13208
13755
  /** Fireworks - OpenAI payload */
13209
13756
  fireworksInvokeCustomOpenAiRequest?: InvokeCustomOpenAiModelRequest;
13757
+ /** Bytedance - Create content task request (video generation) */
13758
+ bytedanceCreateContentTaskRequest?: CreateContentTaskRequest;
13759
+ /** Bytedance - Generate image request (Seedream models) */
13760
+ bytedanceGenerateImageRequest?: GenerateImageRequest;
13210
13761
  /**
13211
13762
  * Prompt id.
13212
13763
  * @format GUID
@@ -13295,6 +13846,8 @@ interface PollImageGenerationResultOptions extends PollImageGenerationResultOpti
13295
13846
  runwareGetTaskResultRequest?: GetTaskResultRequest;
13296
13847
  /** OpenAI getVideoResult request */
13297
13848
  openAiGetVideoResultRequest?: GetVideoResultRequest;
13849
+ /** Bytedance getContentTask request */
13850
+ bytedanceGetContentTaskRequest?: GetContentTaskRequest;
13298
13851
  }
13299
13852
  /** @oneof */
13300
13853
  interface PollImageGenerationResultOptionsRequestOneOf {
@@ -13306,6 +13859,8 @@ interface PollImageGenerationResultOptionsRequestOneOf {
13306
13859
  runwareGetTaskResultRequest?: GetTaskResultRequest;
13307
13860
  /** OpenAI getVideoResult request */
13308
13861
  openAiGetVideoResultRequest?: GetVideoResultRequest;
13862
+ /** Bytedance getContentTask request */
13863
+ bytedanceGetContentTaskRequest?: GetContentTaskRequest;
13309
13864
  }
13310
13865
 
13311
- export { type AccountInfo, type Action, type ActionEvent, type AlignmentInfoInChunk, type AnthropicClaudeMessage, type AnthropicMessage, AnthropicModel, type AnthropicModelWithLiterals, type AnthropicStreamChunk, type AnthropicStreamChunkContentOneOf, type AnthropicStreamChunkMessageDelta, type ApplicationBudgetInfo, type AsyncGenerationConfig, type Background, type BashTool, type Blob, type BuiltInTool, type CacheControl, CacheControlType, type CacheControlTypeWithLiterals, type Candidate, type CandidateCitationMetadata, type CandidateCitationMetadataCitation, type CandidateContent, type CandidateContentPart, type CharLocationCitation, ChatBisonModel, type ChatBisonModelWithLiterals, type ChatBisonPredictRequest, type ChatBisonPredictResponse, type ChatBisonPrediction, type ChatCompletionChunk, type ChatCompletionChunkChunkChoice, type ChatCompletionMessage, type ChatCompletionMessageContentPart, type ChatCompletionMessageContentPartContentValueOneOf, type ChatCompletionMessageFunctionWithArgs, type ChatCompletionMessageImageUrlContent, ChatCompletionMessageMessageRole, type ChatCompletionMessageMessageRoleWithLiterals, type ChatCompletionMessageToolCall, ChatCompletionModel, type ChatCompletionModelWithLiterals, type ChatInstance, type ChatMessage, type Choice, type ChunkChoice, type ChunkChoiceChunkDelta, type ChunkDelta, type Citation, type CitationMetadata, type CitationTypeOneOf, type CitationsEnabled, ClaudeModel, type ClaudeModelWithLiterals, ClipGuidancePreset, type ClipGuidancePresetWithLiterals, type CodeExecution, type CodeExecutionResult, type CodeExecutionTool, type CodeExecutionToolResult, type CodeExecutionToolResultContentOneOf, type CodeExecutionToolResultError, type CompletionTokenDetails, type ComputerUse, type ComputerUseTool, type Container, type ContainerUpload, type Content, type ContentBlock, type ContentBlockDelta, type ContentBlockDeltaDeltaOneOf, type ContentBlockLocationCitation, type ContentBlockTypeOneOf, type ContentData, type 
ContentGenerationFailedEvent, type ContentGenerationRequestedEvent, type ContentGenerationSucceededEvent, type ContentPart, type ContentPartContentValueOneOf, ContentRole, type ContentRoleWithLiterals, type ConverseContentBlock, type ConverseContentBlockContentOneOf, type ConverseInferenceConfig, type ConverseInputSchema, type ConverseMessage, ConverseModel, type ConverseModelWithLiterals, type ConversePerformanceConfig, type ConverseReasoningContent, type ConverseTool, type ConverseToolResult, type ConverseToolResultContent, type ConverseToolResultContentContentOneOf, type ConverseToolUse, type CreateChatCompletionRequest, type CreateChatCompletionRequestFunctionCallOneOf, type CreateChatCompletionRequestFunctionSignature, type CreateChatCompletionRequestResponseFormat, type CreateChatCompletionRequestTool, type CreateChatCompletionResponse, type CreateChatCompletionResponseChoice, type CreateChatCompletionResponseCompletionTokenDetails, type CreateChatCompletionResponsePromptTokenDetails, type CreateChatCompletionResponseTokenUsage, type CreateEmbeddingsRequest, type CreateEmbeddingsResponse, type CreateEmbeddingsResponseEmbeddingUsage, type CreateImageOpenAiRequest, type CreateImageOpenAiResponse, type CreateImageRequest, type CreateImageResponse, type CreateModerationRequest, type CreateModerationResponse, CreatePredictionModel, type CreatePredictionModelWithLiterals, type CreatePredictionRequest, type CreatePredictionRequestInputOneOf, type CreatePredictionResponse, type CreatePredictionResponseTokenUsage, type CreateSpeechRequest, type CreateSpeechResponse, type CreateTranscriptionRequest, CreateTranscriptionRequestResponseFormat, type CreateTranscriptionRequestResponseFormatWithLiterals, type CreateTranscriptionResponse, type CreateVideoRequest, type CreateVideoResponse, type CustomTool, type DatalabOcr, type DatalabOcrOutput, type DocumentContent, type DocumentSource, type DomainEvent, type DomainEventBodyOneOf, type DynamicRequestConfig, type 
DynamicRetrievalConfig, DynamicRetrievalConfigMode, type DynamicRetrievalConfigModeWithLiterals, EditAction, type EditActionWithLiterals, type EditImageInput, EditImageModel, type EditImageModelWithLiterals, type EditImageOpenAiRequest, type EditImageOpenAiResponse, type EditImageOptions, type EditImageOptionsRequestOneOf, type EditImageRequest, type EditImageResponse, type EditImageWithPromptRequest, EditImageWithPromptRequestModel, type EditImageWithPromptRequestModelWithLiterals, type EditImageWithPromptResponse, ElevenLabsTextToSpeechModel, type ElevenLabsTextToSpeechModelWithLiterals, EmbeddingEncodingFormat, type EmbeddingEncodingFormatWithLiterals, type EmbeddingInfo, type EmbeddingInfoEmbeddingResultOneOf, type EmbeddingInstance, EmbeddingModel, type EmbeddingModelWithLiterals, type EmbeddingPrediction, type EmbeddingUsage, type EntityCreatedEvent, type EntityDeletedEvent, EntityType, type EntityTypeWithLiterals, type EntityUpdatedEvent, Environment, type EnvironmentWithLiterals, type ErrorInfo, type Example, type ExecutableCode, type Expand, type ExperimentalPromptConfig, type Export, type ExtractFromImageMetrics, ExtractFromImageModel, type ExtractFromImageModelWithLiterals, type ExtractFromImageRequest, type ExtractFromImageRequestInputOneOf, type ExtractFromImageResponse, type ExtractFromImageResponseOutputOneOf, type ExtractFromImageResponseTokenUsage, type ExtractFromImageUrls, type FallbackPromptConfig, type FallbackProperties, type FileContent, type FileInput, type FineTuningSpec, FinishReason, type FinishReasonWithLiterals, type Fireworks_proxyV1ChatCompletionMessage, type Fireworks_proxyV1ChatCompletionMessageContentPart, type Fireworks_proxyV1ChatCompletionMessageContentPartContentValueOneOf, type Fireworks_proxyV1ChatCompletionMessageImageUrlContent, Fireworks_proxyV1ChatCompletionMessageMessageRole, type Fireworks_proxyV1ChatCompletionMessageMessageRoleWithLiterals, type FloatEmbedding, type FluxDevControlnet, type FluxPulid, type FrameImage, 
type FunctionCall, type FunctionCallingConfig, type FunctionDeclaration, type FunctionResponse, type FunctionResponseBlob, type FunctionResponsePart, type FunctionResponsePartDataOneOf, type FunctionSignature, type FunctionWithArgs, type GatewayContentBlock, type GatewayContentBlockTypeOneOf, type GatewayMessageDefinition, GatewayMessageDefinitionRole, type GatewayMessageDefinitionRoleWithLiterals, type GatewayToolDefinition, type GatewayToolDefinitionCustomTool, type GatewayToolDefinitionToolOneOf, GenerateAnImageModel, type GenerateAnImageModelWithLiterals, type GenerateAnImageRequest, type GenerateAnImageResponse, type GenerateAudioOptions, type GenerateAudioOptionsAudioRequestOneOf, type GenerateAudioRequest, type GenerateAudioRequestAudioRequestOneOf, type GenerateAudioResponse, type GenerateAudioResponseAudioResponseOneOf, type GenerateAudioStreamedOptions, type GenerateAudioStreamedOptionsAudioRequestOneOf, type GenerateContentByProjectOptions, type GenerateContentByProjectRequest, type GenerateContentByProjectResponse, type GenerateContentByPromptObjectOptions, type GenerateContentByPromptObjectRequest, type GenerateContentByPromptObjectResponse, type GenerateContentByPromptOptions, type GenerateContentByPromptRequest, type GenerateContentByPromptResponse, type GenerateContentModelResponse, type GenerateContentModelResponseResponseOneOf, type GenerateContentRequest, type GenerateContentResponse, type GenerateCoreRequest, GenerateCoreRequestStylePreset, type GenerateCoreRequestStylePresetWithLiterals, type GenerateCoreResponse, type GenerateEmbeddingOptions, type GenerateEmbeddingOptionsEmbeddingRequestOneOf, type GenerateEmbeddingsRequest, type GenerateEmbeddingsRequestEmbeddingRequestOneOf, type GenerateEmbeddingsResponse, type GenerateEmbeddingsResponseEmbeddingResponseOneOf, type GenerateImageByProjectOptions, type GenerateImageByProjectRequest, type GenerateImageByProjectResponse, type GenerateImageByPromptObjectOptions, type 
GenerateImageByPromptObjectRequest, type GenerateImageByPromptObjectResponse, type GenerateImageByPromptOptions, type GenerateImageByPromptRequest, type GenerateImageByPromptResponse, GenerateImageMlPlatformModel, type GenerateImageMlPlatformModelWithLiterals, type GenerateImageMlPlatformRequest, type GenerateImageMlPlatformRequestInputOneOf, type GenerateImageMlPlatformResponse, type GenerateImageRequest, type GenerateImageResponse, type GenerateModerationOptions, type GenerateModerationOptionsModerationRequestOneOf, type GenerateModerationRequest, type GenerateModerationRequestModerationRequestOneOf, type GenerateModerationResponse, type GenerateModerationResponseModerationResponseOneOf, type GenerateStableDiffusionRequest, GenerateStableDiffusionRequestOutputFormat, type GenerateStableDiffusionRequestOutputFormatWithLiterals, type GenerateStableDiffusionResponse, type GenerateTextByProjectOptions, type GenerateTextByProjectRequest, type GenerateTextByProjectResponse, type GenerateTextByProjectStreamedOptions, type GenerateTextByPromptObjectOptions, type GenerateTextByPromptObjectRequest, type GenerateTextByPromptObjectResponse, type GenerateTextByPromptObjectStreamedOptions, type GenerateTextByPromptOptions, type GenerateTextByPromptRequest, type GenerateTextByPromptResponse, type GenerateTextByPromptStreamedOptions, type GenerateTranscriptionOptions, type GenerateTranscriptionOptionsTranscriptionRequestOneOf, type GenerateTranscriptionRequest, type GenerateTranscriptionRequestTranscriptionRequestOneOf, type GenerateTranscriptionResponse, type GenerateTranscriptionResponseTranscriptionResponseOneOf, type GenerateVideoInstance, type GenerateVideoParameters, type GenerateVideoRequest, type GenerateVideoResponse, type GeneratedAudioChunk, type GeneratedAudioChunkAudioChunkOneOf, type GeneratedContent, type GeneratedTextChunk, type GeneratedTextChunkModelChunkOneOf, type GeneratedVideo, type GenerationConfig, GenerationMode, type GenerationModeWithLiterals, type 
GenerationRequestedEvent, type GenerationThinkingConfig, type GetApplicationUsageRequest, type GetApplicationUsageResponse, type GetEmbeddingRequest, type GetEmbeddingResponse, type GetProjectRequest, type GetProjectResponse, type GetPromptOptions, type GetPromptRequest, type GetPromptResponse, type GetResultRequest, type GetResultResponse, type GetStatusRequest, type GetStatusResponse, type GetTaskResultRequest, type GetTaskResultResponse, type GetTaskResultResponseResponseOneOf, type GetVideoResultRequest, type GetVideoResultResponse, type GoogleSearch, type GoogleSearchRetrieval, type GoogleproxyV1AnthropicStreamChunk, type GoogleproxyV1AnthropicStreamChunkContentOneOf, type GoogleproxyV1CacheControl, type GoogleproxyV1ChatCompletionMessage, type GoogleproxyV1ContentBlock, type GoogleproxyV1ContentBlockDelta, type GoogleproxyV1ContentBlockDeltaDeltaOneOf, type GoogleproxyV1ContentBlockTypeOneOf, type GoogleproxyV1ImageUrl, type GoogleproxyV1InputSchema, type GoogleproxyV1McpServer, GoogleproxyV1McpServerType, type GoogleproxyV1McpServerTypeWithLiterals, GoogleproxyV1Model, type GoogleproxyV1ModelWithLiterals, type GoogleproxyV1RedactedThinking, GoogleproxyV1ResponseTypeType, type GoogleproxyV1ResponseTypeTypeWithLiterals, type GoogleproxyV1Text, type GoogleproxyV1Thinking, type GoogleproxyV1ThinkingConfig, type GoogleproxyV1Tool, type GoogleproxyV1ToolChoice, GoogleproxyV1ToolChoiceType, type GoogleproxyV1ToolChoiceTypeWithLiterals, type GoogleproxyV1ToolResult, type GoogleproxyV1ToolUse, type GoogleproxyV1Usage, type GroundingChunk, type GroundingChunkChunkTypeOneOf, type GroundingMetadata, type GroundingSupport, type Guidance, HarmCategory, type HarmCategoryWithLiterals, HarmProbability, type HarmProbabilityWithLiterals, type IdentificationData, type IdentificationDataIdOneOf, type ImageConfig, ImageCoreModel, type ImageCoreModelWithLiterals, ImageEditingModel, type ImageEditingModelWithLiterals, type ImageEditingRequest, type ImageEditingResponse, type 
ImageGenerationFailedEvent, type ImageGenerationRequestedEvent, type ImageGenerationSucceededEvent, type ImageInput, ImageMediaTypeMediaType, type ImageMediaTypeMediaTypeWithLiterals, ImageModel, type ImageModelResponse, type ImageModelResponseResponseOneOf, type ImageModelWithLiterals, type ImageObject, type ImageOutputOptions, ImageQuality, type ImageQualityWithLiterals, ImageSize, type ImageSizeWithLiterals, ImageStableDiffusionModel, type ImageStableDiffusionModelWithLiterals, ImageStyle, type ImageStyleWithLiterals, type ImageUrl, type ImageUrlContent, type ImageUrlInput, type ImageUsage, ImagenModel, type ImagenModelWithLiterals, type IncompleteDetails, type InputSchema, type Inputs, type Instance, type InvokeAnthropicClaudeModelRequest, type InvokeAnthropicClaudeModelRequestTool, type InvokeAnthropicClaudeModelResponse, type InvokeAnthropicModelRequest, type InvokeAnthropicModelResponse, type InvokeChatCompletionRequest, type InvokeChatCompletionRequestResponseFormat, type InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf, type InvokeChatCompletionResponse, type InvokeChatCompletionResponseChoice, type InvokeChatCompletionResponseUsage, type InvokeConverseRequest, type InvokeConverseResponse, type InvokeConverseResponseTokenUsage, type InvokeCustomOpenAiModelRequest, type InvokeCustomOpenAiModelRequestFunctionCallOneOf, type InvokeCustomOpenAiModelRequestFunctionSignature, type InvokeCustomOpenAiModelRequestResponseFormat, type InvokeCustomOpenAiModelRequestTool, type InvokeCustomOpenAiModelResponse, type InvokeCustomOpenAiModelResponseChoice, type InvokeCustomOpenAiModelResponseCompletionTokenDetails, type InvokeCustomOpenAiModelResponsePromptTokenDetails, type InvokeCustomOpenAiModelResponseTokenUsage, type InvokeLlamaModelRequest, type InvokeLlamaModelResponse, type InvokeMlPlatformLlamaModelRequest, type InvokeMlPlatformLlamaModelResponse, type InvokeMlPlatformOpenAIChatCompletionRawRequest, type InvokeMlPlatformOpenAIChatCompletionRawResponse, 
type JsonSchema, Language, type LanguageWithLiterals, type Lighting, LlamaModel, type LlamaModelWithLiterals, type LoraModelSelect, type LucatacoFlorence2Large, type Margin, type McpServer, type McpServerToolConfiguration, McpServerType, type McpServerTypeWithLiterals, type McpToolUse, type MediaContent, type MediaResolution, MediaResolutionLevel, type MediaResolutionLevelWithLiterals, MediaType, type MediaTypeWithLiterals, type MessageDelta, type MessageEnvelope, MessageRole, MessageRoleRole, type MessageRoleRoleWithLiterals, type MessageRoleWithLiterals, type Metadata, type Metrics, Modality, type ModalityTokenCount, type ModalityWithLiterals, Mode, type ModeWithLiterals, Model, type ModelResponse, type ModelResponseResponseOneOf, type ModelWithLiterals, type ModerationResult, type MultiModalInput, type MultiModalInputContentValueOneOf, OpenAiImageModel, type OpenAiImageModelWithLiterals, type OpenAiImageTokenDetails, type OpenAiResponsesRequest, type OpenAiResponsesResponse, type OpenAiResponsesResponseIncompleteDetails, type OpenaiproxyV1ChatCompletionMessage, type OpenaiproxyV1ChatCompletionMessageContentPart, type OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf, type OpenaiproxyV1ChatCompletionMessageImageUrlContent, OpenaiproxyV1ChatCompletionMessageMessageRole, type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals, type OpenaiproxyV1CreateChatCompletionRequest, type OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf, type OpenaiproxyV1CreateChatCompletionRequestResponseFormat, type OpenaiproxyV1CreateChatCompletionResponse, type OpenaiproxyV1CreateChatCompletionResponseChoice, type OpenaiproxyV1CreateChatCompletionResponseTokenUsage, OpenaiproxyV1EmbeddingModel, type OpenaiproxyV1EmbeddingModelWithLiterals, OpenaiproxyV1Model, type OpenaiproxyV1ModelWithLiterals, OutageStatus, type OutageStatusWithLiterals, Outcome, type OutcomeWithLiterals, type OutpaintDirection, type Output, type OutputAnnotation, type 
OutputAnnotationAnnotationTypeOneOf, type OutputContent, OutputFormat, type OutputFormatWithLiterals, type OutputOptions, type Padding, type PageLocationCitation, type Parameters, type PerceptronIsaac01, type PerplexityImageDescriptor, type PerplexityMessage, PerplexityMessageMessageRole, type PerplexityMessageMessageRoleWithLiterals, PerplexityModel, type PerplexityModelWithLiterals, PersonGeneration, type PersonGenerationWithLiterals, type PollImageGenerationResultOptions, type PollImageGenerationResultOptionsRequestOneOf, type PollImageGenerationResultRequest, type PollImageGenerationResultRequestRequestOneOf, type PollImageGenerationResultResponse, type PollImageGenerationResultResponseResponseOneOf, type PredictParameters, type Prediction, type PredictionMetrics, type PredictionUrls, type Project, type ProjectConfigChangedDomainEvent, type Prompt, type PromptModelRequestOneOf, type PromptTokenDetails, type PronunciationDictionaryLocator, type PrunaaiZImageTurbo, type PublicationDate, type PublishProjectOptions, type PublishProjectRequest, type PublishProjectResponse, type PublishPromptOptions, type PublishPromptRequest, type PublishPromptResponse, type QwenImageLayered, type ReasoningText, type Recraft_proxyV1EditImageRequest, type Recraft_proxyV1EditImageResponse, type RedactedThinking, type RemoveBackgroundRequest, type RemoveBackgroundResponse, type RequestMetadata, type ResponseFormat, type ResponseMetadata, ResponseType, ResponseTypeType, type ResponseTypeTypeWithLiterals, type ResponseTypeWithLiterals, type ResponsesCodeInterpreter, type ResponsesCodeInterpreterContainer, type ResponsesCodeInterpreterContainerAuto, type ResponsesCodeInterpreterContainerContainerTypeOneOf, type ResponsesCodeInterpreterImageOutput, type ResponsesCodeInterpreterLogsOutput, type ResponsesCodeInterpreterOutput, type ResponsesCodeInterpreterOutputOutputTypeOneOf, type ResponsesCodeInterpreterToolCall, type ResponsesFunction, type ResponsesFunctionToolCall, type 
ResponsesFunctionToolCallOutput, type ResponsesInputItem, type ResponsesInputItemItemOneOf, type ResponsesInputMessage, type ResponsesInputMessageContent, type ResponsesInputMessageContentContentValueOneOf, type ResponsesInputMessageContentFileInput, type ResponsesInputMessageContentImageInput, ResponsesInputMessageResponsesMessageRole, type ResponsesInputMessageResponsesMessageRoleWithLiterals, type ResponsesInputTokensDetails, ResponsesMessageRole, type ResponsesMessageRoleWithLiterals, ResponsesModel, type ResponsesModelWithLiterals, type ResponsesOutput, type ResponsesOutputMessage, type ResponsesOutputMessageOutputContent, type ResponsesOutputOutputOneOf, type ResponsesOutputTokensDetails, type ResponsesReasoning, type ResponsesReasoningContent, type ResponsesReasoningOutput, type ResponsesReasoningSummaryContent, type ResponsesTextFormat, type ResponsesTextFormatFormatOneOf, type ResponsesTextFormatJsonSchema, type ResponsesTokenUsage, type ResponsesTool, type ResponsesToolChoice, type ResponsesToolToolTypeOneOf, type ResponsesWebSearch, type ResponsesWebSearchToolCall, type ResponsesWebSearchToolCallAction, type ResponsesWebSearchUserLocation, type RestoreInfo, type ResultObject, type RetrievalMetadata, type RetrievedContext, type ReveEdit, Role, type RoleWithLiterals, type SafetyAttribute, type SafetyAttributes, type SafetyRating, type SafetySetting, Sampler, type SamplerWithLiterals, type SearchEntryPoint, type SearchResultLocationCitation, type Segment, type Segmentation, type ServerToolUse, type Shadow, type SimpleContentBlock, type SimpleContentBlockTypeOneOf, type SpeechChunk, SpeechModel, type SpeechModelWithLiterals, type SpiGenerationConfig, type Statistics, StylePreset, type StylePresetWithLiterals, type SystemContentBlock, type SystemInstruction, TaskInput, type TaskInputWithLiterals, TaskType, type TaskTypeWithLiterals, type Text, TextBisonModel, type TextBisonModelWithLiterals, type TextBisonPredictRequest, type TextBisonPredictResponse, type 
TextBisonPrediction, type TextContent, type TextEditorTool, type TextEmbeddingInstance, type TextEmbeddingParameters, type TextGenerationFailedEvent, type TextGenerationSucceededEvent, type TextInstance, type TextPrompt, type TextRemoval, type TextToImageRequest, TextToImageRequestModel, type TextToImageRequestModelWithLiterals, TextToImageRequestStylePreset, type TextToImageRequestStylePresetWithLiterals, type TextToImageResponse, type TextToImageTaskResult, type TextToSpeechChunk, type TextToSpeechRequest, type Thinking, type ThinkingConfig, type ThinkingTextContent, Threshold, type ThresholdWithLiterals, type TimestampGranularities, TimestampGranularity, type TimestampGranularityWithLiterals, type TokenCount, type TokenMetadata, type TokenUsage, type Tool, type ToolCall, type ToolChoice, ToolChoiceType, type ToolChoiceTypeWithLiterals, type ToolConfig, type ToolConfiguration, type ToolResult, type ToolResultContent, type ToolResultContentBlock, type ToolResultContentBlockTypeOneOf, type ToolResultSearchResult, type ToolSpecification, type ToolUse, type ToolUseContent, TranscriptionModel, type TranscriptionModelWithLiterals, Type, type TypeWithLiterals, type UrlCitation, type Usage, type UsageCacheCreation, type UsageMetadata, type UsageServerToolUse, type UserLocation, type UserPerApplicationBudgetInfo, type UserRequestInfo, type V1AnthropicClaudeMessage, type V1AnthropicStreamChunk, type V1AnthropicStreamChunkContentOneOf, type V1AnthropicStreamChunkMessageDelta, type V1CacheControl, V1CacheControlType, type V1CacheControlTypeWithLiterals, type V1ChatCompletionChunk, type V1ChatCompletionMessage, type V1ChatCompletionMessageContentPart, type V1ChatCompletionMessageContentPartContentValueOneOf, type V1ChatCompletionMessageFunctionWithArgs, type V1ChatCompletionMessageImageUrlContent, V1ChatCompletionMessageMessageRole, type V1ChatCompletionMessageMessageRoleWithLiterals, type V1ChatCompletionMessageToolCall, type V1Citation, type V1CodeExecutionResult, type 
V1ContentBlock, type V1ContentBlockDelta, type V1ContentBlockDeltaDeltaOneOf, type V1ContentBlockTypeOneOf, type V1ContentPart, type V1CreateChatCompletionRequest, type V1CreateChatCompletionRequestResponseFormat, type V1CreateChatCompletionRequestTool, type V1CreateChatCompletionResponse, type V1CreateChatCompletionResponseChoice, type V1CreateChatCompletionResponseTokenUsage, type V1CreateEmbeddingsRequest, type V1CreateEmbeddingsResponse, V1EditImageModel, type V1EditImageModelWithLiterals, type V1EditImageRequest, type V1EditImageResponse, V1EmbeddingEncodingFormat, type V1EmbeddingEncodingFormatWithLiterals, type V1EmbeddingInfo, type V1EmbeddingInfoEmbeddingResultOneOf, V1EmbeddingModel, type V1EmbeddingModelWithLiterals, type V1FineTuningSpec, type V1FloatEmbedding, type V1FluxPulid, type V1GetResultRequest, type V1GetResultResponse, type V1ImageInput, V1ImageMediaTypeMediaType, type V1ImageMediaTypeMediaTypeWithLiterals, V1ImageModel, type V1ImageModelWithLiterals, type V1ImageObject, type V1ImageUrl, type V1InputSchema, type V1InvokeAnthropicClaudeModelRequest, type V1InvokeAnthropicClaudeModelResponse, type V1McpServer, type V1McpServerToolConfiguration, V1McpServerType, type V1McpServerTypeWithLiterals, V1MessageRoleRole, type V1MessageRoleRoleWithLiterals, V1Model, type V1ModelWithLiterals, type V1OpenAiResponsesRequest, type V1OpenAiResponsesResponse, type V1OutputAnnotation, type V1OutputAnnotationAnnotationTypeOneOf, type V1RedactedThinking, V1ResponseTypeType, type V1ResponseTypeTypeWithLiterals, type V1ResponsesCodeInterpreter, type V1ResponsesCodeInterpreterContainer, type V1ResponsesCodeInterpreterContainerAuto, type V1ResponsesCodeInterpreterContainerContainerTypeOneOf, type V1ResponsesCodeInterpreterImageOutput, type V1ResponsesCodeInterpreterLogsOutput, type V1ResponsesCodeInterpreterOutput, type V1ResponsesCodeInterpreterOutputOutputTypeOneOf, type V1ResponsesCodeInterpreterToolCall, type V1ResponsesFunction, type V1ResponsesFunctionToolCall, 
type V1ResponsesFunctionToolCallOutput, type V1ResponsesInputItem, type V1ResponsesInputItemItemOneOf, type V1ResponsesInputMessage, type V1ResponsesInputMessageContent, type V1ResponsesInputMessageContentContentValueOneOf, type V1ResponsesInputTokensDetails, V1ResponsesModel, type V1ResponsesModelWithLiterals, type V1ResponsesOutput, type V1ResponsesOutputMessage, type V1ResponsesOutputOutputOneOf, type V1ResponsesOutputTokensDetails, type V1ResponsesReasoning, type V1ResponsesReasoningContent, type V1ResponsesReasoningOutput, type V1ResponsesReasoningSummaryContent, type V1ResponsesTextFormat, type V1ResponsesTextFormatFormatOneOf, type V1ResponsesTokenUsage, type V1ResponsesTool, type V1ResponsesToolChoice, type V1ResponsesToolToolTypeOneOf, type V1ResponsesWebSearch, type V1ResponsesWebSearchToolCall, type V1Segment, type V1SimpleContentBlock, type V1SimpleContentBlockTypeOneOf, type V1Text, type V1TextToImageRequest, type V1TextToImageResponse, type V1Thinking, type V1ThinkingConfig, type V1TokenUsage, type V1Tool, type V1ToolChoice, V1ToolChoiceType, type V1ToolChoiceTypeWithLiterals, type V1ToolConfig, type V1ToolKindOneOf, type V1ToolResult, type V1ToolUse, type V1UrlCitation, type V1Usage, V1VideoModel, type V1VideoModelWithLiterals, VideoGenModel, type VideoGenModelWithLiterals, type VideoInferenceRequest, type VideoInferenceResponse, type VideoInferenceTaskResult, type VideoInputs, type VideoJob, VideoModel, type VideoModelWithLiterals, type VoiceSettings, type Web, type WebFetchTool, type WebFetchToolResult, type WebFetchToolResultContentError, type WebFetchToolResultContentOneOf, type WebFetchToolResultContentSuccess, type WebSearchResult, type WebSearchResultList, type WebSearchResultLocationCitation, type WebSearchTool, type WebSearchToolResult, type WebSearchToolResultContentOneOf, type WebSearchToolResultError, type WebSearchUserLocation, WebhookIdentityType, type WebhookIdentityTypeWithLiterals, type Wix_ai_gatewayV1EditImageRequest, type 
Wix_ai_gatewayV1EditImageRequestRequestOneOf, type Wix_ai_gatewayV1EditImageResponse, type Wix_ai_gatewayV1EditImageResponseResponseOneOf, type Word };
13866
+ export { type AccountInfo, type Action, type ActionEvent, type AlignmentInfoInChunk, type AnthropicClaudeMessage, type AnthropicMessage, AnthropicModel, type AnthropicModelWithLiterals, type AnthropicStreamChunk, type AnthropicStreamChunkContentOneOf, type AnthropicStreamChunkMessageDelta, type AnthropicV1ImageUrl, type ApplicationBudgetInfo, type AsyncGenerationConfig, type Background, type BashTool, type Blob, type BuiltInTool, type CacheControl, CacheControlType, type CacheControlTypeWithLiterals, type Candidate, type CandidateCitationMetadata, type CandidateCitationMetadataCitation, type CandidateContent, type CandidateContentPart, type CharLocationCitation, ChatBisonModel, type ChatBisonModelWithLiterals, type ChatBisonPredictRequest, type ChatBisonPredictResponse, type ChatBisonPrediction, type ChatCompletionChunk, type ChatCompletionChunkChunkChoice, type ChatCompletionChunkChunkChoiceChunkDelta, type ChatCompletionMessage, type ChatCompletionMessageContentPart, type ChatCompletionMessageContentPartContentValueOneOf, type ChatCompletionMessageFunctionWithArgs, type ChatCompletionMessageImageUrlContent, ChatCompletionMessageMessageRole, type ChatCompletionMessageMessageRoleWithLiterals, type ChatCompletionMessageToolCall, ChatCompletionModel, type ChatCompletionModelWithLiterals, type ChatInstance, type ChatMessage, type Choice, type ChunkChoice, type ChunkChoiceChunkDelta, type ChunkDelta, type Citation, type CitationMetadata, type CitationTypeOneOf, type CitationsEnabled, ClaudeModel, type ClaudeModelWithLiterals, ClipGuidancePreset, type ClipGuidancePresetWithLiterals, type CodeExecution, type CodeExecutionResult, type CodeExecutionTool, type CodeExecutionToolResult, type CodeExecutionToolResultContentOneOf, type CodeExecutionToolResultError, type CommonContentTaskResponse, type CompletionTokenDetails, type ComputerUse, type ComputerUseTool, type Container, type ContainerUpload, type Content, type ContentBlock, type ContentBlockDelta, type 
ContentBlockDeltaDeltaOneOf, type ContentBlockLocationCitation, type ContentBlockTypeOneOf, type ContentData, type ContentGenerationFailedEvent, type ContentGenerationRequestedEvent, type ContentGenerationSucceededEvent, type ContentItem, type ContentPart, type ContentPartContentValueOneOf, ContentRole, type ContentRoleWithLiterals, type ConverseContentBlock, type ConverseContentBlockContentOneOf, type ConverseInferenceConfig, type ConverseInputSchema, type ConverseMessage, ConverseModel, type ConverseModelWithLiterals, type ConversePerformanceConfig, type ConverseReasoningContent, type ConverseTool, type ConverseToolResult, type ConverseToolResultContent, type ConverseToolResultContentContentOneOf, type ConverseToolUse, type CreateChatCompletionRequest, type CreateChatCompletionRequestFunctionCallOneOf, type CreateChatCompletionRequestFunctionSignature, type CreateChatCompletionRequestResponseFormat, type CreateChatCompletionRequestTool, type CreateChatCompletionResponse, type CreateChatCompletionResponseChoice, type CreateChatCompletionResponseCompletionTokenDetails, type CreateChatCompletionResponsePromptTokenDetails, type CreateChatCompletionResponseTokenUsage, type CreateContentTaskRequest, type CreateContentTaskResponse, type CreateEmbeddingsRequest, type CreateEmbeddingsResponse, type CreateEmbeddingsResponseEmbeddingUsage, type CreateImageOpenAiRequest, type CreateImageOpenAiResponse, type CreateImageRequest, type CreateImageResponse, type CreateModerationRequest, type CreateModerationResponse, CreatePredictionModel, type CreatePredictionModelWithLiterals, type CreatePredictionRequest, type CreatePredictionRequestInputOneOf, type CreatePredictionResponse, type CreatePredictionResponseTokenUsage, type CreateSpeechRequest, type CreateSpeechResponse, type CreateTranscriptionRequest, CreateTranscriptionRequestResponseFormat, type CreateTranscriptionRequestResponseFormatWithLiterals, type CreateTranscriptionResponse, type CreateVideoRequest, type 
CreateVideoResponse, type CustomTool, type DatalabOcr, type DatalabOcrOutput, type DocumentContent, type DocumentSource, type DomainEvent, type DomainEventBodyOneOf, type DraftTask, type DynamicRequestConfig, type DynamicRetrievalConfig, DynamicRetrievalConfigMode, type DynamicRetrievalConfigModeWithLiterals, EditAction, type EditActionWithLiterals, type EditImageInput, EditImageModel, type EditImageModelWithLiterals, type EditImageOpenAiRequest, type EditImageOpenAiResponse, type EditImageOptions, type EditImageOptionsRequestOneOf, type EditImageRequest, type EditImageResponse, type EditImageWithPromptRequest, EditImageWithPromptRequestModel, type EditImageWithPromptRequestModelWithLiterals, type EditImageWithPromptResponse, ElevenLabsTextToSpeechModel, type ElevenLabsTextToSpeechModelWithLiterals, EmbeddingEncodingFormat, type EmbeddingEncodingFormatWithLiterals, type EmbeddingInfo, type EmbeddingInfoEmbeddingResultOneOf, type EmbeddingInstance, EmbeddingModel, type EmbeddingModelWithLiterals, type EmbeddingPrediction, type EmbeddingUsage, type EntityCreatedEvent, type EntityDeletedEvent, EntityType, type EntityTypeWithLiterals, type EntityUpdatedEvent, Environment, type EnvironmentWithLiterals, type ErrorInfo, type Example, type ExecutableCode, type Expand, type ExperimentalPromptConfig, type Export, type ExtractFromImageMetrics, ExtractFromImageModel, type ExtractFromImageModelWithLiterals, type ExtractFromImageRequest, type ExtractFromImageRequestInputOneOf, type ExtractFromImageResponse, type ExtractFromImageResponseOutputOneOf, type ExtractFromImageResponseTokenUsage, type ExtractFromImageUrls, type FallbackPromptConfig, type FallbackProperties, type FileContent, type FileInput, type FineTuningSpec, FinishReason, type FinishReasonWithLiterals, type Fireworks_proxyV1ChatCompletionMessage, type Fireworks_proxyV1ChatCompletionMessageContentPart, type Fireworks_proxyV1ChatCompletionMessageContentPartContentValueOneOf, type 
Fireworks_proxyV1ChatCompletionMessageImageUrlContent, Fireworks_proxyV1ChatCompletionMessageMessageRole, type Fireworks_proxyV1ChatCompletionMessageMessageRoleWithLiterals, type FloatEmbedding, type FluxDevControlnet, type FluxPulid, type FrameImage, type FunctionCall, type FunctionCallingConfig, type FunctionDeclaration, type FunctionResponse, type FunctionResponseBlob, type FunctionResponsePart, type FunctionResponsePartDataOneOf, type FunctionSignature, type FunctionWithArgs, type GatewayContentBlock, type GatewayContentBlockTypeOneOf, type GatewayMessageDefinition, GatewayMessageDefinitionRole, type GatewayMessageDefinitionRoleWithLiterals, type GatewayToolDefinition, type GatewayToolDefinitionCustomTool, type GatewayToolDefinitionToolOneOf, GenerateAnImageModel, type GenerateAnImageModelWithLiterals, type GenerateAnImageRequest, type GenerateAnImageResponse, type GenerateAudioOptions, type GenerateAudioOptionsAudioRequestOneOf, type GenerateAudioRequest, type GenerateAudioRequestAudioRequestOneOf, type GenerateAudioResponse, type GenerateAudioResponseAudioResponseOneOf, type GenerateAudioStreamedOptions, type GenerateAudioStreamedOptionsAudioRequestOneOf, type GenerateContentByProjectOptions, type GenerateContentByProjectRequest, type GenerateContentByProjectResponse, type GenerateContentByPromptObjectOptions, type GenerateContentByPromptObjectRequest, type GenerateContentByPromptObjectResponse, type GenerateContentByPromptOptions, type GenerateContentByPromptRequest, type GenerateContentByPromptResponse, type GenerateContentModelResponse, type GenerateContentModelResponseResponseOneOf, type GenerateContentRequest, type GenerateContentResponse, type GenerateCoreRequest, GenerateCoreRequestStylePreset, type GenerateCoreRequestStylePresetWithLiterals, type GenerateCoreResponse, type GenerateEmbeddingOptions, type GenerateEmbeddingOptionsEmbeddingRequestOneOf, type GenerateEmbeddingsRequest, type GenerateEmbeddingsRequestEmbeddingRequestOneOf, type 
GenerateEmbeddingsResponse, type GenerateEmbeddingsResponseEmbeddingResponseOneOf, type GenerateImageByProjectOptions, type GenerateImageByProjectRequest, type GenerateImageByProjectResponse, type GenerateImageByPromptObjectOptions, type GenerateImageByPromptObjectRequest, type GenerateImageByPromptObjectResponse, type GenerateImageByPromptOptions, type GenerateImageByPromptRequest, type GenerateImageByPromptResponse, GenerateImageMlPlatformModel, type GenerateImageMlPlatformModelWithLiterals, type GenerateImageMlPlatformRequest, type GenerateImageMlPlatformRequestInputOneOf, type GenerateImageMlPlatformResponse, type GenerateImageRequest, type GenerateImageResponse, type GenerateModerationOptions, type GenerateModerationOptionsModerationRequestOneOf, type GenerateModerationRequest, type GenerateModerationRequestModerationRequestOneOf, type GenerateModerationResponse, type GenerateModerationResponseModerationResponseOneOf, type GenerateStableDiffusionRequest, GenerateStableDiffusionRequestOutputFormat, type GenerateStableDiffusionRequestOutputFormatWithLiterals, type GenerateStableDiffusionResponse, type GenerateTextByProjectOptions, type GenerateTextByProjectRequest, type GenerateTextByProjectResponse, type GenerateTextByProjectStreamedOptions, type GenerateTextByPromptObjectOptions, type GenerateTextByPromptObjectRequest, type GenerateTextByPromptObjectResponse, type GenerateTextByPromptObjectStreamedOptions, type GenerateTextByPromptOptions, type GenerateTextByPromptRequest, type GenerateTextByPromptResponse, type GenerateTextByPromptStreamedOptions, type GenerateTranscriptionOptions, type GenerateTranscriptionOptionsTranscriptionRequestOneOf, type GenerateTranscriptionRequest, type GenerateTranscriptionRequestTranscriptionRequestOneOf, type GenerateTranscriptionResponse, type GenerateTranscriptionResponseTranscriptionResponseOneOf, type GenerateVideoInstance, type GenerateVideoParameters, type GenerateVideoRequest, type GenerateVideoResponse, type 
GeneratedAudioChunk, type GeneratedAudioChunkAudioChunkOneOf, type GeneratedContent, type GeneratedTextChunk, type GeneratedTextChunkModelChunkOneOf, type GeneratedVideo, type GenerationConfig, GenerationMode, type GenerationModeWithLiterals, type GenerationRequestedEvent, type GenerationThinkingConfig, type GetApplicationUsageRequest, type GetApplicationUsageResponse, type GetContentTaskRequest, type GetContentTaskResponse, type GetEmbeddingRequest, type GetEmbeddingResponse, type GetProjectRequest, type GetProjectResponse, type GetPromptOptions, type GetPromptRequest, type GetPromptResponse, type GetResultRequest, type GetResultResponse, type GetStatusRequest, type GetStatusResponse, type GetTaskResultRequest, type GetTaskResultResponse, type GetTaskResultResponseResponseOneOf, type GetVideoResultRequest, type GetVideoResultResponse, type GoogleSearch, type GoogleSearchRetrieval, type GoogleproxyV1AnthropicStreamChunk, type GoogleproxyV1AnthropicStreamChunkContentOneOf, type GoogleproxyV1CacheControl, type GoogleproxyV1ChatCompletionMessage, type GoogleproxyV1ContentBlock, type GoogleproxyV1ContentBlockDelta, type GoogleproxyV1ContentBlockDeltaDeltaOneOf, type GoogleproxyV1ContentBlockTypeOneOf, type GoogleproxyV1ImageUrl, type GoogleproxyV1InputSchema, type GoogleproxyV1McpServer, GoogleproxyV1McpServerType, type GoogleproxyV1McpServerTypeWithLiterals, GoogleproxyV1Model, type GoogleproxyV1ModelWithLiterals, type GoogleproxyV1RedactedThinking, GoogleproxyV1ResponseTypeType, type GoogleproxyV1ResponseTypeTypeWithLiterals, type GoogleproxyV1Text, type GoogleproxyV1Thinking, type GoogleproxyV1ThinkingConfig, type GoogleproxyV1Tool, type GoogleproxyV1ToolChoice, GoogleproxyV1ToolChoiceType, type GoogleproxyV1ToolChoiceTypeWithLiterals, type GoogleproxyV1ToolResult, type GoogleproxyV1ToolUse, type GoogleproxyV1Usage, type GroundingChunk, type GroundingChunkChunkTypeOneOf, type GroundingMetadata, type GroundingSupport, type Guidance, HarmCategory, type 
HarmCategoryWithLiterals, HarmProbability, type HarmProbabilityWithLiterals, type IdentificationData, type IdentificationDataIdOneOf, type ImageConfig, ImageCoreModel, type ImageCoreModelWithLiterals, type ImageData, ImageEditingModel, type ImageEditingModelWithLiterals, type ImageEditingRequest, type ImageEditingResponse, type ImageError, type ImageGenerationFailedEvent, type ImageGenerationRequestedEvent, type ImageGenerationSucceededEvent, type ImageInput, ImageMediaTypeMediaType, type ImageMediaTypeMediaTypeWithLiterals, ImageModel, type ImageModelResponse, type ImageModelResponseResponseOneOf, type ImageModelWithLiterals, type ImageObject, type ImageOutputOptions, ImageQuality, type ImageQualityWithLiterals, ImageSize, type ImageSizeWithLiterals, ImageStableDiffusionModel, type ImageStableDiffusionModelWithLiterals, ImageStyle, type ImageStyleWithLiterals, type ImageUrl, type ImageUrlContent, type ImageUrlInput, type ImageUsage, ImagenModel, type ImagenModelWithLiterals, type IncompleteDetails, type InputSchema, type Inputs, type Instance, type InvokeAnthropicClaudeModelRequest, type InvokeAnthropicClaudeModelRequestTool, type InvokeAnthropicClaudeModelResponse, type InvokeAnthropicModelRequest, type InvokeAnthropicModelResponse, type InvokeChatCompletionRequest, type InvokeChatCompletionRequestResponseFormat, type InvokeChatCompletionRequestResponseFormatFormatDetailsOneOf, type InvokeChatCompletionResponse, type InvokeChatCompletionResponseChoice, type InvokeChatCompletionResponseUsage, type InvokeConverseRequest, type InvokeConverseResponse, type InvokeConverseResponseTokenUsage, type InvokeCustomOpenAiModelRequest, type InvokeCustomOpenAiModelRequestFunctionCallOneOf, type InvokeCustomOpenAiModelRequestFunctionSignature, type InvokeCustomOpenAiModelRequestResponseFormat, type InvokeCustomOpenAiModelRequestTool, type InvokeCustomOpenAiModelResponse, type InvokeCustomOpenAiModelResponseChoice, type InvokeCustomOpenAiModelResponseCompletionTokenDetails, type 
InvokeCustomOpenAiModelResponsePromptTokenDetails, type InvokeCustomOpenAiModelResponseTokenUsage, type InvokeLlamaModelRequest, type InvokeLlamaModelResponse, type InvokeMlPlatformLlamaModelRequest, type InvokeMlPlatformLlamaModelResponse, type InvokeMlPlatformOpenAIChatCompletionRawRequest, type InvokeMlPlatformOpenAIChatCompletionRawResponse, type JsonSchema, Language, type LanguageWithLiterals, type Lighting, LlamaModel, type LlamaModelWithLiterals, type LoraModelSelect, type LucatacoFlorence2Large, type Margin, type McpServer, type McpServerToolConfiguration, McpServerType, type McpServerTypeWithLiterals, type McpToolUse, type MediaContent, type MediaResolution, MediaResolutionLevel, type MediaResolutionLevelWithLiterals, MediaType, type MediaTypeWithLiterals, type MessageDelta, type MessageEnvelope, MessageRole, MessageRoleRole, type MessageRoleRoleWithLiterals, type MessageRoleWithLiterals, type Metadata, type Metrics, Modality, type ModalityTokenCount, type ModalityWithLiterals, Mode, type ModeWithLiterals, Model, type ModelResponse, type ModelResponseResponseOneOf, type ModelWithLiterals, type ModerationResult, type MultiModalInput, type MultiModalInputContentValueOneOf, OpenAiImageModel, type OpenAiImageModelWithLiterals, type OpenAiImageTokenDetails, type OpenAiResponsesRequest, type OpenAiResponsesResponse, type OpenAiResponsesResponseIncompleteDetails, type OpenaiproxyV1ChatCompletionChunk, type OpenaiproxyV1ChatCompletionMessage, type OpenaiproxyV1ChatCompletionMessageContentPart, type OpenaiproxyV1ChatCompletionMessageContentPartContentValueOneOf, type OpenaiproxyV1ChatCompletionMessageImageUrlContent, OpenaiproxyV1ChatCompletionMessageMessageRole, type OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals, type OpenaiproxyV1CreateChatCompletionRequest, type OpenaiproxyV1CreateChatCompletionRequestFunctionCallOneOf, type OpenaiproxyV1CreateChatCompletionRequestResponseFormat, type OpenaiproxyV1CreateChatCompletionResponse, type 
OpenaiproxyV1CreateChatCompletionResponseChoice, type OpenaiproxyV1CreateChatCompletionResponseTokenUsage, OpenaiproxyV1EmbeddingModel, type OpenaiproxyV1EmbeddingModelWithLiterals, OpenaiproxyV1Model, type OpenaiproxyV1ModelWithLiterals, type OptimizePromptOptions, OutageStatus, type OutageStatusWithLiterals, Outcome, type OutcomeWithLiterals, type OutpaintDirection, type Output, type OutputAnnotation, type OutputAnnotationAnnotationTypeOneOf, type OutputContent, OutputFormat, type OutputFormatWithLiterals, type OutputOptions, type Padding, type PageLocationCitation, type Parameters, type PerceptronIsaac01, type PerplexityImageDescriptor, type PerplexityMessage, PerplexityMessageMessageRole, type PerplexityMessageMessageRoleWithLiterals, PerplexityModel, type PerplexityModelWithLiterals, PersonGeneration, type PersonGenerationWithLiterals, type PollImageGenerationResultOptions, type PollImageGenerationResultOptionsRequestOneOf, type PollImageGenerationResultRequest, type PollImageGenerationResultRequestRequestOneOf, type PollImageGenerationResultResponse, type PollImageGenerationResultResponseResponseOneOf, type PredictParameters, type Prediction, type PredictionMetrics, type PredictionUrls, type Project, type ProjectConfigChangedDomainEvent, type Prompt, type PromptModelRequestOneOf, type PromptTokenDetails, type PronunciationDictionaryLocator, type PrunaaiZImageTurbo, type PublicationDate, type PublishProjectOptions, type PublishProjectRequest, type PublishProjectResponse, type PublishPromptOptions, type PublishPromptRequest, type PublishPromptResponse, type QwenImageLayered, type ReasoningText, type Recraft_proxyV1EditImageRequest, type Recraft_proxyV1EditImageResponse, type RedactedThinking, type RemoveBackgroundRequest, type RemoveBackgroundResponse, type RequestMetadata, type ResponseFormat, type ResponseMetadata, ResponseType, ResponseTypeType, type ResponseTypeTypeWithLiterals, type ResponseTypeWithLiterals, type ResponsesCodeInterpreter, type 
ResponsesCodeInterpreterContainer, type ResponsesCodeInterpreterContainerAuto, type ResponsesCodeInterpreterContainerContainerTypeOneOf, type ResponsesCodeInterpreterImageOutput, type ResponsesCodeInterpreterLogsOutput, type ResponsesCodeInterpreterOutput, type ResponsesCodeInterpreterOutputOutputTypeOneOf, type ResponsesCodeInterpreterToolCall, type ResponsesFunction, type ResponsesFunctionToolCall, type ResponsesFunctionToolCallOutput, type ResponsesInputItem, type ResponsesInputItemItemOneOf, type ResponsesInputMessage, type ResponsesInputMessageContent, type ResponsesInputMessageContentContentValueOneOf, type ResponsesInputMessageContentFileInput, type ResponsesInputMessageContentImageInput, ResponsesInputMessageResponsesMessageRole, type ResponsesInputMessageResponsesMessageRoleWithLiterals, type ResponsesInputTokensDetails, ResponsesMessageRole, type ResponsesMessageRoleWithLiterals, ResponsesModel, type ResponsesModelWithLiterals, type ResponsesOutput, type ResponsesOutputMessage, type ResponsesOutputMessageOutputContent, type ResponsesOutputOutputOneOf, type ResponsesOutputTokensDetails, type ResponsesReasoning, type ResponsesReasoningContent, type ResponsesReasoningOutput, type ResponsesReasoningSummaryContent, type ResponsesTextFormat, type ResponsesTextFormatFormatOneOf, type ResponsesTextFormatJsonSchema, type ResponsesTokenUsage, type ResponsesTool, type ResponsesToolChoice, type ResponsesToolToolTypeOneOf, type ResponsesWebSearch, type ResponsesWebSearchToolCall, type ResponsesWebSearchToolCallAction, type ResponsesWebSearchUserLocation, type RestoreInfo, type ResultObject, type RetrievalMetadata, type RetrievedContext, type ReveEdit, Role, type RoleWithLiterals, type SafetyAttribute, type SafetyAttributes, type SafetyRating, type SafetySetting, Sampler, type SamplerWithLiterals, type SearchEntryPoint, type SearchResultLocationCitation, type Segment, type Segmentation, type SequentialImageGenerationOptions, type ServerToolUse, type Shadow, type 
SimpleContentBlock, type SimpleContentBlockTypeOneOf, type SpeechChunk, SpeechModel, type SpeechModelWithLiterals, type SpiGenerationConfig, type Statistics, StylePreset, type StylePresetWithLiterals, type SystemContentBlock, type SystemInstruction, type TaskContent, type TaskError, TaskInput, type TaskInputWithLiterals, TaskType, type TaskTypeWithLiterals, type TaskUsage, type Text, TextBisonModel, type TextBisonModelWithLiterals, type TextBisonPredictRequest, type TextBisonPredictResponse, type TextBisonPrediction, type TextContent, type TextEditorTool, type TextEmbeddingInstance, type TextEmbeddingParameters, type TextGenerationFailedEvent, type TextGenerationSucceededEvent, type TextInstance, type TextPrompt, type TextRemoval, type TextToImageRequest, TextToImageRequestModel, type TextToImageRequestModelWithLiterals, TextToImageRequestStylePreset, type TextToImageRequestStylePresetWithLiterals, type TextToImageResponse, type TextToImageTaskResult, type TextToSpeechChunk, type TextToSpeechRequest, type Thinking, type ThinkingConfig, type ThinkingTextContent, Threshold, type ThresholdWithLiterals, type TimestampGranularities, TimestampGranularity, type TimestampGranularityWithLiterals, type TokenCount, type TokenMetadata, type TokenUsage, type Tool, type ToolCall, type ToolChoice, ToolChoiceType, type ToolChoiceTypeWithLiterals, type ToolConfig, type ToolConfiguration, type ToolResult, type ToolResultContent, type ToolResultContentBlock, type ToolResultContentBlockTypeOneOf, type ToolResultSearchResult, type ToolSpecification, type ToolUse, type ToolUseContent, TranscriptionModel, type TranscriptionModelWithLiterals, Type, type TypeWithLiterals, type UrlCitation, type Usage, type UsageCacheCreation, type UsageMetadata, type UsageServerToolUse, type UserLocation, type UserPerApplicationBudgetInfo, type UserRequestInfo, type V1AnthropicClaudeMessage, type V1AnthropicStreamChunk, type V1AnthropicStreamChunkContentOneOf, type V1AnthropicStreamChunkMessageDelta, type 
V1CacheControl, V1CacheControlType, type V1CacheControlTypeWithLiterals, type V1ChatCompletionChunk, type V1ChatCompletionChunkChunkChoice, type V1ChatCompletionMessage, type V1ChatCompletionMessageContentPart, type V1ChatCompletionMessageContentPartContentValueOneOf, type V1ChatCompletionMessageFunctionWithArgs, type V1ChatCompletionMessageImageUrlContent, V1ChatCompletionMessageMessageRole, type V1ChatCompletionMessageMessageRoleWithLiterals, type V1ChatCompletionMessageToolCall, type V1Citation, type V1CodeExecutionResult, type V1ContentBlock, type V1ContentBlockDelta, type V1ContentBlockDeltaDeltaOneOf, type V1ContentBlockTypeOneOf, type V1ContentPart, type V1CreateChatCompletionRequest, type V1CreateChatCompletionRequestResponseFormat, type V1CreateChatCompletionRequestTool, type V1CreateChatCompletionResponse, type V1CreateChatCompletionResponseChoice, type V1CreateChatCompletionResponseTokenUsage, type V1CreateEmbeddingsRequest, type V1CreateEmbeddingsResponse, V1EditImageModel, type V1EditImageModelWithLiterals, type V1EditImageRequest, type V1EditImageResponse, V1EmbeddingEncodingFormat, type V1EmbeddingEncodingFormatWithLiterals, type V1EmbeddingInfo, type V1EmbeddingInfoEmbeddingResultOneOf, V1EmbeddingModel, type V1EmbeddingModelWithLiterals, type V1FineTuningSpec, type V1FloatEmbedding, type V1FluxPulid, type V1GenerateImageRequest, type V1GenerateImageResponse, type V1GetResultRequest, type V1GetResultResponse, type V1ImageInput, V1ImageMediaTypeMediaType, type V1ImageMediaTypeMediaTypeWithLiterals, V1ImageModel, type V1ImageModelWithLiterals, type V1ImageObject, type V1ImageUrl, type V1ImageUsage, type V1InputSchema, type V1InvokeAnthropicClaudeModelRequest, type V1InvokeAnthropicClaudeModelResponse, type V1McpServer, type V1McpServerToolConfiguration, V1McpServerType, type V1McpServerTypeWithLiterals, V1MessageRoleRole, type V1MessageRoleRoleWithLiterals, V1Model, type V1ModelWithLiterals, type V1OpenAiResponsesRequest, type 
V1OpenAiResponsesResponse, type V1OutputAnnotation, type V1OutputAnnotationAnnotationTypeOneOf, type V1RedactedThinking, V1ResponseTypeType, type V1ResponseTypeTypeWithLiterals, type V1ResponsesCodeInterpreter, type V1ResponsesCodeInterpreterContainer, type V1ResponsesCodeInterpreterContainerAuto, type V1ResponsesCodeInterpreterContainerContainerTypeOneOf, type V1ResponsesCodeInterpreterImageOutput, type V1ResponsesCodeInterpreterLogsOutput, type V1ResponsesCodeInterpreterOutput, type V1ResponsesCodeInterpreterOutputOutputTypeOneOf, type V1ResponsesCodeInterpreterToolCall, type V1ResponsesFunction, type V1ResponsesFunctionToolCall, type V1ResponsesFunctionToolCallOutput, type V1ResponsesInputItem, type V1ResponsesInputItemItemOneOf, type V1ResponsesInputMessage, type V1ResponsesInputMessageContent, type V1ResponsesInputMessageContentContentValueOneOf, type V1ResponsesInputTokensDetails, V1ResponsesModel, type V1ResponsesModelWithLiterals, type V1ResponsesOutput, type V1ResponsesOutputMessage, type V1ResponsesOutputOutputOneOf, type V1ResponsesOutputTokensDetails, type V1ResponsesReasoning, type V1ResponsesReasoningContent, type V1ResponsesReasoningOutput, type V1ResponsesReasoningSummaryContent, type V1ResponsesTextFormat, type V1ResponsesTextFormatFormatOneOf, type V1ResponsesTokenUsage, type V1ResponsesTool, type V1ResponsesToolChoice, type V1ResponsesToolToolTypeOneOf, type V1ResponsesWebSearch, type V1ResponsesWebSearchToolCall, type V1Segment, type V1SimpleContentBlock, type V1SimpleContentBlockTypeOneOf, type V1Text, type V1TextToImageRequest, type V1TextToImageResponse, type V1Thinking, type V1ThinkingConfig, type V1TokenUsage, type V1Tool, type V1ToolChoice, V1ToolChoiceType, type V1ToolChoiceTypeWithLiterals, type V1ToolConfig, type V1ToolKindOneOf, type V1ToolResult, type V1ToolUse, type V1UrlCitation, type V1Usage, V1VideoModel, type V1VideoModelWithLiterals, VideoGenModel, type VideoGenModelWithLiterals, type VideoInferenceRequest, type 
VideoInferenceResponse, type VideoInferenceTaskResult, type VideoInputs, type VideoJob, VideoModel, type VideoModelWithLiterals, type VoiceSettings, type Web, type WebFetchTool, type WebFetchToolResult, type WebFetchToolResultContentError, type WebFetchToolResultContentOneOf, type WebFetchToolResultContentSuccess, type WebSearchResult, type WebSearchResultList, type WebSearchResultLocationCitation, type WebSearchTool, type WebSearchToolResult, type WebSearchToolResultContentOneOf, type WebSearchToolResultError, type WebSearchUserLocation, WebhookIdentityType, type WebhookIdentityTypeWithLiterals, type Wix_ai_gatewayV1EditImageRequest, type Wix_ai_gatewayV1EditImageRequestRequestOneOf, type Wix_ai_gatewayV1EditImageResponse, type Wix_ai_gatewayV1EditImageResponseResponseOneOf, type Word };