@wix/auto_sdk_ai-gateway_generators 1.0.64 → 1.0.66

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -363,10 +363,11 @@ declare enum OpenaiproxyV1Model {
   GPT_5_2025_08_07 = "GPT_5_2025_08_07",
   GPT_5_MINI_2025_08_07 = "GPT_5_MINI_2025_08_07",
   GPT_5_NANO_2025_08_07 = "GPT_5_NANO_2025_08_07",
-  GPT_5_2_2025_12_11_COMPLETION = "GPT_5_2_2025_12_11_COMPLETION"
+  GPT_5_2_2025_12_11_COMPLETION = "GPT_5_2_2025_12_11_COMPLETION",
+  GPT_5_1_2025_11_13_COMPLETION = "GPT_5_1_2025_11_13_COMPLETION"
 }
 /** @enumType */
-type OpenaiproxyV1ModelWithLiterals = OpenaiproxyV1Model | 'UNKNOWN' | 'GPT_3_5_TURBO' | 'GPT_3_5_TURBO_0301' | 'GPT_4' | 'GPT_4_0314' | 'GPT_4_32K' | 'GPT_4_32K_0314' | 'GPT_3_5_TURBO_0613' | 'GPT_3_5_TURBO_16K' | 'GPT_3_5_TURBO_16K_0613' | 'GPT_4_0613' | 'GPT_4_32K_0613' | 'GPT_3_5_TURBO_1106' | 'GPT_4_1106_PREVIEW' | 'GPT_4_VISION_PREVIEW' | 'GPT_4_TURBO_PREVIEW' | 'GPT_4_0125_PREVIEW' | 'GPT_3_5_TURBO_0125' | 'GPT_4_TURBO_2024_04_09' | 'GPT_4O_2024_05_13' | 'GPT_4O_MINI_2024_07_18' | 'GPT_4O_2024_08_06' | 'O1_PREVIEW' | 'O1_PREVIEW_2024_09_12' | 'O1_MINI' | 'O1_MINI_2024_09_12' | 'GPT_4O_2024_11_20' | 'O1_2024_12_17' | 'O3_MINI_2025_01_31' | 'GPT_4_OLD' | 'GPT_4_1_2025_04_14' | 'GPT_4_1_MINI_2025_04_14' | 'GPT_4_1_NANO_2025_04_14' | 'O3_2025_04_16' | 'O4_MINI_2025_04_16' | 'GPT_EXP' | 'GPT_EXP_2' | 'GPT_5_2025_08_07' | 'GPT_5_MINI_2025_08_07' | 'GPT_5_NANO_2025_08_07' | 'GPT_5_2_2025_12_11_COMPLETION';
+type OpenaiproxyV1ModelWithLiterals = OpenaiproxyV1Model | 'UNKNOWN' | 'GPT_3_5_TURBO' | 'GPT_3_5_TURBO_0301' | 'GPT_4' | 'GPT_4_0314' | 'GPT_4_32K' | 'GPT_4_32K_0314' | 'GPT_3_5_TURBO_0613' | 'GPT_3_5_TURBO_16K' | 'GPT_3_5_TURBO_16K_0613' | 'GPT_4_0613' | 'GPT_4_32K_0613' | 'GPT_3_5_TURBO_1106' | 'GPT_4_1106_PREVIEW' | 'GPT_4_VISION_PREVIEW' | 'GPT_4_TURBO_PREVIEW' | 'GPT_4_0125_PREVIEW' | 'GPT_3_5_TURBO_0125' | 'GPT_4_TURBO_2024_04_09' | 'GPT_4O_2024_05_13' | 'GPT_4O_MINI_2024_07_18' | 'GPT_4O_2024_08_06' | 'O1_PREVIEW' | 'O1_PREVIEW_2024_09_12' | 'O1_MINI' | 'O1_MINI_2024_09_12' | 'GPT_4O_2024_11_20' | 'O1_2024_12_17' | 'O3_MINI_2025_01_31' | 'GPT_4_OLD' | 'GPT_4_1_2025_04_14' | 'GPT_4_1_MINI_2025_04_14' | 'GPT_4_1_NANO_2025_04_14' | 'O3_2025_04_16' | 'O4_MINI_2025_04_16' | 'GPT_EXP' | 'GPT_EXP_2' | 'GPT_5_2025_08_07' | 'GPT_5_MINI_2025_08_07' | 'GPT_5_NANO_2025_08_07' | 'GPT_5_2_2025_12_11_COMPLETION' | 'GPT_5_1_2025_11_13_COMPLETION';
 interface OpenaiproxyV1ChatCompletionMessage {
   /** The role of the message author. */
   role?: OpenaiproxyV1ChatCompletionMessageMessageRoleWithLiterals;
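Version 1.0.66 adds GPT_5_1_2025_11_13_COMPLETION both as an enum member and as a string literal in OpenaiproxyV1ModelWithLiterals, so either spelling type-checks wherever the literals type is accepted. A minimal sketch of that pattern, using a local mirror of the generated declarations rather than the package's own exports:

// Local mirror of the declarations in the hunk above, for illustration only.
enum OpenaiproxyV1Model {
  GPT_5_2_2025_12_11_COMPLETION = "GPT_5_2_2025_12_11_COMPLETION",
  GPT_5_1_2025_11_13_COMPLETION = "GPT_5_1_2025_11_13_COMPLETION", // added in 1.0.66
}
type OpenaiproxyV1ModelWithLiterals =
  | OpenaiproxyV1Model
  | 'GPT_5_2_2025_12_11_COMPLETION'
  | 'GPT_5_1_2025_11_13_COMPLETION';

// Both spellings satisfy a field typed with the ...WithLiterals union:
const viaEnum: OpenaiproxyV1ModelWithLiterals = OpenaiproxyV1Model.GPT_5_1_2025_11_13_COMPLETION;
const viaLiteral: OpenaiproxyV1ModelWithLiterals = 'GPT_5_1_2025_11_13_COMPLETION';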
@@ -4748,8 +4749,20 @@ interface TextToImageRequest {
    * @maxSize 10
    */
   loraModels?: LoraModelSelect[];
-  /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */
+  /**
+   * Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * @deprecated Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * @replacedBy provider_settings_string
+   * @targetRemovalDate 2030-01-01
+   */
   providerSettings?: Record<string, any> | null;
+  /**
+   * Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * Uses a string representation that's parsed directly into JSON to avoid issues with number parsing.
+   * https://stackoverflow.com/questions/51818125/how-to-use-ints-in-a-protobuf-struct
+   * @maxLength 1000000
+   */
+  providerSettingsString?: string | null;
   /** Inputs for the image generation process. */
   inputs?: Inputs;
 }
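The new providerSettingsString field on TextToImageRequest supersedes the providerSettings object: the settings payload is sent as a JSON string (up to 1,000,000 characters) and parsed on the service side. A hedged migration sketch; the field names come from the declarations above, while the settings keys are illustrative assumptions, not documented provider options:

// Migration sketch for TextToImageRequest provider settings.
const settings = { guidanceScale: 7.5, steps: 30 }; // hypothetical provider options

// Deprecated shape (removal targeted for 2030-01-01):
const before: { providerSettings?: Record<string, any> | null } = {
  providerSettings: settings,
};

// Replacement shape: the same payload serialized to JSON (max length 1,000,000):
const after: { providerSettingsString?: string | null } = {
  providerSettingsString: JSON.stringify(settings),
};

// For integers above Number.MAX_SAFE_INTEGER, build the JSON text directly (or use a
// BigInt-aware serializer) so the value is never rounded through a JS number.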
@@ -5719,8 +5732,20 @@ interface VideoInferenceRequest {
    * @max 4
    */
   numberResults?: number | null;
-  /** Contains provider-specific configuration settings that customize the behavior of different AI models and services. */
+  /**
+   * Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * @deprecated Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * @replacedBy provider_settings_string
+   * @targetRemovalDate 2030-01-01
+   */
   providerSettings?: Record<string, any> | null;
+  /**
+   * Contains provider-specific configuration settings that customize the behavior of different AI models and services.
+   * Uses a string representation that's parsed directly into JSON to avoid issues with number parsing.
+   * https://stackoverflow.com/questions/51818125/how-to-use-ints-in-a-protobuf-struct
+   * @maxLength 1000000
+   */
+  providerSettingsString?: string | null;
   /**
    * Skip polling flag - if set to false, will poll until video generation is complete
    * If not set or true, returns immediately with task UUID for manual polling
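VideoInferenceRequest gains the same providerSettings / providerSettingsString pair. The sketch below illustrates the number-parsing problem the string field is meant to avoid, assuming only standard JSON behavior; the seed key is a hypothetical provider setting:

// Why a JSON string: plain JS numbers (like protobuf Struct values) are IEEE-754
// doubles, so 64-bit integers get rounded when parsed into an object.
const asNumber = JSON.parse('{"seed": 9007199254740993}').seed;
console.log(asNumber === 9007199254740992); // true: the trailing digit is rounded away

// Sending the raw JSON text via the new string field defers parsing to the service,
// which can read the integer without going through a double.
const videoRequest = {
  providerSettingsString: '{"seed": 9007199254740993}',
};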
@@ -3397,6 +3397,7 @@ var OpenaiproxyV1Model = /* @__PURE__ */ ((OpenaiproxyV1Model2) => {
   OpenaiproxyV1Model2["GPT_5_MINI_2025_08_07"] = "GPT_5_MINI_2025_08_07";
   OpenaiproxyV1Model2["GPT_5_NANO_2025_08_07"] = "GPT_5_NANO_2025_08_07";
   OpenaiproxyV1Model2["GPT_5_2_2025_12_11_COMPLETION"] = "GPT_5_2_2025_12_11_COMPLETION";
+  OpenaiproxyV1Model2["GPT_5_1_2025_11_13_COMPLETION"] = "GPT_5_1_2025_11_13_COMPLETION";
   return OpenaiproxyV1Model2;
 })(OpenaiproxyV1Model || {});
 var OpenaiproxyV1ChatCompletionMessageMessageRole = /* @__PURE__ */ ((OpenaiproxyV1ChatCompletionMessageMessageRole2) => {
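Because the compiled enum object is extended as well, the new model can also be checked dynamically. A small sketch, assuming the enum is re-exported from the package's main entry point (adjust the import to however your build consumes the generated module):

// Runtime membership check against the compiled enum object.
import { OpenaiproxyV1Model } from '@wix/auto_sdk_ai-gateway_generators';

function isKnownModel(value: string): value is OpenaiproxyV1Model {
  return (Object.values(OpenaiproxyV1Model) as string[]).includes(value);
}

console.log(isKnownModel('GPT_5_1_2025_11_13_COMPLETION')); // true as of 1.0.66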