@wix/auto_sdk_ai-gateway_prompts 1.0.34 → 1.0.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/cjs/index.js +5 -0
- package/build/cjs/index.js.map +1 -1
- package/build/cjs/index.typings.d.ts +11 -6
- package/build/cjs/index.typings.js +5 -0
- package/build/cjs/index.typings.js.map +1 -1
- package/build/cjs/meta.d.ts +11 -6
- package/build/cjs/meta.js +5 -0
- package/build/cjs/meta.js.map +1 -1
- package/build/es/index.mjs +5 -0
- package/build/es/index.mjs.map +1 -1
- package/build/es/index.typings.d.mts +11 -6
- package/build/es/index.typings.mjs +5 -0
- package/build/es/index.typings.mjs.map +1 -1
- package/build/es/meta.d.mts +11 -6
- package/build/es/meta.mjs +5 -0
- package/build/es/meta.mjs.map +1 -1
- package/build/internal/cjs/index.js +5 -0
- package/build/internal/cjs/index.js.map +1 -1
- package/build/internal/cjs/index.typings.d.ts +11 -6
- package/build/internal/cjs/index.typings.js +5 -0
- package/build/internal/cjs/index.typings.js.map +1 -1
- package/build/internal/cjs/meta.d.ts +11 -6
- package/build/internal/cjs/meta.js +5 -0
- package/build/internal/cjs/meta.js.map +1 -1
- package/build/internal/es/index.mjs +5 -0
- package/build/internal/es/index.mjs.map +1 -1
- package/build/internal/es/index.typings.d.mts +11 -6
- package/build/internal/es/index.typings.mjs +5 -0
- package/build/internal/es/index.typings.mjs.map +1 -1
- package/build/internal/es/meta.d.mts +11 -6
- package/build/internal/es/meta.mjs +5 -0
- package/build/internal/es/meta.mjs.map +1 -1
- package/package.json +2 -2
@@ -3060,10 +3060,13 @@ interface CreateImageOpenAiResponse {
 declare enum OpenAiImageModel {
     UNKNOWN_IMAGE_CREATION_MODEL = "UNKNOWN_IMAGE_CREATION_MODEL",
     GPT_4O_IMAGE = "GPT_4O_IMAGE",
-    GPT_IMAGE_1 = "GPT_IMAGE_1"
+    GPT_IMAGE_1 = "GPT_IMAGE_1",
+    GPT_IMAGE_EXP = "GPT_IMAGE_EXP",
+    GPT_IMAGE_EXP_2 = "GPT_IMAGE_EXP_2",
+    GPT_IMAGE_EXP_3 = "GPT_IMAGE_EXP_3"
 }
 /** @enumType */
-type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1';
+type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1' | 'GPT_IMAGE_EXP' | 'GPT_IMAGE_EXP_2' | 'GPT_IMAGE_EXP_3';
 interface ImageUsage {
     /** Number of tokens in the input */
     inputTokens?: number | null;
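For consumers of the typings, the practical effect is that the three experimental image model names become valid wherever the OpenAiImageModelWithLiterals union is accepted, both as enum members and as plain string literals. A short sketch of the widened union in use, assuming OpenAiImageModel is re-exported from the package root (the export surface itself is not part of this diff):

import { OpenAiImageModel } from '@wix/auto_sdk_ai-gateway_prompts';

// New in 1.0.36: the enum member and its string-literal form both type-check
// against the widened OpenAiImageModelWithLiterals union.
const asEnum = OpenAiImageModel.GPT_IMAGE_EXP;              // "GPT_IMAGE_EXP"
const asLiteral: `${OpenAiImageModel}` = 'GPT_IMAGE_EXP_2'; // literal form

console.log(asEnum, asLiteral);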
@@ -3461,10 +3464,11 @@ declare enum V1ResponsesModel {
     GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES",
     GPT_EXP_RESPONSES_2 = "GPT_EXP_RESPONSES_2",
     GPT_EXP_RESPONSES_3 = "GPT_EXP_RESPONSES_3",
-    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX"
+    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX';
+type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX' | 'GPT_5_2_2025_12_11';
 interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: V1ResponsesOutputMessage;
@@ -3868,10 +3872,11 @@ declare enum ResponsesModel {
     MODEL_UNSPECIFIED = "MODEL_UNSPECIFIED",
     GPT_5_2025_08_07_RESPONSES = "GPT_5_2025_08_07_RESPONSES",
     GPT_5_MINI_2025_08_07_RESPONSES = "GPT_5_MINI_2025_08_07_RESPONSES",
-    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES"
+    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES';
+type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'GPT_5_2_2025_12_11';
 interface ResponsesOutput extends ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: ResponsesOutputMessage;
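Both responses-model enums pick up the same GPT_5_2_2025_12_11 member, and their WithLiterals unions are widened to match. A brief sketch of what this allows, again assuming the enums are re-exported from the package root (not shown in this diff):

import { ResponsesModel, V1ResponsesModel } from '@wix/auto_sdk_ai-gateway_prompts';

// New in 1.0.36 in both enums:
const v1Model = V1ResponsesModel.GPT_5_2_2025_12_11;     // "GPT_5_2_2025_12_11"
const model: `${ResponsesModel}` = 'GPT_5_2_2025_12_11'; // literal form also type-checks

console.log(v1Model, model);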
@@ -1079,6 +1079,9 @@ var OpenAiImageModel = /* @__PURE__ */ ((OpenAiImageModel2) => {
   OpenAiImageModel2["UNKNOWN_IMAGE_CREATION_MODEL"] = "UNKNOWN_IMAGE_CREATION_MODEL";
   OpenAiImageModel2["GPT_4O_IMAGE"] = "GPT_4O_IMAGE";
   OpenAiImageModel2["GPT_IMAGE_1"] = "GPT_IMAGE_1";
+  OpenAiImageModel2["GPT_IMAGE_EXP"] = "GPT_IMAGE_EXP";
+  OpenAiImageModel2["GPT_IMAGE_EXP_2"] = "GPT_IMAGE_EXP_2";
+  OpenAiImageModel2["GPT_IMAGE_EXP_3"] = "GPT_IMAGE_EXP_3";
   return OpenAiImageModel2;
 })(OpenAiImageModel || {});
 var V1ChatCompletionMessageMessageRole = /* @__PURE__ */ ((V1ChatCompletionMessageMessageRole2) => {
@@ -1116,6 +1119,7 @@ var V1ResponsesModel = /* @__PURE__ */ ((V1ResponsesModel2) => {
   V1ResponsesModel2["GPT_EXP_RESPONSES_2"] = "GPT_EXP_RESPONSES_2";
   V1ResponsesModel2["GPT_EXP_RESPONSES_3"] = "GPT_EXP_RESPONSES_3";
   V1ResponsesModel2["GPT_5_1_CODEX_MAX"] = "GPT_5_1_CODEX_MAX";
+  V1ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return V1ResponsesModel2;
 })(V1ResponsesModel || {});
 var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
@@ -1123,6 +1127,7 @@ var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
   ResponsesModel2["GPT_5_2025_08_07_RESPONSES"] = "GPT_5_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_MINI_2025_08_07_RESPONSES"] = "GPT_5_MINI_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_NANO_2025_08_07_RESPONSES"] = "GPT_5_NANO_2025_08_07_RESPONSES";
+  ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return ResponsesModel2;
 })(ResponsesModel || {});
 var TextBisonModel = /* @__PURE__ */ ((TextBisonModel2) => {
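The compiled CJS and ES bundles mirror the typings, so the same keys exist on the runtime enum objects. A quick sanity check against an installed copy, assuming these enums are part of the runtime exports (which this diff does not show directly):

import { OpenAiImageModel, ResponsesModel } from '@wix/auto_sdk_ai-gateway_prompts';

// Expected with 1.0.36 installed:
console.log('GPT_IMAGE_EXP_3' in OpenAiImageModel);  // true
console.log(ResponsesModel.GPT_5_2_2025_12_11);      // "GPT_5_2_2025_12_11"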