@wix/auto_sdk_ai-gateway_prompts 1.0.34 → 1.0.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/build/cjs/index.js +5 -0
- package/build/cjs/index.js.map +1 -1
- package/build/cjs/index.typings.d.ts +11 -6
- package/build/cjs/index.typings.js +5 -0
- package/build/cjs/index.typings.js.map +1 -1
- package/build/cjs/meta.d.ts +11 -6
- package/build/cjs/meta.js +5 -0
- package/build/cjs/meta.js.map +1 -1
- package/build/es/index.mjs +5 -0
- package/build/es/index.mjs.map +1 -1
- package/build/es/index.typings.d.mts +11 -6
- package/build/es/index.typings.mjs +5 -0
- package/build/es/index.typings.mjs.map +1 -1
- package/build/es/meta.d.mts +11 -6
- package/build/es/meta.mjs +5 -0
- package/build/es/meta.mjs.map +1 -1
- package/build/internal/cjs/index.js +5 -0
- package/build/internal/cjs/index.js.map +1 -1
- package/build/internal/cjs/index.typings.d.ts +11 -6
- package/build/internal/cjs/index.typings.js +5 -0
- package/build/internal/cjs/index.typings.js.map +1 -1
- package/build/internal/cjs/meta.d.ts +11 -6
- package/build/internal/cjs/meta.js +5 -0
- package/build/internal/cjs/meta.js.map +1 -1
- package/build/internal/es/index.mjs +5 -0
- package/build/internal/es/index.mjs.map +1 -1
- package/build/internal/es/index.typings.d.mts +11 -6
- package/build/internal/es/index.typings.mjs +5 -0
- package/build/internal/es/index.typings.mjs.map +1 -1
- package/build/internal/es/meta.d.mts +11 -6
- package/build/internal/es/meta.mjs +5 -0
- package/build/internal/es/meta.mjs.map +1 -1
- package/package.json +2 -2
@@ -3060,10 +3060,13 @@ interface CreateImageOpenAiResponse {
 declare enum OpenAiImageModel {
     UNKNOWN_IMAGE_CREATION_MODEL = "UNKNOWN_IMAGE_CREATION_MODEL",
     GPT_4O_IMAGE = "GPT_4O_IMAGE",
-    GPT_IMAGE_1 = "GPT_IMAGE_1"
+    GPT_IMAGE_1 = "GPT_IMAGE_1",
+    GPT_IMAGE_EXP = "GPT_IMAGE_EXP",
+    GPT_IMAGE_EXP_2 = "GPT_IMAGE_EXP_2",
+    GPT_IMAGE_EXP_3 = "GPT_IMAGE_EXP_3"
 }
 /** @enumType */
-type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1';
+type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1' | 'GPT_IMAGE_EXP' | 'GPT_IMAGE_EXP_2' | 'GPT_IMAGE_EXP_3';
 interface ImageUsage {
     /** Number of tokens in the input */
     inputTokens?: number | null;
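Note: the typings change above only widens the image-model enum and its literal union. A minimal sketch of what that widened union accepts, assuming only the names shown in this diff (the type below is declared locally for illustration, not imported from the package):

type OpenAiImageModelLiteral =
    | 'UNKNOWN_IMAGE_CREATION_MODEL'
    | 'GPT_4O_IMAGE'
    | 'GPT_IMAGE_1'
    | 'GPT_IMAGE_EXP'
    | 'GPT_IMAGE_EXP_2'
    | 'GPT_IMAGE_EXP_3';

// All image-model names from the 1.0.36 typings, usable as a runtime allowlist.
const IMAGE_MODEL_LITERALS: readonly OpenAiImageModelLiteral[] = [
    'UNKNOWN_IMAGE_CREATION_MODEL',
    'GPT_4O_IMAGE',
    'GPT_IMAGE_1',
    'GPT_IMAGE_EXP',
    'GPT_IMAGE_EXP_2',
    'GPT_IMAGE_EXP_3',
];

// Narrow an arbitrary string (e.g. from config) to the widened union.
function isKnownImageModel(value: string): value is OpenAiImageModelLiteral {
    return (IMAGE_MODEL_LITERALS as readonly string[]).includes(value);
}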
@@ -3461,10 +3464,11 @@ declare enum V1ResponsesModel {
     GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES",
     GPT_EXP_RESPONSES_2 = "GPT_EXP_RESPONSES_2",
     GPT_EXP_RESPONSES_3 = "GPT_EXP_RESPONSES_3",
-    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX"
+    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX';
+type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX' | 'GPT_5_2_2025_12_11';
 interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: V1ResponsesOutputMessage;
@@ -3868,10 +3872,11 @@ declare enum ResponsesModel {
     MODEL_UNSPECIFIED = "MODEL_UNSPECIFIED",
     GPT_5_2025_08_07_RESPONSES = "GPT_5_2025_08_07_RESPONSES",
     GPT_5_MINI_2025_08_07_RESPONSES = "GPT_5_MINI_2025_08_07_RESPONSES",
-    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES"
+    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES';
+type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'GPT_5_2_2025_12_11';
 interface ResponsesOutput extends ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: ResponsesOutputMessage;
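Note: both V1ResponsesModel and ResponsesModel gain the same GPT_5_2_2025_12_11 member, so consumer code that switches exhaustively over the literal union needs one more case. A minimal sketch under that assumption, using a locally declared copy of the smaller union rather than the package's export:

type ResponsesModelLiteral =
    | 'MODEL_UNSPECIFIED'
    | 'GPT_5_2025_08_07_RESPONSES'
    | 'GPT_5_MINI_2025_08_07_RESPONSES'
    | 'GPT_5_NANO_2025_08_07_RESPONSES'
    | 'GPT_5_2_2025_12_11';

// Exhaustive switch: the `never` assignment fails to compile if a member is missed.
function describeModel(model: ResponsesModelLiteral): string {
    switch (model) {
        case 'MODEL_UNSPECIFIED':
            return 'no model selected';
        case 'GPT_5_2025_08_07_RESPONSES':
        case 'GPT_5_MINI_2025_08_07_RESPONSES':
        case 'GPT_5_NANO_2025_08_07_RESPONSES':
            return 'existing GPT-5 responses model';
        case 'GPT_5_2_2025_12_11':
            return 'responses model added in this version range';
        default: {
            const unreachable: never = model;
            return unreachable;
        }
    }
}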
@@ -1190,6 +1190,9 @@ var OpenAiImageModel = /* @__PURE__ */ ((OpenAiImageModel2) => {
   OpenAiImageModel2["UNKNOWN_IMAGE_CREATION_MODEL"] = "UNKNOWN_IMAGE_CREATION_MODEL";
   OpenAiImageModel2["GPT_4O_IMAGE"] = "GPT_4O_IMAGE";
   OpenAiImageModel2["GPT_IMAGE_1"] = "GPT_IMAGE_1";
+  OpenAiImageModel2["GPT_IMAGE_EXP"] = "GPT_IMAGE_EXP";
+  OpenAiImageModel2["GPT_IMAGE_EXP_2"] = "GPT_IMAGE_EXP_2";
+  OpenAiImageModel2["GPT_IMAGE_EXP_3"] = "GPT_IMAGE_EXP_3";
   return OpenAiImageModel2;
 })(OpenAiImageModel || {});
 var V1ChatCompletionMessageMessageRole = /* @__PURE__ */ ((V1ChatCompletionMessageMessageRole2) => {
@@ -1227,6 +1230,7 @@ var V1ResponsesModel = /* @__PURE__ */ ((V1ResponsesModel2) => {
   V1ResponsesModel2["GPT_EXP_RESPONSES_2"] = "GPT_EXP_RESPONSES_2";
   V1ResponsesModel2["GPT_EXP_RESPONSES_3"] = "GPT_EXP_RESPONSES_3";
   V1ResponsesModel2["GPT_5_1_CODEX_MAX"] = "GPT_5_1_CODEX_MAX";
+  V1ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return V1ResponsesModel2;
 })(V1ResponsesModel || {});
 var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
@@ -1234,6 +1238,7 @@ var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
   ResponsesModel2["GPT_5_2025_08_07_RESPONSES"] = "GPT_5_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_MINI_2025_08_07_RESPONSES"] = "GPT_5_MINI_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_NANO_2025_08_07_RESPONSES"] = "GPT_5_NANO_2025_08_07_RESPONSES";
+  ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return ResponsesModel2;
 })(ResponsesModel || {});
 var TextBisonModel = /* @__PURE__ */ ((TextBisonModel2) => {
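Note: in the compiled build output the enums are plain objects produced by the IIFE pattern shown above, so the new members are ordinary string properties at runtime. A minimal sketch of a runtime feature check, assuming only that shape (the object below mirrors the compiled code rather than importing it):

// Mirrors the compiled IIFE: at runtime the enum is just a string-to-string map.
const ResponsesModelRuntime: Record<string, string> = ((m: Record<string, string>) => {
    m['MODEL_UNSPECIFIED'] = 'MODEL_UNSPECIFIED';
    m['GPT_5_2_2025_12_11'] = 'GPT_5_2_2025_12_11';
    return m;
})({});

// Feature-detect the new member before relying on it across 1.0.34 / 1.0.36 builds.
const hasGpt52 = Object.prototype.hasOwnProperty.call(ResponsesModelRuntime, 'GPT_5_2_2025_12_11');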