@wix/auto_sdk_ai-gateway_prompts 1.0.34 → 1.0.36
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- package/build/cjs/index.js +5 -0
- package/build/cjs/index.js.map +1 -1
- package/build/cjs/index.typings.d.ts +11 -6
- package/build/cjs/index.typings.js +5 -0
- package/build/cjs/index.typings.js.map +1 -1
- package/build/cjs/meta.d.ts +11 -6
- package/build/cjs/meta.js +5 -0
- package/build/cjs/meta.js.map +1 -1
- package/build/es/index.mjs +5 -0
- package/build/es/index.mjs.map +1 -1
- package/build/es/index.typings.d.mts +11 -6
- package/build/es/index.typings.mjs +5 -0
- package/build/es/index.typings.mjs.map +1 -1
- package/build/es/meta.d.mts +11 -6
- package/build/es/meta.mjs +5 -0
- package/build/es/meta.mjs.map +1 -1
- package/build/internal/cjs/index.js +5 -0
- package/build/internal/cjs/index.js.map +1 -1
- package/build/internal/cjs/index.typings.d.ts +11 -6
- package/build/internal/cjs/index.typings.js +5 -0
- package/build/internal/cjs/index.typings.js.map +1 -1
- package/build/internal/cjs/meta.d.ts +11 -6
- package/build/internal/cjs/meta.js +5 -0
- package/build/internal/cjs/meta.js.map +1 -1
- package/build/internal/es/index.mjs +5 -0
- package/build/internal/es/index.mjs.map +1 -1
- package/build/internal/es/index.typings.d.mts +11 -6
- package/build/internal/es/index.typings.mjs +5 -0
- package/build/internal/es/index.typings.mjs.map +1 -1
- package/build/internal/es/meta.d.mts +11 -6
- package/build/internal/es/meta.mjs +5 -0
- package/build/internal/es/meta.mjs.map +1 -1
- package/package.json +2 -2
@@ -3057,10 +3057,13 @@ interface CreateImageOpenAiResponse {
 declare enum OpenAiImageModel {
     UNKNOWN_IMAGE_CREATION_MODEL = "UNKNOWN_IMAGE_CREATION_MODEL",
     GPT_4O_IMAGE = "GPT_4O_IMAGE",
-    GPT_IMAGE_1 = "GPT_IMAGE_1"
+    GPT_IMAGE_1 = "GPT_IMAGE_1",
+    GPT_IMAGE_EXP = "GPT_IMAGE_EXP",
+    GPT_IMAGE_EXP_2 = "GPT_IMAGE_EXP_2",
+    GPT_IMAGE_EXP_3 = "GPT_IMAGE_EXP_3"
 }
 /** @enumType */
-type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1';
+type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1' | 'GPT_IMAGE_EXP' | 'GPT_IMAGE_EXP_2' | 'GPT_IMAGE_EXP_3';
 interface ImageUsage {
     /** Number of tokens in the input */
     inputTokens?: number | null;
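The three new image-model values above widen both the enum and its literal-union companion in lockstep. As a minimal consumer-side sketch (assuming a local mirror of the updated declarations rather than an import from the package root), either the enum member or its string literal now type-checks:

// Local mirror of the 1.0.36 typings shown above (assumption: not imported from the package).
enum OpenAiImageModel {
    UNKNOWN_IMAGE_CREATION_MODEL = "UNKNOWN_IMAGE_CREATION_MODEL",
    GPT_4O_IMAGE = "GPT_4O_IMAGE",
    GPT_IMAGE_1 = "GPT_IMAGE_1",
    GPT_IMAGE_EXP = "GPT_IMAGE_EXP",
    GPT_IMAGE_EXP_2 = "GPT_IMAGE_EXP_2",
    GPT_IMAGE_EXP_3 = "GPT_IMAGE_EXP_3"
}
type OpenAiImageModelWithLiterals = OpenAiImageModel | 'UNKNOWN_IMAGE_CREATION_MODEL' | 'GPT_4O_IMAGE' | 'GPT_IMAGE_1' | 'GPT_IMAGE_EXP' | 'GPT_IMAGE_EXP_2' | 'GPT_IMAGE_EXP_3';

// Both forms are accepted by the widened union:
const fromEnum: OpenAiImageModelWithLiterals = OpenAiImageModel.GPT_IMAGE_EXP;
const fromLiteral: OpenAiImageModelWithLiterals = 'GPT_IMAGE_EXP_2';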
@@ -3458,10 +3461,11 @@ declare enum V1ResponsesModel {
     GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES",
     GPT_EXP_RESPONSES_2 = "GPT_EXP_RESPONSES_2",
     GPT_EXP_RESPONSES_3 = "GPT_EXP_RESPONSES_3",
-    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX"
+    GPT_5_1_CODEX_MAX = "GPT_5_1_CODEX_MAX",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX';
+type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES' | 'GPT_EXP_RESPONSES_2' | 'GPT_EXP_RESPONSES_3' | 'GPT_5_1_CODEX_MAX' | 'GPT_5_2_2025_12_11';
 interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: V1ResponsesOutputMessage;
@@ -3865,10 +3869,11 @@ declare enum ResponsesModel {
     MODEL_UNSPECIFIED = "MODEL_UNSPECIFIED",
     GPT_5_2025_08_07_RESPONSES = "GPT_5_2025_08_07_RESPONSES",
     GPT_5_MINI_2025_08_07_RESPONSES = "GPT_5_MINI_2025_08_07_RESPONSES",
-    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES"
+    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES",
+    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
 }
 /** @enumType */
-type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES';
+type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'GPT_5_2_2025_12_11';
 interface ResponsesOutput extends ResponsesOutputOutputOneOf {
     /** An output message from the model. */
     outputMessage?: ResponsesOutputMessage;
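Both responses-model enums pick up the same GPT_5_2_2025_12_11 value, and their ...WithLiterals unions are extended to match, so call sites written against 1.0.34 keep compiling. A minimal sketch of that effect, again mirroring the declarations locally instead of importing them:

// Abbreviated local mirror of the ResponsesModel typings above (assumption: not the package export).
enum ResponsesModel {
    MODEL_UNSPECIFIED = "MODEL_UNSPECIFIED",
    GPT_5_2025_08_07_RESPONSES = "GPT_5_2025_08_07_RESPONSES",
    GPT_5_MINI_2025_08_07_RESPONSES = "GPT_5_MINI_2025_08_07_RESPONSES",
    GPT_5_NANO_2025_08_07_RESPONSES = "GPT_5_NANO_2025_08_07_RESPONSES",
    GPT_5_2_2025_12_11 = "GPT_5_2_2025_12_11"
}
type ResponsesModelWithLiterals = ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'GPT_5_2_2025_12_11';

// Pre-existing values remain valid; the new model is simply one more accepted literal.
const previous: ResponsesModelWithLiterals = ResponsesModel.GPT_5_NANO_2025_08_07_RESPONSES;
const added: ResponsesModelWithLiterals = 'GPT_5_2_2025_12_11';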
@@ -1194,6 +1194,9 @@ var OpenAiImageModel = /* @__PURE__ */ ((OpenAiImageModel2) => {
   OpenAiImageModel2["UNKNOWN_IMAGE_CREATION_MODEL"] = "UNKNOWN_IMAGE_CREATION_MODEL";
   OpenAiImageModel2["GPT_4O_IMAGE"] = "GPT_4O_IMAGE";
   OpenAiImageModel2["GPT_IMAGE_1"] = "GPT_IMAGE_1";
+  OpenAiImageModel2["GPT_IMAGE_EXP"] = "GPT_IMAGE_EXP";
+  OpenAiImageModel2["GPT_IMAGE_EXP_2"] = "GPT_IMAGE_EXP_2";
+  OpenAiImageModel2["GPT_IMAGE_EXP_3"] = "GPT_IMAGE_EXP_3";
   return OpenAiImageModel2;
 })(OpenAiImageModel || {});
 var V1ChatCompletionMessageMessageRole = /* @__PURE__ */ ((V1ChatCompletionMessageMessageRole2) => {
@@ -1231,6 +1234,7 @@ var V1ResponsesModel = /* @__PURE__ */ ((V1ResponsesModel2) => {
   V1ResponsesModel2["GPT_EXP_RESPONSES_2"] = "GPT_EXP_RESPONSES_2";
   V1ResponsesModel2["GPT_EXP_RESPONSES_3"] = "GPT_EXP_RESPONSES_3";
   V1ResponsesModel2["GPT_5_1_CODEX_MAX"] = "GPT_5_1_CODEX_MAX";
+  V1ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return V1ResponsesModel2;
 })(V1ResponsesModel || {});
 var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
@@ -1238,6 +1242,7 @@ var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
   ResponsesModel2["GPT_5_2025_08_07_RESPONSES"] = "GPT_5_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_MINI_2025_08_07_RESPONSES"] = "GPT_5_MINI_2025_08_07_RESPONSES";
   ResponsesModel2["GPT_5_NANO_2025_08_07_RESPONSES"] = "GPT_5_NANO_2025_08_07_RESPONSES";
+  ResponsesModel2["GPT_5_2_2025_12_11"] = "GPT_5_2_2025_12_11";
   return ResponsesModel2;
 })(ResponsesModel || {});
 var TextBisonModel = /* @__PURE__ */ ((TextBisonModel2) => {
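Each typings change above has a matching runtime entry in the build output: the emitted enums are plain string-to-string objects built by a /* @__PURE__ */ IIFE, so the new members become ordinary property lookups. A reduced sketch of that emitted pattern (members elided; the real output lives in the package/build/* files listed above):

// Simplified reproduction of the emitted pattern, not a verbatim copy of the build output.
var OpenAiImageModel = /* @__PURE__ */ ((OpenAiImageModel2) => {
  OpenAiImageModel2["GPT_IMAGE_EXP"] = "GPT_IMAGE_EXP";
  // ...remaining members elided...
  return OpenAiImageModel2;
})(OpenAiImageModel || {});

// OpenAiImageModel.GPT_IMAGE_EXP === "GPT_IMAGE_EXP"; keys equal values, and the
// /* @__PURE__ */ annotation lets bundlers drop the whole object when it is unused.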