@wix/auto_sdk_ai-gateway_prompts 1.0.31 → 1.0.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -190,6 +190,11 @@ interface TextContent {
  * @maxLength 1000000
  */
  generatedText?: string | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface MediaContent {
  /**
@@ -202,6 +207,11 @@ interface MediaContent {
  * @maxLength 5000
  */
  url?: string;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface ThinkingTextContent {
  /**
@@ -209,6 +219,11 @@ interface ThinkingTextContent {
  * @maxLength 1000000
  */
  thoughtText?: string | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface ToolUseContent {
  /**
@@ -223,6 +238,11 @@ interface ToolUseContent {
  name?: string;
  /** Tool use input */
  input?: Record<string, any> | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface V1TokenUsage {
  /** Number of input tokens used in the request. */
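The four hunks above add the same optional, opaque thoughtSignature field to each content variant. A minimal sketch of how a caller might round-trip it between turns, using a local mirror of the ThinkingTextContent shape; the ThinkingTextContentLike type and echoThinkingContent helper are hypothetical names for illustration, and no specific gateway call is implied:

// Local mirror of the ThinkingTextContent fields shown in the diff (illustration only).
interface ThinkingTextContentLike {
  /** Thought text returned by the model (maxLength 1000000). */
  thoughtText?: string | null;
  /** Opaque, base64-encoded signature for the thought (maxLength 10000000). */
  thoughtSignature?: string | null;
}

// Carry the signature from a previous turn into the next request unchanged,
// so the gateway can match it to the thought it issued earlier.
function echoThinkingContent(previous: ThinkingTextContentLike): ThinkingTextContentLike {
  return {
    thoughtText: previous.thoughtText ?? null,
    // The signature is opaque: pass it back verbatim, never decode or edit it.
    thoughtSignature: previous.thoughtSignature ?? null,
  };
}

// Example usage with a value shaped like a response content part.
const fromResponse: ThinkingTextContentLike = {
  thoughtText: "Reasoning summary...",
  thoughtSignature: "c2lnbmF0dXJlLWJ5dGVz", // base64, as returned by the model
};
const nextTurnContent = echoThinkingContent(fromResponse);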
@@ -946,7 +966,7 @@ interface CandidateContentPart {
  thought?: boolean | null;
  /**
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
- * @maxLength 1000000
+ * @maxLength 10000000
  */
  thoughtSignature?: string | null;
  }
@@ -3436,10 +3456,11 @@ declare enum V1ResponsesModel {
  GPT_5_CODEX = "GPT_5_CODEX",
  GPT_5_1_2025_11_13 = "GPT_5_1_2025_11_13",
  GPT_5_1_CODEX = "GPT_5_1_CODEX",
- GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI"
+ GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI",
+ GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES"
  }
  /** @enumType */
- type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI';
+ type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES';
  interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {
  /** An output message from the model. */
  outputMessage?: V1ResponsesOutputMessage;
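The new GPT_EXP_RESPONSES member is accepted both as an enum value and as a plain string literal through V1ResponsesModelWithLiterals. A minimal sketch, using a local mirror trimmed to two members (the real definitions live in the package's generated typings):

// Local, trimmed mirror of the generated enum and literal union (illustration only).
enum V1ResponsesModel {
  GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI",
  GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES",
}
type V1ResponsesModelWithLiterals = V1ResponsesModel | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES';

// Both forms type-check against the widened union:
const viaEnum: V1ResponsesModelWithLiterals = V1ResponsesModel.GPT_EXP_RESPONSES;
const viaLiteral: V1ResponsesModelWithLiterals = 'GPT_EXP_RESPONSES';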
@@ -4962,7 +4983,7 @@ interface V1ContentPart {
  thought?: boolean | null;
  /**
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
- * @maxLength 1000000
+ * @maxLength 10000000
  */
  thoughtSignature?: string | null;
  }
@@ -1227,6 +1227,7 @@ var V1ResponsesModel = /* @__PURE__ */ ((V1ResponsesModel2) => {
  V1ResponsesModel2["GPT_5_1_2025_11_13"] = "GPT_5_1_2025_11_13";
  V1ResponsesModel2["GPT_5_1_CODEX"] = "GPT_5_1_CODEX";
  V1ResponsesModel2["GPT_5_1_CODEX_MINI"] = "GPT_5_1_CODEX_MINI";
+ V1ResponsesModel2["GPT_EXP_RESPONSES"] = "GPT_EXP_RESPONSES";
  return V1ResponsesModel2;
  })(V1ResponsesModel || {});
  var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {
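The compiled output mirrors the typing change: the new member is added to the plain string-keyed enum object, so runtime value checks pick it up. A small, self-contained sketch of that pattern; ResponsesModelRuntime and isKnownModel are hypothetical names, not the package's actual exports:

// Minimal mirror of the compiled enum object from the hunk above, trimmed to two members.
const ResponsesModelRuntime = ((m: Record<string, string>) => {
  m["GPT_5_1_CODEX_MINI"] = "GPT_5_1_CODEX_MINI";
  m["GPT_EXP_RESPONSES"] = "GPT_EXP_RESPONSES";
  return m;
})({});

// Because the new member exists on the runtime object, plain value checks accept it.
const isKnownModel = (value: string): boolean =>
  Object.values(ResponsesModelRuntime).includes(value);

console.log(isKnownModel("GPT_EXP_RESPONSES")); // true
console.log(isKnownModel("SOME_OTHER_MODEL")); // false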