@wix/auto_sdk_ai-gateway_prompts 1.0.31 → 1.0.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -188,6 +188,11 @@ interface TextContent {
   * @maxLength 1000000
   */
  generatedText?: string | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface MediaContent {
  /**
@@ -200,6 +205,11 @@ interface MediaContent {
   * @maxLength 5000
   */
  url?: string;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface ThinkingTextContent {
  /**
@@ -207,6 +217,11 @@ interface ThinkingTextContent {
   * @maxLength 1000000
   */
  thoughtText?: string | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface ToolUseContent {
  /**
@@ -221,6 +236,11 @@ interface ToolUseContent {
  name?: string;
  /** Tool use input */
  input?: Record<string, any> | null;
+ /**
+ * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
+ * @maxLength 10000000
+ */
+ thoughtSignature?: string | null;
  }
  interface V1TokenUsage {
  /** Number of input tokens used in the request. */
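
The four hunks above add the same optional `thoughtSignature` field to `TextContent`, `MediaContent`, `ThinkingTextContent`, and `ToolUseContent`. Below is a minimal TypeScript sketch of the intended round-trip, assuming callers simply echo the opaque signature from a previous response's content parts back on the follow-up request; `ThoughtBearing` and `carryThoughtSignatures` are illustrative names, not part of the package:

// Illustrative only: a narrowed view of the content parts shown in this diff.
// The package's real TextContent / ThinkingTextContent / ToolUseContent types carry more fields.
interface ThoughtBearing {
  thoughtSignature?: string | null;
}

// Hypothetical helper: copy opaque thought signatures from a previous response's
// parts onto the corresponding parts of the next request so they can be reused.
function carryThoughtSignatures(
  previous: ThoughtBearing[],
  next: ThoughtBearing[]
): ThoughtBearing[] {
  return next.map((part, i) => ({
    ...part,
    // Signatures are opaque base64 strings; pass them back untouched.
    thoughtSignature: previous[i]?.thoughtSignature ?? part.thoughtSignature,
  }));
}

Index-based pairing is a simplification for the sketch; the diff does not say how parts should be correlated across requests.
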
@@ -944,7 +964,7 @@ interface CandidateContentPart {
  thought?: boolean | null;
  /**
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
- * @maxLength 1000000
+ * @maxLength 10000000
  */
  thoughtSignature?: string | null;
  }
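
Here, and in `V1ContentPart` further down, the documented `@maxLength` for `thoughtSignature` grows from 1000000 to 10000000. A small pre-flight guard under the assumption (not confirmed by the diff) that the annotation is enforced as a character-count limit on the base64 string:

// Assumed limit, taken from the @maxLength 10000000 annotation in this diff.
const THOUGHT_SIGNATURE_MAX_LENGTH = 10_000_000;

// Drop an over-long signature rather than send a value the gateway may reject
// (assumes @maxLength is validated server-side as a character count).
function clampThoughtSignature(signature: string | null | undefined): string | null {
  if (signature == null) return null;
  return signature.length <= THOUGHT_SIGNATURE_MAX_LENGTH ? signature : null;
}
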
@@ -3434,10 +3454,11 @@ declare enum V1ResponsesModel {
  GPT_5_CODEX = "GPT_5_CODEX",
  GPT_5_1_2025_11_13 = "GPT_5_1_2025_11_13",
  GPT_5_1_CODEX = "GPT_5_1_CODEX",
- GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI"
+ GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI",
+ GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES"
  }
  /** @enumType */
- type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI';
+ type V1ResponsesModelWithLiterals = V1ResponsesModel | 'MODEL_UNSPECIFIED' | 'GPT_5_2025_08_07_RESPONSES' | 'GPT_5_MINI_2025_08_07_RESPONSES' | 'GPT_5_NANO_2025_08_07_RESPONSES' | 'O3_PRO_2025_06_10' | 'O3_DEEP_RESEARCH_2025_06_26' | 'GPT_5_CODEX' | 'GPT_5_1_2025_11_13' | 'GPT_5_1_CODEX' | 'GPT_5_1_CODEX_MINI' | 'GPT_EXP_RESPONSES';
  interface V1ResponsesOutput extends V1ResponsesOutputOutputOneOf {
  /** An output message from the model. */
  outputMessage?: V1ResponsesOutputMessage;
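
Both the `V1ResponsesModel` declaration and the `V1ResponsesModelWithLiterals` union gain `GPT_EXP_RESPONSES`, so the value can be supplied either as an enum member or as its raw string literal. A self-contained sketch of that pattern; the enum and union are re-declared locally, and truncated, purely for illustration:

// Local re-declaration for illustration; the package ships the full enum.
enum V1ResponsesModel {
  GPT_5_1_CODEX_MINI = "GPT_5_1_CODEX_MINI",
  GPT_EXP_RESPONSES = "GPT_EXP_RESPONSES",
}

// The ...WithLiterals union accepts either the enum member or its raw string.
type V1ResponsesModelWithLiterals =
  | V1ResponsesModel
  | "GPT_5_1_CODEX_MINI"
  | "GPT_EXP_RESPONSES";

// Hypothetical call site: both spellings type-check against the union.
function selectModel(model: V1ResponsesModelWithLiterals): string {
  return model;
}

selectModel(V1ResponsesModel.GPT_EXP_RESPONSES);
selectModel("GPT_EXP_RESPONSES");
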
@@ -4960,7 +4981,7 @@ interface V1ContentPart {
  thought?: boolean | null;
  /**
  * Optional. An opaque signature for the thought so it can be reused in subsequent requests. A base64-encoded string.
- * @maxLength 1000000
+ * @maxLength 10000000
  */
  thoughtSignature?: string | null;
  }
@@ -1119,6 +1119,7 @@ var V1ResponsesModel = /* @__PURE__ */ ((V1ResponsesModel2) => {
  V1ResponsesModel2["GPT_5_1_2025_11_13"] = "GPT_5_1_2025_11_13";
  V1ResponsesModel2["GPT_5_1_CODEX"] = "GPT_5_1_CODEX";
  V1ResponsesModel2["GPT_5_1_CODEX_MINI"] = "GPT_5_1_CODEX_MINI";
+ V1ResponsesModel2["GPT_EXP_RESPONSES"] = "GPT_EXP_RESPONSES";
  return V1ResponsesModel2;
  })(V1ResponsesModel || {});
  var ResponsesModel = /* @__PURE__ */ ((ResponsesModel2) => {