@orq-ai/node 4.1.0-rc.39 → 4.1.0-rc.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. package/bin/mcp-server.js +662 -352
  2. package/bin/mcp-server.js.map +49 -49
  3. package/examples/package-lock.json +1 -1
  4. package/jsr.json +1 -1
  5. package/lib/config.d.ts +2 -2
  6. package/lib/config.js +2 -2
  7. package/mcp-server/mcp-server.js +1 -1
  8. package/mcp-server/server.js +1 -1
  9. package/models/components/conversationresponse.js +2 -2
  10. package/models/components/partdoneevent.js +2 -2
  11. package/models/components/reasoningpart.js +2 -2
  12. package/models/operations/createagentrequest.d.ts +370 -42
  13. package/models/operations/createagentrequest.d.ts.map +1 -1
  14. package/models/operations/createagentrequest.js +277 -48
  15. package/models/operations/createagentrequest.js.map +1 -1
  16. package/models/operations/createbudget.js +2 -2
  17. package/models/operations/createcontact.js +2 -2
  18. package/models/operations/createconversation.js +2 -2
  19. package/models/operations/createdataset.js +2 -2
  20. package/models/operations/createdatasetitem.js +8 -8
  21. package/models/operations/createdatasource.js +2 -2
  22. package/models/operations/createeval.js +28 -28
  23. package/models/operations/createprompt.d.ts +10 -10
  24. package/models/operations/createprompt.d.ts.map +1 -1
  25. package/models/operations/createprompt.js +13 -13
  26. package/models/operations/createprompt.js.map +1 -1
  27. package/models/operations/createtool.js +12 -12
  28. package/models/operations/fileget.js +2 -2
  29. package/models/operations/filelist.js +2 -2
  30. package/models/operations/fileupload.js +2 -2
  31. package/models/operations/generateconversationname.js +2 -2
  32. package/models/operations/getalltools.js +12 -12
  33. package/models/operations/getbudget.js +2 -2
  34. package/models/operations/getevals.js +28 -28
  35. package/models/operations/listagents.d.ts +96 -4
  36. package/models/operations/listagents.d.ts.map +1 -1
  37. package/models/operations/listagents.js +57 -7
  38. package/models/operations/listagents.js.map +1 -1
  39. package/models/operations/listbudgets.js +2 -2
  40. package/models/operations/listcontacts.js +2 -2
  41. package/models/operations/listdatasetdatapoints.js +8 -8
  42. package/models/operations/listdatasets.js +2 -2
  43. package/models/operations/listdatasources.js +2 -2
  44. package/models/operations/retrieveagentrequest.d.ts +96 -4
  45. package/models/operations/retrieveagentrequest.d.ts.map +1 -1
  46. package/models/operations/retrieveagentrequest.js +59 -7
  47. package/models/operations/retrieveagentrequest.js.map +1 -1
  48. package/models/operations/retrievecontact.js +2 -2
  49. package/models/operations/retrieveconversation.js +2 -2
  50. package/models/operations/retrievedatapoint.js +8 -8
  51. package/models/operations/retrievedataset.js +2 -2
  52. package/models/operations/retrievedatasource.js +2 -2
  53. package/models/operations/retrievetool.js +12 -12
  54. package/models/operations/runagent.d.ts +96 -4
  55. package/models/operations/runagent.d.ts.map +1 -1
  56. package/models/operations/runagent.js +65 -11
  57. package/models/operations/runagent.js.map +1 -1
  58. package/models/operations/streamrunagent.d.ts +96 -4
  59. package/models/operations/streamrunagent.d.ts.map +1 -1
  60. package/models/operations/streamrunagent.js +63 -11
  61. package/models/operations/streamrunagent.js.map +1 -1
  62. package/models/operations/updateagent.d.ts +370 -42
  63. package/models/operations/updateagent.d.ts.map +1 -1
  64. package/models/operations/updateagent.js +276 -48
  65. package/models/operations/updateagent.js.map +1 -1
  66. package/models/operations/updatebudget.js +2 -2
  67. package/models/operations/updatecontact.js +2 -2
  68. package/models/operations/updateconversation.js +2 -2
  69. package/models/operations/updatedatapoint.js +8 -8
  70. package/models/operations/updatedataset.js +2 -2
  71. package/models/operations/updatedatasource.js +2 -2
  72. package/models/operations/updateeval.js +28 -28
  73. package/models/operations/updatetool.js +14 -14
  74. package/package.json +1 -1
  75. package/src/lib/config.ts +2 -2
  76. package/src/mcp-server/mcp-server.ts +1 -1
  77. package/src/mcp-server/server.ts +1 -1
  78. package/src/models/components/conversationresponse.ts +2 -2
  79. package/src/models/components/partdoneevent.ts +2 -2
  80. package/src/models/components/reasoningpart.ts +2 -2
  81. package/src/models/operations/createagentrequest.ts +696 -86
  82. package/src/models/operations/createbudget.ts +2 -2
  83. package/src/models/operations/createcontact.ts +2 -2
  84. package/src/models/operations/createconversation.ts +2 -2
  85. package/src/models/operations/createdataset.ts +2 -2
  86. package/src/models/operations/createdatasetitem.ts +8 -8
  87. package/src/models/operations/createdatasource.ts +2 -2
  88. package/src/models/operations/createeval.ts +28 -28
  89. package/src/models/operations/createprompt.ts +25 -23
  90. package/src/models/operations/createtool.ts +12 -12
  91. package/src/models/operations/fileget.ts +2 -2
  92. package/src/models/operations/filelist.ts +2 -2
  93. package/src/models/operations/fileupload.ts +2 -2
  94. package/src/models/operations/generateconversationname.ts +2 -2
  95. package/src/models/operations/getalltools.ts +12 -12
  96. package/src/models/operations/getbudget.ts +2 -2
  97. package/src/models/operations/getevals.ts +28 -28
  98. package/src/models/operations/listagents.ts +122 -8
  99. package/src/models/operations/listbudgets.ts +2 -2
  100. package/src/models/operations/listcontacts.ts +2 -2
  101. package/src/models/operations/listdatasetdatapoints.ts +8 -8
  102. package/src/models/operations/listdatasets.ts +2 -2
  103. package/src/models/operations/listdatasources.ts +2 -2
  104. package/src/models/operations/retrieveagentrequest.ts +130 -8
  105. package/src/models/operations/retrievecontact.ts +2 -2
  106. package/src/models/operations/retrieveconversation.ts +2 -2
  107. package/src/models/operations/retrievedatapoint.ts +8 -8
  108. package/src/models/operations/retrievedataset.ts +2 -2
  109. package/src/models/operations/retrievedatasource.ts +2 -2
  110. package/src/models/operations/retrievetool.ts +12 -12
  111. package/src/models/operations/runagent.ts +126 -10
  112. package/src/models/operations/streamrunagent.ts +128 -10
  113. package/src/models/operations/updateagent.ts +698 -86
  114. package/src/models/operations/updatebudget.ts +2 -2
  115. package/src/models/operations/updatecontact.ts +2 -2
  116. package/src/models/operations/updateconversation.ts +2 -2
  117. package/src/models/operations/updatedatapoint.ts +8 -8
  118. package/src/models/operations/updatedataset.ts +2 -2
  119. package/src/models/operations/updatedatasource.ts +2 -2
  120. package/src/models/operations/updateeval.ts +28 -28
  121. package/src/models/operations/updatetool.ts +14 -14
package/src/models/operations/runagent.ts
@@ -117,6 +117,42 @@ export type RunAgentModelConfigurationResponseFormat =
   | RunAgentResponseFormatJSONObject
   | RunAgentResponseFormatJSONSchema;
 
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export const RunAgentModelConfigurationReasoningEffort = {
+  None: "none",
+  Minimal: "minimal",
+  Low: "low",
+  Medium: "medium",
+  High: "high",
+  Xhigh: "xhigh",
+} as const;
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export type RunAgentModelConfigurationReasoningEffort = ClosedEnum<
+  typeof RunAgentModelConfigurationReasoningEffort
+>;
+
 /**
  * Up to 4 sequences where the API will stop generating further tokens.
  */
@@ -233,9 +269,18 @@ export type RunAgentModelConfigurationParameters = {
     | RunAgentResponseFormatJSONSchema
     | undefined;
   /**
-   * Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   *
+   * @remarks
+   *
+   * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+   * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+   * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+   * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+   *
+   * Any of "none", "minimal", "low", "medium", "high", "xhigh".
    */
-  reasoningEffort?: string | undefined;
+  reasoningEffort?: RunAgentModelConfigurationReasoningEffort | undefined;
   /**
    * Adjusts response verbosity. Lower levels yield shorter answers.
    */
@@ -425,6 +470,42 @@ export type RunAgentFallbackModelConfigurationResponseFormat =
   | RunAgentResponseFormatAgentsJSONObject
   | RunAgentResponseFormatAgentsRequestJSONSchema;
 
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export const RunAgentFallbackModelConfigurationReasoningEffort = {
+  None: "none",
+  Minimal: "minimal",
+  Low: "low",
+  Medium: "medium",
+  High: "high",
+  Xhigh: "xhigh",
+} as const;
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export type RunAgentFallbackModelConfigurationReasoningEffort = ClosedEnum<
+  typeof RunAgentFallbackModelConfigurationReasoningEffort
+>;
+
 /**
  * Up to 4 sequences where the API will stop generating further tokens.
  */
@@ -545,9 +626,20 @@ export type RunAgentFallbackModelConfigurationParameters = {
     | RunAgentResponseFormatAgentsRequestJSONSchema
     | undefined;
   /**
-   * Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   *
+   * @remarks
+   *
+   * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+   * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+   * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+   * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+   *
+   * Any of "none", "minimal", "low", "medium", "high", "xhigh".
    */
-  reasoningEffort?: string | undefined;
+  reasoningEffort?:
+    | RunAgentFallbackModelConfigurationReasoningEffort
+    | undefined;
   /**
    * Adjusts response verbosity. Lower levels yield shorter answers.
    */
@@ -1852,6 +1944,15 @@ export function runAgentModelConfigurationResponseFormatFromJSON(
   );
 }
 
+/** @internal */
+export const RunAgentModelConfigurationReasoningEffort$inboundSchema:
+  z.ZodNativeEnum<typeof RunAgentModelConfigurationReasoningEffort> = z
+    .nativeEnum(RunAgentModelConfigurationReasoningEffort);
+/** @internal */
+export const RunAgentModelConfigurationReasoningEffort$outboundSchema:
+  z.ZodNativeEnum<typeof RunAgentModelConfigurationReasoningEffort> =
+    RunAgentModelConfigurationReasoningEffort$inboundSchema;
+
 /** @internal */
 export const RunAgentModelConfigurationStop$inboundSchema: z.ZodType<
   RunAgentModelConfigurationStop,
@@ -2160,7 +2261,8 @@ export const RunAgentModelConfigurationParameters$inboundSchema: z.ZodType<
     z.lazy(() => RunAgentResponseFormatJSONObject$inboundSchema),
     z.lazy(() => RunAgentResponseFormatJSONSchema$inboundSchema),
   ]).optional(),
-  reasoning_effort: z.string().optional(),
+  reasoning_effort: RunAgentModelConfigurationReasoningEffort$inboundSchema
+    .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -2254,7 +2356,8 @@ export const RunAgentModelConfigurationParameters$outboundSchema: z.ZodType<
     z.lazy(() => RunAgentResponseFormatJSONObject$outboundSchema),
     z.lazy(() => RunAgentResponseFormatJSONSchema$outboundSchema),
   ]).optional(),
-  reasoningEffort: z.string().optional(),
+  reasoningEffort: RunAgentModelConfigurationReasoningEffort$outboundSchema
+    .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -2788,6 +2891,15 @@ export function runAgentFallbackModelConfigurationResponseFormatFromJSON(
   );
 }
 
+/** @internal */
+export const RunAgentFallbackModelConfigurationReasoningEffort$inboundSchema:
+  z.ZodNativeEnum<typeof RunAgentFallbackModelConfigurationReasoningEffort> = z
+    .nativeEnum(RunAgentFallbackModelConfigurationReasoningEffort);
+/** @internal */
+export const RunAgentFallbackModelConfigurationReasoningEffort$outboundSchema:
+  z.ZodNativeEnum<typeof RunAgentFallbackModelConfigurationReasoningEffort> =
+    RunAgentFallbackModelConfigurationReasoningEffort$inboundSchema;
+
 /** @internal */
 export const RunAgentFallbackModelConfigurationStop$inboundSchema: z.ZodType<
   RunAgentFallbackModelConfigurationStop,
@@ -3118,7 +3230,9 @@ export const RunAgentFallbackModelConfigurationParameters$inboundSchema:
     z.lazy(() => RunAgentResponseFormatAgentsJSONObject$inboundSchema),
     z.lazy(() => RunAgentResponseFormatAgentsRequestJSONSchema$inboundSchema),
   ]).optional(),
-  reasoning_effort: z.string().optional(),
+  reasoning_effort:
+    RunAgentFallbackModelConfigurationReasoningEffort$inboundSchema
+      .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -3217,7 +3331,9 @@ export const RunAgentFallbackModelConfigurationParameters$outboundSchema:
       RunAgentResponseFormatAgentsRequestJSONSchema$outboundSchema
     ),
   ]).optional(),
-  reasoningEffort: z.string().optional(),
+  reasoningEffort:
+    RunAgentFallbackModelConfigurationReasoningEffort$outboundSchema
+      .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -3874,7 +3990,7 @@ export function schemaFromJSON(
 /** @internal */
 export const Tools$inboundSchema: z.ZodType<Tools, z.ZodTypeDef, unknown> = z
   .object({
-    id: z.string().default("01KC8FGRK1CMK8G1B9P7JRFYJD"),
+    id: z.string().default("01KC8Y89E2V63MW10QZ0FDS8J0"),
     name: z.string(),
     description: z.string().optional(),
     schema: z.lazy(() => Schema$inboundSchema),
@@ -3893,7 +4009,7 @@ export const Tools$outboundSchema: z.ZodType<
   z.ZodTypeDef,
   Tools
 > = z.object({
-  id: z.string().default("01KC8FGRK1CMK8G1B9P7JRFYJD"),
+  id: z.string().default("01KC8Y89E2V63MW10QZ0FDS8J0"),
   name: z.string(),
   description: z.string().optional(),
   schema: z.lazy(() => Schema$outboundSchema),
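Taken together, the runagent.ts hunks above change `reasoningEffort` from a free-form string into a closed enum, so out-of-range values now fail both TypeScript checking and the generated Zod schemas instead of being passed through. A minimal usage sketch of the new shape; the import path is an assumption about how this generated module is re-exported, but the exported names are taken from the diff:

```typescript
import {
  RunAgentModelConfigurationReasoningEffort,
  type RunAgentModelConfigurationParameters,
} from "@orq-ai/node/models/operations"; // import path is an assumption

// In rc.39 any string compiled; in rc.40 only the closed-enum values do:
// "none" | "minimal" | "low" | "medium" | "high" | "xhigh".
const parameters: Partial<RunAgentModelConfigurationParameters> = {
  reasoningEffort: RunAgentModelConfigurationReasoningEffort.Low, // "low"
};

// A value outside the enum, e.g. "ultra", is now a compile-time error here
// and is also rejected when the inbound/outbound Zod schemas parse it.
```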
package/src/models/operations/streamrunagent.ts
@@ -117,6 +117,42 @@ export type StreamRunAgentModelConfigurationResponseFormat =
   | StreamRunAgentResponseFormatJSONObject
   | StreamRunAgentResponseFormatJSONSchema;
 
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export const StreamRunAgentModelConfigurationReasoningEffort = {
+  None: "none",
+  Minimal: "minimal",
+  Low: "low",
+  Medium: "medium",
+  High: "high",
+  Xhigh: "xhigh",
+} as const;
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export type StreamRunAgentModelConfigurationReasoningEffort = ClosedEnum<
+  typeof StreamRunAgentModelConfigurationReasoningEffort
+>;
+
 /**
  * Up to 4 sequences where the API will stop generating further tokens.
  */
@@ -237,9 +273,18 @@ export type StreamRunAgentModelConfigurationParameters = {
     | StreamRunAgentResponseFormatJSONSchema
     | undefined;
   /**
-   * Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   *
+   * @remarks
+   *
+   * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+   * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+   * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+   * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+   *
+   * Any of "none", "minimal", "low", "medium", "high", "xhigh".
    */
-  reasoningEffort?: string | undefined;
+  reasoningEffort?: StreamRunAgentModelConfigurationReasoningEffort | undefined;
   /**
    * Adjusts response verbosity. Lower levels yield shorter answers.
    */
@@ -440,6 +485,41 @@ export type StreamRunAgentFallbackModelConfigurationResponseFormat =
   | StreamRunAgentResponseFormatAgentsJSONObject
   | StreamRunAgentResponseFormatAgentsRequestJSONSchema;
 
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export const StreamRunAgentFallbackModelConfigurationReasoningEffort = {
+  None: "none",
+  Minimal: "minimal",
+  Low: "low",
+  Medium: "medium",
+  High: "high",
+  Xhigh: "xhigh",
+} as const;
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ *
+ * @remarks
+ *
+ * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+ * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+ * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+ * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ *
+ * Any of "none", "minimal", "low", "medium", "high", "xhigh".
+ */
+export type StreamRunAgentFallbackModelConfigurationReasoningEffort =
+  ClosedEnum<typeof StreamRunAgentFallbackModelConfigurationReasoningEffort>;
+
 /**
  * Up to 4 sequences where the API will stop generating further tokens.
  */
@@ -562,9 +642,20 @@ export type StreamRunAgentFallbackModelConfigurationParameters = {
     | StreamRunAgentResponseFormatAgentsRequestJSONSchema
     | undefined;
   /**
-   * Constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   *
+   * @remarks
+   *
+   * - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.
+   * - All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.
+   * - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+   * - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+   *
+   * Any of "none", "minimal", "low", "medium", "high", "xhigh".
    */
-  reasoningEffort?: string | undefined;
+  reasoningEffort?:
+    | StreamRunAgentFallbackModelConfigurationReasoningEffort
+    | undefined;
   /**
    * Adjusts response verbosity. Lower levels yield shorter answers.
    */
@@ -1829,6 +1920,15 @@ export function streamRunAgentModelConfigurationResponseFormatFromJSON(
   );
 }
 
+/** @internal */
+export const StreamRunAgentModelConfigurationReasoningEffort$inboundSchema:
+  z.ZodNativeEnum<typeof StreamRunAgentModelConfigurationReasoningEffort> = z
+    .nativeEnum(StreamRunAgentModelConfigurationReasoningEffort);
+/** @internal */
+export const StreamRunAgentModelConfigurationReasoningEffort$outboundSchema:
+  z.ZodNativeEnum<typeof StreamRunAgentModelConfigurationReasoningEffort> =
+    StreamRunAgentModelConfigurationReasoningEffort$inboundSchema;
+
 /** @internal */
 export const StreamRunAgentModelConfigurationStop$inboundSchema: z.ZodType<
   StreamRunAgentModelConfigurationStop,
@@ -2153,7 +2253,9 @@ export const StreamRunAgentModelConfigurationParameters$inboundSchema:
     z.lazy(() => StreamRunAgentResponseFormatJSONObject$inboundSchema),
     z.lazy(() => StreamRunAgentResponseFormatJSONSchema$inboundSchema),
   ]).optional(),
-  reasoning_effort: z.string().optional(),
+  reasoning_effort:
+    StreamRunAgentModelConfigurationReasoningEffort$inboundSchema
+      .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -2250,7 +2352,8 @@ export const StreamRunAgentModelConfigurationParameters$outboundSchema:
     z.lazy(() => StreamRunAgentResponseFormatJSONObject$outboundSchema),
     z.lazy(() => StreamRunAgentResponseFormatJSONSchema$outboundSchema),
   ]).optional(),
-  reasoningEffort: z.string().optional(),
+  reasoningEffort:
+    StreamRunAgentModelConfigurationReasoningEffort$outboundSchema.optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -2812,6 +2915,17 @@ export function streamRunAgentFallbackModelConfigurationResponseFormatFromJSON(
   );
 }
 
+/** @internal */
+export const StreamRunAgentFallbackModelConfigurationReasoningEffort$inboundSchema:
+  z.ZodNativeEnum<
+    typeof StreamRunAgentFallbackModelConfigurationReasoningEffort
+  > = z.nativeEnum(StreamRunAgentFallbackModelConfigurationReasoningEffort);
+/** @internal */
+export const StreamRunAgentFallbackModelConfigurationReasoningEffort$outboundSchema:
+  z.ZodNativeEnum<
+    typeof StreamRunAgentFallbackModelConfigurationReasoningEffort
+  > = StreamRunAgentFallbackModelConfigurationReasoningEffort$inboundSchema;
+
 /** @internal */
 export const StreamRunAgentFallbackModelConfigurationStop$inboundSchema:
   z.ZodType<
@@ -3158,7 +3272,9 @@ export const StreamRunAgentFallbackModelConfigurationParameters$inboundSchema:
       StreamRunAgentResponseFormatAgentsRequestJSONSchema$inboundSchema
     ),
   ]).optional(),
-  reasoning_effort: z.string().optional(),
+  reasoning_effort:
+    StreamRunAgentFallbackModelConfigurationReasoningEffort$inboundSchema
+      .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -3262,7 +3378,9 @@ export const StreamRunAgentFallbackModelConfigurationParameters$outboundSchema:
      StreamRunAgentResponseFormatAgentsRequestJSONSchema$outboundSchema
    ),
   ]).optional(),
-  reasoningEffort: z.string().optional(),
+  reasoningEffort:
+    StreamRunAgentFallbackModelConfigurationReasoningEffort$outboundSchema
+      .optional(),
   verbosity: z.string().optional(),
   seed: z.nullable(z.number()).optional(),
   stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(),
@@ -3973,7 +4091,7 @@ export const AgentToolInputRunTools$inboundSchema: z.ZodType<
   z.ZodTypeDef,
   unknown
 > = z.object({
-  id: z.string().default("01KC8FGRMYM3VJ9EZZQGSV5TXA"),
+  id: z.string().default("01KC8Y89FWNY82P5TAEYJ320V5"),
   name: z.string(),
   description: z.string().optional(),
   schema: z.lazy(() => AgentToolInputRunSchema$inboundSchema),
@@ -3992,7 +4110,7 @@ export const AgentToolInputRunTools$outboundSchema: z.ZodType<
   z.ZodTypeDef,
   AgentToolInputRunTools
 > = z.object({
-  id: z.string().default("01KC8FGRMYM3VJ9EZZQGSV5TXA"),
+  id: z.string().default("01KC8Y89FWNY82P5TAEYJ320V5"),
   name: z.string(),
   description: z.string().optional(),
   schema: z.lazy(() => AgentToolInputRunSchema$outboundSchema),
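The streamrunagent.ts hunks mirror the same change for the streaming variant: `reasoning_effort` is now parsed with `z.nativeEnum(...)` instead of `z.string()`. A small sketch of the runtime effect, assuming the generated `$inboundSchema` export is reachable from the same operations module (the import path is an assumption; the behavior shown is standard Zod `nativeEnum` semantics):

```typescript
import { StreamRunAgentModelConfigurationReasoningEffort$inboundSchema } from "@orq-ai/node/models/operations"; // path is an assumption

// With z.string() in rc.39 both values parsed; with z.nativeEnum in rc.40
// only members of the closed enum pass validation.
const ok = StreamRunAgentModelConfigurationReasoningEffort$inboundSchema
  .safeParse("xhigh");
const bad = StreamRunAgentModelConfigurationReasoningEffort$inboundSchema
  .safeParse("ultra");

console.log(ok.success);  // true  ("xhigh" is in the enum)
console.log(bad.success); // false ("ultra" is rejected)
```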