openai 0.35.1 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +17 -0
  3. data/README.md +21 -15
  4. data/lib/openai/helpers/structured_output/union_of.rb +5 -1
  5. data/lib/openai/internal/transport/pooled_net_requester.rb +6 -2
  6. data/lib/openai/internal/type/enum.rb +6 -6
  7. data/lib/openai/models/batch_create_params.rb +9 -6
  8. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  9. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  10. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  11. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  12. data/lib/openai/models/chat_model.rb +5 -0
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  15. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  16. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  17. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  18. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  19. data/lib/openai/models/evals/run_create_params.rb +20 -12
  20. data/lib/openai/models/evals/run_create_response.rb +20 -12
  21. data/lib/openai/models/evals/run_list_response.rb +20 -12
  22. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  23. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  24. data/lib/openai/models/reasoning.rb +10 -6
  25. data/lib/openai/models/reasoning_effort.rb +10 -5
  26. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  27. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  28. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  29. data/lib/openai/models/responses/response.rb +46 -11
  30. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  31. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  32. data/lib/openai/models/responses/response_create_params.rb +42 -9
  33. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  34. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  35. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  36. data/lib/openai/models/responses/response_input_item.rb +395 -1
  37. data/lib/openai/models/responses/response_item.rb +13 -1
  38. data/lib/openai/models/responses/response_item_list.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item.rb +13 -1
  40. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  41. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  42. data/lib/openai/models/responses/tool.rb +7 -1
  43. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  44. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  45. data/lib/openai/resources/chat/completions.rb +6 -2
  46. data/lib/openai/resources/conversations/items.rb +3 -3
  47. data/lib/openai/resources/conversations.rb +1 -1
  48. data/lib/openai/resources/responses/input_items.rb +1 -1
  49. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  50. data/lib/openai/resources/responses.rb +12 -8
  51. data/lib/openai/version.rb +1 -1
  52. data/lib/openai.rb +10 -0
  53. data/manifest.yaml +1 -0
  54. data/rbi/openai/internal/transport/pooled_net_requester.rbi +6 -2
  55. data/rbi/openai/models/batch_create_params.rbi +17 -9
  56. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  57. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  58. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  59. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  60. data/rbi/openai/models/chat_model.rbi +7 -0
  61. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  62. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  63. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  64. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  65. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  66. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  67. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  68. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  69. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  70. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  71. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  72. data/rbi/openai/models/reasoning.rbi +18 -10
  73. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  74. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  75. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  76. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  77. data/rbi/openai/models/responses/response.rbi +73 -2
  78. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  79. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  80. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  81. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  82. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  83. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  84. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  85. data/rbi/openai/models/responses/response_item.rbi +4 -0
  86. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  87. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  88. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  89. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  90. data/rbi/openai/models/responses/tool.rbi +2 -0
  91. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  92. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  93. data/rbi/openai/resources/batches.rbi +4 -3
  94. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  95. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  96. data/rbi/openai/resources/chat/completions.rbi +38 -12
  97. data/rbi/openai/resources/conversations/items.rbi +4 -0
  98. data/rbi/openai/resources/conversations.rbi +4 -0
  99. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  100. data/rbi/openai/resources/responses.rbi +28 -2
  101. data/sig/openai/internal/transport/pooled_net_requester.rbs +4 -1
  102. data/sig/openai/models/batch_create_params.rbs +2 -0
  103. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  104. data/sig/openai/models/chat_model.rbs +11 -1
  105. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  106. data/sig/openai/models/reasoning_effort.rbs +2 -1
  107. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  108. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  109. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  110. data/sig/openai/models/responses/response.rbs +18 -0
  111. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  112. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  113. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  114. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  115. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  116. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  117. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  118. data/sig/openai/models/responses/response_item.rbs +4 -0
  119. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  120. data/sig/openai/models/responses/tool.rbs +2 -0
  121. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  122. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  123. data/sig/openai/resources/chat/completions.rbs +2 -0
  124. data/sig/openai/resources/responses.rbs +2 -0
  125. metadata +29 -2
@@ -422,12 +422,16 @@ module OpenAI
422
422
 
423
423
  # Constrains effort on reasoning for
424
424
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
425
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
426
- # effort can result in faster responses and fewer tokens used on reasoning in a
427
- # response.
425
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
426
+ # reasoning effort can result in faster responses and fewer tokens used on
427
+ # reasoning in a response.
428
428
  #
429
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
430
- # effort.
429
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
430
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
431
+ # calls are supported for all reasoning values in gpt-5.1.
432
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
433
+ # support `none`.
434
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
431
435
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
432
436
  attr_accessor :reasoning_effort
433
437
 
@@ -482,12 +486,16 @@ module OpenAI
482
486
  model: nil,
483
487
  # Constrains effort on reasoning for
484
488
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
485
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
486
- # effort can result in faster responses and fewer tokens used on reasoning in a
487
- # response.
489
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
490
+ # reasoning effort can result in faster responses and fewer tokens used on
491
+ # reasoning in a response.
488
492
  #
489
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
490
- # effort.
493
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
494
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
495
+ # calls are supported for all reasoning values in gpt-5.1.
496
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
497
+ # support `none`.
498
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
491
499
  reasoning_effort: nil,
492
500
  # Sampling temperature. This is a query parameter used to select responses.
493
501
  temperature: nil,
@@ -1081,12 +1089,16 @@ module OpenAI
1081
1089
 
1082
1090
  # Constrains effort on reasoning for
1083
1091
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1084
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1085
- # effort can result in faster responses and fewer tokens used on reasoning in a
1086
- # response.
1092
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1093
+ # reasoning effort can result in faster responses and fewer tokens used on
1094
+ # reasoning in a response.
1087
1095
  #
1088
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1089
- # effort.
1096
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1097
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1098
+ # calls are supported for all reasoning values in gpt-5.1.
1099
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1100
+ # support `none`.
1101
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1090
1102
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
1091
1103
  attr_accessor :reasoning_effort
1092
1104
 
@@ -1152,7 +1164,9 @@ module OpenAI
1152
1164
  OpenAI::Responses::Tool::CodeInterpreter,
1153
1165
  OpenAI::Responses::Tool::ImageGeneration,
1154
1166
  OpenAI::Responses::Tool::LocalShell,
1167
+ OpenAI::Responses::FunctionShellTool,
1155
1168
  OpenAI::Responses::CustomTool,
1169
+ OpenAI::Responses::ApplyPatchTool,
1156
1170
  OpenAI::Responses::WebSearchTool,
1157
1171
  OpenAI::Responses::WebSearchPreviewTool
1158
1172
  )
@@ -1174,7 +1188,9 @@ module OpenAI
1174
1188
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1175
1189
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1176
1190
  OpenAI::Responses::Tool::LocalShell::OrHash,
1191
+ OpenAI::Responses::FunctionShellTool::OrHash,
1177
1192
  OpenAI::Responses::CustomTool::OrHash,
1193
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1178
1194
  OpenAI::Responses::WebSearchTool::OrHash,
1179
1195
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1180
1196
  )
@@ -1209,7 +1225,9 @@ module OpenAI
1209
1225
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1210
1226
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1211
1227
  OpenAI::Responses::Tool::LocalShell::OrHash,
1228
+ OpenAI::Responses::FunctionShellTool::OrHash,
1212
1229
  OpenAI::Responses::CustomTool::OrHash,
1230
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1213
1231
  OpenAI::Responses::WebSearchTool::OrHash,
1214
1232
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1215
1233
  )
@@ -1222,12 +1240,16 @@ module OpenAI
1222
1240
  max_completion_tokens: nil,
1223
1241
  # Constrains effort on reasoning for
1224
1242
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1225
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1226
- # effort can result in faster responses and fewer tokens used on reasoning in a
1227
- # response.
1243
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1244
+ # reasoning effort can result in faster responses and fewer tokens used on
1245
+ # reasoning in a response.
1228
1246
  #
1229
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1230
- # effort.
1247
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1248
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1249
+ # calls are supported for all reasoning values in gpt-5.1.
1250
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1251
+ # support `none`.
1252
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1231
1253
  reasoning_effort: nil,
1232
1254
  # A seed value to initialize the randomness, during sampling.
1233
1255
  seed: nil,
@@ -1279,7 +1301,9 @@ module OpenAI
1279
1301
  OpenAI::Responses::Tool::CodeInterpreter,
1280
1302
  OpenAI::Responses::Tool::ImageGeneration,
1281
1303
  OpenAI::Responses::Tool::LocalShell,
1304
+ OpenAI::Responses::FunctionShellTool,
1282
1305
  OpenAI::Responses::CustomTool,
1306
+ OpenAI::Responses::ApplyPatchTool,
1283
1307
  OpenAI::Responses::WebSearchTool,
1284
1308
  OpenAI::Responses::WebSearchPreviewTool
1285
1309
  )
@@ -512,12 +512,16 @@ module OpenAI
512
512
 
513
513
  # Constrains effort on reasoning for
514
514
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
515
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
516
- # effort can result in faster responses and fewer tokens used on reasoning in a
517
- # response.
515
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
516
+ # reasoning effort can result in faster responses and fewer tokens used on
517
+ # reasoning in a response.
518
518
  #
519
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
520
- # effort.
519
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
520
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
521
+ # calls are supported for all reasoning values in gpt-5.1.
522
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
523
+ # support `none`.
524
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
521
525
  sig do
522
526
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
523
527
  end
@@ -574,12 +578,16 @@ module OpenAI
574
578
  model: nil,
575
579
  # Constrains effort on reasoning for
576
580
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
577
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
578
- # effort can result in faster responses and fewer tokens used on reasoning in a
579
- # response.
581
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
582
+ # reasoning effort can result in faster responses and fewer tokens used on
583
+ # reasoning in a response.
580
584
  #
581
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
582
- # effort.
585
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
586
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
587
+ # calls are supported for all reasoning values in gpt-5.1.
588
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
589
+ # support `none`.
590
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
583
591
  reasoning_effort: nil,
584
592
  # Sampling temperature. This is a query parameter used to select responses.
585
593
  temperature: nil,
@@ -1123,12 +1131,16 @@ module OpenAI
1123
1131
 
1124
1132
  # Constrains effort on reasoning for
1125
1133
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1126
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1127
- # effort can result in faster responses and fewer tokens used on reasoning in a
1128
- # response.
1134
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1135
+ # reasoning effort can result in faster responses and fewer tokens used on
1136
+ # reasoning in a response.
1129
1137
  #
1130
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1131
- # effort.
1138
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1139
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1140
+ # calls are supported for all reasoning values in gpt-5.1.
1141
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1142
+ # support `none`.
1143
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1132
1144
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1133
1145
  attr_accessor :reasoning_effort
1134
1146
 
@@ -1199,7 +1211,9 @@ module OpenAI
1199
1211
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1200
1212
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1201
1213
  OpenAI::Responses::Tool::LocalShell::OrHash,
1214
+ OpenAI::Responses::FunctionShellTool::OrHash,
1202
1215
  OpenAI::Responses::CustomTool::OrHash,
1216
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1203
1217
  OpenAI::Responses::WebSearchTool::OrHash,
1204
1218
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1205
1219
  )
@@ -1234,7 +1248,9 @@ module OpenAI
1234
1248
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1235
1249
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1236
1250
  OpenAI::Responses::Tool::LocalShell::OrHash,
1251
+ OpenAI::Responses::FunctionShellTool::OrHash,
1237
1252
  OpenAI::Responses::CustomTool::OrHash,
1253
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1238
1254
  OpenAI::Responses::WebSearchTool::OrHash,
1239
1255
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1240
1256
  )
@@ -1247,12 +1263,16 @@ module OpenAI
1247
1263
  max_completion_tokens: nil,
1248
1264
  # Constrains effort on reasoning for
1249
1265
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1250
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1251
- # effort can result in faster responses and fewer tokens used on reasoning in a
1252
- # response.
1266
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1267
+ # reasoning effort can result in faster responses and fewer tokens used on
1268
+ # reasoning in a response.
1253
1269
  #
1254
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1255
- # effort.
1270
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1271
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1272
+ # calls are supported for all reasoning values in gpt-5.1.
1273
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1274
+ # support `none`.
1275
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1256
1276
  reasoning_effort: nil,
1257
1277
  # A seed value to initialize the randomness, during sampling.
1258
1278
  seed: nil,
@@ -508,12 +508,16 @@ module OpenAI
508
508
 
509
509
  # Constrains effort on reasoning for
510
510
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
511
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
512
- # effort can result in faster responses and fewer tokens used on reasoning in a
513
- # response.
511
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
512
+ # reasoning effort can result in faster responses and fewer tokens used on
513
+ # reasoning in a response.
514
514
  #
515
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
516
- # effort.
515
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
516
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
517
+ # calls are supported for all reasoning values in gpt-5.1.
518
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
519
+ # support `none`.
520
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
517
521
  sig do
518
522
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
519
523
  end
@@ -570,12 +574,16 @@ module OpenAI
570
574
  model: nil,
571
575
  # Constrains effort on reasoning for
572
576
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
573
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
574
- # effort can result in faster responses and fewer tokens used on reasoning in a
575
- # response.
577
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
578
+ # reasoning effort can result in faster responses and fewer tokens used on
579
+ # reasoning in a response.
576
580
  #
577
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
578
- # effort.
581
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
582
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
583
+ # calls are supported for all reasoning values in gpt-5.1.
584
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
585
+ # support `none`.
586
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
579
587
  reasoning_effort: nil,
580
588
  # Sampling temperature. This is a query parameter used to select responses.
581
589
  temperature: nil,
@@ -1119,12 +1127,16 @@ module OpenAI
1119
1127
 
1120
1128
  # Constrains effort on reasoning for
1121
1129
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1122
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1123
- # effort can result in faster responses and fewer tokens used on reasoning in a
1124
- # response.
1130
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1131
+ # reasoning effort can result in faster responses and fewer tokens used on
1132
+ # reasoning in a response.
1125
1133
  #
1126
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1127
- # effort.
1134
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1135
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1136
+ # calls are supported for all reasoning values in gpt-5.1.
1137
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1138
+ # support `none`.
1139
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1128
1140
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1129
1141
  attr_accessor :reasoning_effort
1130
1142
 
@@ -1195,7 +1207,9 @@ module OpenAI
1195
1207
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1196
1208
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1197
1209
  OpenAI::Responses::Tool::LocalShell::OrHash,
1210
+ OpenAI::Responses::FunctionShellTool::OrHash,
1198
1211
  OpenAI::Responses::CustomTool::OrHash,
1212
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1199
1213
  OpenAI::Responses::WebSearchTool::OrHash,
1200
1214
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1201
1215
  )
@@ -1230,7 +1244,9 @@ module OpenAI
1230
1244
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1231
1245
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1232
1246
  OpenAI::Responses::Tool::LocalShell::OrHash,
1247
+ OpenAI::Responses::FunctionShellTool::OrHash,
1233
1248
  OpenAI::Responses::CustomTool::OrHash,
1249
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1234
1250
  OpenAI::Responses::WebSearchTool::OrHash,
1235
1251
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1236
1252
  )
@@ -1243,12 +1259,16 @@ module OpenAI
1243
1259
  max_completion_tokens: nil,
1244
1260
  # Constrains effort on reasoning for
1245
1261
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1246
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1247
- # effort can result in faster responses and fewer tokens used on reasoning in a
1248
- # response.
1262
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1263
+ # reasoning effort can result in faster responses and fewer tokens used on
1264
+ # reasoning in a response.
1249
1265
  #
1250
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1251
- # effort.
1266
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1267
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1268
+ # calls are supported for all reasoning values in gpt-5.1.
1269
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1270
+ # support `none`.
1271
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1252
1272
  reasoning_effort: nil,
1253
1273
  # A seed value to initialize the randomness, during sampling.
1254
1274
  seed: nil,
@@ -514,12 +514,16 @@ module OpenAI
514
514
 
515
515
  # Constrains effort on reasoning for
516
516
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
517
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
518
- # effort can result in faster responses and fewer tokens used on reasoning in a
519
- # response.
517
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
518
+ # reasoning effort can result in faster responses and fewer tokens used on
519
+ # reasoning in a response.
520
520
  #
521
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
522
- # effort.
521
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
522
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
523
+ # calls are supported for all reasoning values in gpt-5.1.
524
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
525
+ # support `none`.
526
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
523
527
  sig do
524
528
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
525
529
  end
@@ -576,12 +580,16 @@ module OpenAI
576
580
  model: nil,
577
581
  # Constrains effort on reasoning for
578
582
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
579
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
580
- # effort can result in faster responses and fewer tokens used on reasoning in a
581
- # response.
583
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
584
+ # reasoning effort can result in faster responses and fewer tokens used on
585
+ # reasoning in a response.
582
586
  #
583
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
584
- # effort.
587
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
588
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
589
+ # calls are supported for all reasoning values in gpt-5.1.
590
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
591
+ # support `none`.
592
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
585
593
  reasoning_effort: nil,
586
594
  # Sampling temperature. This is a query parameter used to select responses.
587
595
  temperature: nil,
@@ -1125,12 +1133,16 @@ module OpenAI
1125
1133
 
1126
1134
  # Constrains effort on reasoning for
1127
1135
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1128
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1129
- # effort can result in faster responses and fewer tokens used on reasoning in a
1130
- # response.
1136
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1137
+ # reasoning effort can result in faster responses and fewer tokens used on
1138
+ # reasoning in a response.
1131
1139
  #
1132
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1133
- # effort.
1140
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1141
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1142
+ # calls are supported for all reasoning values in gpt-5.1.
1143
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1144
+ # support `none`.
1145
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1134
1146
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1135
1147
  attr_accessor :reasoning_effort
1136
1148
 
@@ -1201,7 +1213,9 @@ module OpenAI
1201
1213
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1202
1214
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1203
1215
  OpenAI::Responses::Tool::LocalShell::OrHash,
1216
+ OpenAI::Responses::FunctionShellTool::OrHash,
1204
1217
  OpenAI::Responses::CustomTool::OrHash,
1218
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1205
1219
  OpenAI::Responses::WebSearchTool::OrHash,
1206
1220
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1207
1221
  )
@@ -1236,7 +1250,9 @@ module OpenAI
1236
1250
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1237
1251
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1238
1252
  OpenAI::Responses::Tool::LocalShell::OrHash,
1253
+ OpenAI::Responses::FunctionShellTool::OrHash,
1239
1254
  OpenAI::Responses::CustomTool::OrHash,
1255
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1240
1256
  OpenAI::Responses::WebSearchTool::OrHash,
1241
1257
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1242
1258
  )
@@ -1249,12 +1265,16 @@ module OpenAI
1249
1265
  max_completion_tokens: nil,
1250
1266
  # Constrains effort on reasoning for
1251
1267
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1252
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1253
- # effort can result in faster responses and fewer tokens used on reasoning in a
1254
- # response.
1268
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1269
+ # reasoning effort can result in faster responses and fewer tokens used on
1270
+ # reasoning in a response.
1255
1271
  #
1256
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1257
- # effort.
1272
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1273
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1274
+ # calls are supported for all reasoning values in `gpt-5.1`.
1275
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1276
+ # support `none`.
1277
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1258
1278
  reasoning_effort: nil,
1259
1279
  # A seed value to initialize the randomness, during sampling.
1260
1280
  seed: nil,
@@ -396,12 +396,16 @@ module OpenAI
396
396
 
397
397
  # Constrains effort on reasoning for
398
398
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
399
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
400
- # effort can result in faster responses and fewer tokens used on reasoning in a
401
- # response.
399
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
400
+ # reasoning effort can result in faster responses and fewer tokens used on
401
+ # reasoning in a response.
402
402
  #
403
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
404
- # effort.
403
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
404
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
405
+ # calls are supported for all reasoning values in gpt-5.1.
406
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
407
+ # support `none`.
408
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
405
409
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
406
410
  attr_accessor :reasoning_effort
407
411
 
@@ -432,12 +436,16 @@ module OpenAI
432
436
  max_completions_tokens: nil,
433
437
  # Constrains effort on reasoning for
434
438
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
435
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
436
- # effort can result in faster responses and fewer tokens used on reasoning in a
437
- # response.
439
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
440
+ # reasoning effort can result in faster responses and fewer tokens used on
441
+ # reasoning in a response.
438
442
  #
439
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
440
- # effort.
443
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
444
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
445
+ # calls are supported for all reasoning values in `gpt-5.1`.
446
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
447
+ # support `none`.
448
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
441
449
  reasoning_effort: nil,
442
450
  # A seed value to initialize the randomness, during sampling.
443
451
  seed: nil,
@@ -8,12 +8,16 @@ module OpenAI
8
8
 
9
9
  # Constrains effort on reasoning for
10
10
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
11
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
12
- # effort can result in faster responses and fewer tokens used on reasoning in a
13
- # response.
11
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
12
+ # reasoning effort can result in faster responses and fewer tokens used on
13
+ # reasoning in a response.
14
14
  #
15
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
16
- # effort.
15
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
16
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
17
+ # calls are supported for all reasoning values in `gpt-5.1`.
18
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
19
+ # support `none`.
20
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
17
21
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
18
22
  attr_accessor :effort
19
23
 
@@ -48,12 +52,16 @@ module OpenAI
48
52
  def self.new(
49
53
  # Constrains effort on reasoning for
50
54
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
51
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
52
- # effort can result in faster responses and fewer tokens used on reasoning in a
53
- # response.
55
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
56
+ # reasoning effort can result in faster responses and fewer tokens used on
57
+ # reasoning in a response.
54
58
  #
55
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
56
- # effort.
59
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
60
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
61
+ # calls are supported for all reasoning values in `gpt-5.1`.
62
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
63
+ # support `none`.
64
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
57
65
  effort: nil,
58
66
  # **Deprecated:** use `summary` instead.
59
67
  #
@@ -4,18 +4,23 @@ module OpenAI
4
4
  module Models
5
5
  # Constrains effort on reasoning for
6
6
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
7
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
8
- # effort can result in faster responses and fewer tokens used on reasoning in a
9
- # response.
7
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
8
+ # reasoning effort can result in faster responses and fewer tokens used on
9
+ # reasoning in a response.
10
10
  #
11
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
12
- # effort.
11
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
12
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
13
+ # calls are supported for all reasoning values in `gpt-5.1`.
14
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
15
+ # support `none`.
16
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
13
17
  module ReasoningEffort
14
18
  extend OpenAI::Internal::Type::Enum
15
19
 
16
20
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ReasoningEffort) }
17
21
  OrSymbol = T.type_alias { T.any(Symbol, String) }
18
22
 
23
+ NONE = T.let(:none, OpenAI::ReasoningEffort::TaggedSymbol)
19
24
  MINIMAL = T.let(:minimal, OpenAI::ReasoningEffort::TaggedSymbol)
20
25
  LOW = T.let(:low, OpenAI::ReasoningEffort::TaggedSymbol)
21
26
  MEDIUM = T.let(:medium, OpenAI::ReasoningEffort::TaggedSymbol)
@@ -0,0 +1,30 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ApplyPatchTool < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(OpenAI::Responses::ApplyPatchTool, OpenAI::Internal::AnyHash)
10
+ end
11
+
12
+ # The type of the tool. Always `apply_patch`.
13
+ sig { returns(Symbol) }
14
+ attr_accessor :type
15
+
16
+ # Allows the assistant to create, delete, or update files using unified diffs.
17
+ sig { params(type: Symbol).returns(T.attached_class) }
18
+ def self.new(
19
+ # The type of the tool. Always `apply_patch`.
20
+ type: :apply_patch
21
+ )
22
+ end
23
+
24
+ sig { override.returns({ type: Symbol }) }
25
+ def to_hash
26
+ end
27
+ end
28
+ end
29
+ end
30
+ end
@@ -0,0 +1,33 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class FunctionShellTool < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(
10
+ OpenAI::Responses::FunctionShellTool,
11
+ OpenAI::Internal::AnyHash
12
+ )
13
+ end
14
+
15
+ # The type of the shell tool. Always `shell`.
16
+ sig { returns(Symbol) }
17
+ attr_accessor :type
18
+
19
+ # A tool that allows the model to execute shell commands.
20
+ sig { params(type: Symbol).returns(T.attached_class) }
21
+ def self.new(
22
+ # The type of the shell tool. Always `shell`.
23
+ type: :shell
24
+ )
25
+ end
26
+
27
+ sig { override.returns({ type: Symbol }) }
28
+ def to_hash
29
+ end
30
+ end
31
+ end
32
+ end
33
+ end