openai 0.35.2 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +21 -15
  4. data/lib/openai/internal/type/enum.rb +6 -6
  5. data/lib/openai/models/batch_create_params.rb +9 -6
  6. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  7. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  8. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  9. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  10. data/lib/openai/models/chat_model.rb +5 -0
  11. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  12. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  13. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  14. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  15. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  16. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  17. data/lib/openai/models/evals/run_create_params.rb +20 -12
  18. data/lib/openai/models/evals/run_create_response.rb +20 -12
  19. data/lib/openai/models/evals/run_list_response.rb +20 -12
  20. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  21. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  22. data/lib/openai/models/reasoning.rb +10 -6
  23. data/lib/openai/models/reasoning_effort.rb +10 -5
  24. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  25. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  26. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  27. data/lib/openai/models/responses/response.rb +46 -11
  28. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  29. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  30. data/lib/openai/models/responses/response_create_params.rb +42 -9
  31. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  32. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  33. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  34. data/lib/openai/models/responses/response_input_item.rb +395 -1
  35. data/lib/openai/models/responses/response_item.rb +13 -1
  36. data/lib/openai/models/responses/response_item_list.rb +2 -2
  37. data/lib/openai/models/responses/response_output_item.rb +13 -1
  38. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  40. data/lib/openai/models/responses/tool.rb +7 -1
  41. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  42. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  43. data/lib/openai/resources/chat/completions.rb +6 -2
  44. data/lib/openai/resources/conversations/items.rb +3 -3
  45. data/lib/openai/resources/conversations.rb +1 -1
  46. data/lib/openai/resources/responses/input_items.rb +1 -1
  47. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  48. data/lib/openai/resources/responses.rb +12 -8
  49. data/lib/openai/version.rb +1 -1
  50. data/lib/openai.rb +9 -0
  51. data/rbi/openai/models/batch_create_params.rbi +17 -9
  52. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  53. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  54. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  55. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  56. data/rbi/openai/models/chat_model.rbi +7 -0
  57. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  58. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  59. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  60. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  61. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  62. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  63. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  64. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  65. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  66. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  67. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  68. data/rbi/openai/models/reasoning.rbi +18 -10
  69. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  70. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  71. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  72. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  73. data/rbi/openai/models/responses/response.rbi +73 -2
  74. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  75. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  76. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  77. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  78. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  79. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  80. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  81. data/rbi/openai/models/responses/response_item.rbi +4 -0
  82. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  83. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  84. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  85. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  86. data/rbi/openai/models/responses/tool.rbi +2 -0
  87. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  88. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  89. data/rbi/openai/resources/batches.rbi +4 -3
  90. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  91. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  92. data/rbi/openai/resources/chat/completions.rbi +38 -12
  93. data/rbi/openai/resources/conversations/items.rbi +4 -0
  94. data/rbi/openai/resources/conversations.rbi +4 -0
  95. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  96. data/rbi/openai/resources/responses.rbi +28 -2
  97. data/sig/openai/models/batch_create_params.rbs +2 -0
  98. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  99. data/sig/openai/models/chat_model.rbs +11 -1
  100. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  101. data/sig/openai/models/reasoning_effort.rbs +2 -1
  102. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  103. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  104. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  105. data/sig/openai/models/responses/response.rbs +18 -0
  106. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  107. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  108. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  109. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  110. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  111. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  112. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  113. data/sig/openai/models/responses/response_item.rbs +4 -0
  114. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  115. data/sig/openai/models/responses/tool.rbs +2 -0
  116. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  117. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  118. data/sig/openai/resources/chat/completions.rbs +2 -0
  119. data/sig/openai/resources/responses.rbs +2 -0
  120. metadata +29 -2
@@ -512,12 +512,16 @@ module OpenAI
512
512
 
513
513
  # Constrains effort on reasoning for
514
514
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
515
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
516
- # effort can result in faster responses and fewer tokens used on reasoning in a
517
- # response.
515
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
516
+ # reasoning effort can result in faster responses and fewer tokens used on
517
+ # reasoning in a response.
518
518
  #
519
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
520
- # effort.
519
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
520
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
521
+ # calls are supported for all reasoning values in gpt-5.1.
522
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
523
+ # support `none`.
524
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
521
525
  sig do
522
526
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
523
527
  end
@@ -574,12 +578,16 @@ module OpenAI
574
578
  model: nil,
575
579
  # Constrains effort on reasoning for
576
580
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
577
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
578
- # effort can result in faster responses and fewer tokens used on reasoning in a
579
- # response.
581
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
582
+ # reasoning effort can result in faster responses and fewer tokens used on
583
+ # reasoning in a response.
580
584
  #
581
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
582
- # effort.
585
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
586
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
587
+ # calls are supported for all reasoning values in gpt-5.1.
588
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
589
+ # support `none`.
590
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
583
591
  reasoning_effort: nil,
584
592
  # Sampling temperature. This is a query parameter used to select responses.
585
593
  temperature: nil,
@@ -1123,12 +1131,16 @@ module OpenAI
1123
1131
 
1124
1132
  # Constrains effort on reasoning for
1125
1133
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1126
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1127
- # effort can result in faster responses and fewer tokens used on reasoning in a
1128
- # response.
1134
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1135
+ # reasoning effort can result in faster responses and fewer tokens used on
1136
+ # reasoning in a response.
1129
1137
  #
1130
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1131
- # effort.
1138
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1139
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1140
+ # calls are supported for all reasoning values in gpt-5.1.
1141
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1142
+ # support `none`.
1143
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1132
1144
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1133
1145
  attr_accessor :reasoning_effort
1134
1146
 
@@ -1199,7 +1211,9 @@ module OpenAI
1199
1211
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1200
1212
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1201
1213
  OpenAI::Responses::Tool::LocalShell::OrHash,
1214
+ OpenAI::Responses::FunctionShellTool::OrHash,
1202
1215
  OpenAI::Responses::CustomTool::OrHash,
1216
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1203
1217
  OpenAI::Responses::WebSearchTool::OrHash,
1204
1218
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1205
1219
  )
@@ -1234,7 +1248,9 @@ module OpenAI
1234
1248
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1235
1249
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1236
1250
  OpenAI::Responses::Tool::LocalShell::OrHash,
1251
+ OpenAI::Responses::FunctionShellTool::OrHash,
1237
1252
  OpenAI::Responses::CustomTool::OrHash,
1253
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1238
1254
  OpenAI::Responses::WebSearchTool::OrHash,
1239
1255
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1240
1256
  )
@@ -1247,12 +1263,16 @@ module OpenAI
1247
1263
  max_completion_tokens: nil,
1248
1264
  # Constrains effort on reasoning for
1249
1265
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1250
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1251
- # effort can result in faster responses and fewer tokens used on reasoning in a
1252
- # response.
1266
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1267
+ # reasoning effort can result in faster responses and fewer tokens used on
1268
+ # reasoning in a response.
1253
1269
  #
1254
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1255
- # effort.
1270
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1271
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1272
+ # calls are supported for all reasoning values in gpt-5.1.
1273
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1274
+ # support `none`.
1275
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1256
1276
  reasoning_effort: nil,
1257
1277
  # A seed value to initialize the randomness, during sampling.
1258
1278
  seed: nil,
@@ -508,12 +508,16 @@ module OpenAI
508
508
 
509
509
  # Constrains effort on reasoning for
510
510
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
511
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
512
- # effort can result in faster responses and fewer tokens used on reasoning in a
513
- # response.
511
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
512
+ # reasoning effort can result in faster responses and fewer tokens used on
513
+ # reasoning in a response.
514
514
  #
515
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
516
- # effort.
515
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
516
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
517
+ # calls are supported for all reasoning values in gpt-5.1.
518
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
519
+ # support `none`.
520
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
517
521
  sig do
518
522
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
519
523
  end
@@ -570,12 +574,16 @@ module OpenAI
570
574
  model: nil,
571
575
  # Constrains effort on reasoning for
572
576
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
573
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
574
- # effort can result in faster responses and fewer tokens used on reasoning in a
575
- # response.
577
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
578
+ # reasoning effort can result in faster responses and fewer tokens used on
579
+ # reasoning in a response.
576
580
  #
577
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
578
- # effort.
581
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
582
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
583
+ # calls are supported for all reasoning values in gpt-5.1.
584
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
585
+ # support `none`.
586
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
579
587
  reasoning_effort: nil,
580
588
  # Sampling temperature. This is a query parameter used to select responses.
581
589
  temperature: nil,
@@ -1119,12 +1127,16 @@ module OpenAI
1119
1127
 
1120
1128
  # Constrains effort on reasoning for
1121
1129
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1122
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1123
- # effort can result in faster responses and fewer tokens used on reasoning in a
1124
- # response.
1130
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1131
+ # reasoning effort can result in faster responses and fewer tokens used on
1132
+ # reasoning in a response.
1125
1133
  #
1126
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1127
- # effort.
1134
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1135
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1136
+ # calls are supported for all reasoning values in gpt-5.1.
1137
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1138
+ # support `none`.
1139
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1128
1140
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1129
1141
  attr_accessor :reasoning_effort
1130
1142
 
@@ -1195,7 +1207,9 @@ module OpenAI
1195
1207
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1196
1208
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1197
1209
  OpenAI::Responses::Tool::LocalShell::OrHash,
1210
+ OpenAI::Responses::FunctionShellTool::OrHash,
1198
1211
  OpenAI::Responses::CustomTool::OrHash,
1212
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1199
1213
  OpenAI::Responses::WebSearchTool::OrHash,
1200
1214
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1201
1215
  )
@@ -1230,7 +1244,9 @@ module OpenAI
1230
1244
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1231
1245
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1232
1246
  OpenAI::Responses::Tool::LocalShell::OrHash,
1247
+ OpenAI::Responses::FunctionShellTool::OrHash,
1233
1248
  OpenAI::Responses::CustomTool::OrHash,
1249
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1234
1250
  OpenAI::Responses::WebSearchTool::OrHash,
1235
1251
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1236
1252
  )
@@ -1243,12 +1259,16 @@ module OpenAI
1243
1259
  max_completion_tokens: nil,
1244
1260
  # Constrains effort on reasoning for
1245
1261
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1246
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1247
- # effort can result in faster responses and fewer tokens used on reasoning in a
1248
- # response.
1262
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1263
+ # reasoning effort can result in faster responses and fewer tokens used on
1264
+ # reasoning in a response.
1249
1265
  #
1250
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1251
- # effort.
1266
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1267
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1268
+ # calls are supported for all reasoning values in gpt-5.1.
1269
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1270
+ # support `none`.
1271
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1252
1272
  reasoning_effort: nil,
1253
1273
  # A seed value to initialize the randomness, during sampling.
1254
1274
  seed: nil,
@@ -514,12 +514,16 @@ module OpenAI
514
514
 
515
515
  # Constrains effort on reasoning for
516
516
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
517
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
518
- # effort can result in faster responses and fewer tokens used on reasoning in a
519
- # response.
517
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
518
+ # reasoning effort can result in faster responses and fewer tokens used on
519
+ # reasoning in a response.
520
520
  #
521
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
522
- # effort.
521
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
522
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
523
+ # calls are supported for all reasoning values in gpt-5.1.
524
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
525
+ # support `none`.
526
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
523
527
  sig do
524
528
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
525
529
  end
@@ -576,12 +580,16 @@ module OpenAI
576
580
  model: nil,
577
581
  # Constrains effort on reasoning for
578
582
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
579
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
580
- # effort can result in faster responses and fewer tokens used on reasoning in a
581
- # response.
583
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
584
+ # reasoning effort can result in faster responses and fewer tokens used on
585
+ # reasoning in a response.
582
586
  #
583
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
584
- # effort.
587
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
588
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
589
+ # calls are supported for all reasoning values in gpt-5.1.
590
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
591
+ # support `none`.
592
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
585
593
  reasoning_effort: nil,
586
594
  # Sampling temperature. This is a query parameter used to select responses.
587
595
  temperature: nil,
@@ -1125,12 +1133,16 @@ module OpenAI
1125
1133
 
1126
1134
  # Constrains effort on reasoning for
1127
1135
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1128
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1129
- # effort can result in faster responses and fewer tokens used on reasoning in a
1130
- # response.
1136
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1137
+ # reasoning effort can result in faster responses and fewer tokens used on
1138
+ # reasoning in a response.
1131
1139
  #
1132
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1133
- # effort.
1140
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1141
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1142
+ # calls are supported for all reasoning values in gpt-5.1.
1143
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1144
+ # support `none`.
1145
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1134
1146
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
1135
1147
  attr_accessor :reasoning_effort
1136
1148
 
@@ -1201,7 +1213,9 @@ module OpenAI
1201
1213
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1202
1214
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1203
1215
  OpenAI::Responses::Tool::LocalShell::OrHash,
1216
+ OpenAI::Responses::FunctionShellTool::OrHash,
1204
1217
  OpenAI::Responses::CustomTool::OrHash,
1218
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1205
1219
  OpenAI::Responses::WebSearchTool::OrHash,
1206
1220
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1207
1221
  )
@@ -1236,7 +1250,9 @@ module OpenAI
1236
1250
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
1237
1251
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
1238
1252
  OpenAI::Responses::Tool::LocalShell::OrHash,
1253
+ OpenAI::Responses::FunctionShellTool::OrHash,
1239
1254
  OpenAI::Responses::CustomTool::OrHash,
1255
+ OpenAI::Responses::ApplyPatchTool::OrHash,
1240
1256
  OpenAI::Responses::WebSearchTool::OrHash,
1241
1257
  OpenAI::Responses::WebSearchPreviewTool::OrHash
1242
1258
  )
@@ -1249,12 +1265,16 @@ module OpenAI
1249
1265
  max_completion_tokens: nil,
1250
1266
  # Constrains effort on reasoning for
1251
1267
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1252
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
1253
- # effort can result in faster responses and fewer tokens used on reasoning in a
1254
- # response.
1268
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1269
+ # reasoning effort can result in faster responses and fewer tokens used on
1270
+ # reasoning in a response.
1255
1271
  #
1256
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
1257
- # effort.
1272
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1273
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1274
+ # calls are supported for all reasoning values in gpt-5.1.
1275
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1276
+ # support `none`.
1277
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1258
1278
  reasoning_effort: nil,
1259
1279
  # A seed value to initialize the randomness, during sampling.
1260
1280
  seed: nil,
@@ -396,12 +396,16 @@ module OpenAI
396
396
 
397
397
  # Constrains effort on reasoning for
398
398
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
399
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
400
- # effort can result in faster responses and fewer tokens used on reasoning in a
401
- # response.
399
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
400
+ # reasoning effort can result in faster responses and fewer tokens used on
401
+ # reasoning in a response.
402
402
  #
403
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
404
- # effort.
403
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
404
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
405
+ # calls are supported for all reasoning values in gpt-5.1.
406
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
407
+ # support `none`.
408
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
405
409
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
406
410
  attr_accessor :reasoning_effort
407
411
 
@@ -432,12 +436,16 @@ module OpenAI
432
436
  max_completions_tokens: nil,
433
437
  # Constrains effort on reasoning for
434
438
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
435
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
436
- # effort can result in faster responses and fewer tokens used on reasoning in a
437
- # response.
439
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
440
+ # reasoning effort can result in faster responses and fewer tokens used on
441
+ # reasoning in a response.
438
442
  #
439
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
440
- # effort.
443
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
444
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
445
+ # calls are supported for all reasoning values in gpt-5.1.
446
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
447
+ # support `none`.
448
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
441
449
  reasoning_effort: nil,
442
450
  # A seed value to initialize the randomness, during sampling.
443
451
  seed: nil,
@@ -8,12 +8,16 @@ module OpenAI
8
8
 
9
9
  # Constrains effort on reasoning for
10
10
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
11
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
12
- # effort can result in faster responses and fewer tokens used on reasoning in a
13
- # response.
11
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
12
+ # reasoning effort can result in faster responses and fewer tokens used on
13
+ # reasoning in a response.
14
14
  #
15
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
16
- # effort.
15
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
16
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
17
+ # calls are supported for all reasoning values in gpt-5.1.
18
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
19
+ # support `none`.
20
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
17
21
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
18
22
  attr_accessor :effort
19
23
 
@@ -48,12 +52,16 @@ module OpenAI
48
52
  def self.new(
49
53
  # Constrains effort on reasoning for
50
54
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
51
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
52
- # effort can result in faster responses and fewer tokens used on reasoning in a
53
- # response.
55
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
56
+ # reasoning effort can result in faster responses and fewer tokens used on
57
+ # reasoning in a response.
54
58
  #
55
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
56
- # effort.
59
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
60
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
61
+ # calls are supported for all reasoning values in gpt-5.1.
62
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
63
+ # support `none`.
64
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
57
65
  effort: nil,
58
66
  # **Deprecated:** use `summary` instead.
59
67
  #
@@ -4,18 +4,23 @@ module OpenAI
4
4
  module Models
5
5
  # Constrains effort on reasoning for
6
6
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
7
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
8
- # effort can result in faster responses and fewer tokens used on reasoning in a
9
- # response.
7
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
8
+ # reasoning effort can result in faster responses and fewer tokens used on
9
+ # reasoning in a response.
10
10
  #
11
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
12
- # effort.
11
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
12
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
13
+ # calls are supported for all reasoning values in gpt-5.1.
14
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
15
+ # support `none`.
16
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
13
17
  module ReasoningEffort
14
18
  extend OpenAI::Internal::Type::Enum
15
19
 
16
20
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ReasoningEffort) }
17
21
  OrSymbol = T.type_alias { T.any(Symbol, String) }
18
22
 
23
+ NONE = T.let(:none, OpenAI::ReasoningEffort::TaggedSymbol)
19
24
  MINIMAL = T.let(:minimal, OpenAI::ReasoningEffort::TaggedSymbol)
20
25
  LOW = T.let(:low, OpenAI::ReasoningEffort::TaggedSymbol)
21
26
  MEDIUM = T.let(:medium, OpenAI::ReasoningEffort::TaggedSymbol)
@@ -0,0 +1,30 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ApplyPatchTool < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(OpenAI::Responses::ApplyPatchTool, OpenAI::Internal::AnyHash)
10
+ end
11
+
12
+ # The type of the tool. Always `apply_patch`.
13
+ sig { returns(Symbol) }
14
+ attr_accessor :type
15
+
16
+ # Allows the assistant to create, delete, or update files using unified diffs.
17
+ sig { params(type: Symbol).returns(T.attached_class) }
18
+ def self.new(
19
+ # The type of the tool. Always `apply_patch`.
20
+ type: :apply_patch
21
+ )
22
+ end
23
+
24
+ sig { override.returns({ type: Symbol }) }
25
+ def to_hash
26
+ end
27
+ end
28
+ end
29
+ end
30
+ end
@@ -0,0 +1,33 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class FunctionShellTool < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(
10
+ OpenAI::Responses::FunctionShellTool,
11
+ OpenAI::Internal::AnyHash
12
+ )
13
+ end
14
+
15
+ # The type of the shell tool. Always `shell`.
16
+ sig { returns(Symbol) }
17
+ attr_accessor :type
18
+
19
+ # A tool that allows the model to execute shell commands.
20
+ sig { params(type: Symbol).returns(T.attached_class) }
21
+ def self.new(
22
+ # The type of the shell tool. Always `shell`.
23
+ type: :shell
24
+ )
25
+ end
26
+
27
+ sig { override.returns({ type: Symbol }) }
28
+ def to_hash
29
+ end
30
+ end
31
+ end
32
+ end
33
+ end
@@ -100,7 +100,9 @@ module OpenAI
100
100
  OpenAI::Responses::ToolChoiceTypes,
101
101
  OpenAI::Responses::ToolChoiceFunction,
102
102
  OpenAI::Responses::ToolChoiceMcp,
103
- OpenAI::Responses::ToolChoiceCustom
103
+ OpenAI::Responses::ToolChoiceCustom,
104
+ OpenAI::Responses::ToolChoiceApplyPatch,
105
+ OpenAI::Responses::ToolChoiceShell
104
106
  )
105
107
  )
106
108
  )
@@ -121,7 +123,9 @@ module OpenAI
121
123
  OpenAI::Responses::Tool::CodeInterpreter,
122
124
  OpenAI::Responses::Tool::ImageGeneration,
123
125
  OpenAI::Responses::Tool::LocalShell,
126
+ OpenAI::Responses::FunctionShellTool,
124
127
  OpenAI::Responses::CustomTool,
128
+ OpenAI::Responses::ApplyPatchTool,
125
129
  OpenAI::Responses::WebSearchTool,
126
130
  OpenAI::Responses::WebSearchPreviewTool
127
131
  )
@@ -181,7 +185,9 @@ module OpenAI
181
185
  OpenAI::Responses::ToolChoiceTypes::OrHash,
182
186
  OpenAI::Responses::ToolChoiceFunction::OrHash,
183
187
  OpenAI::Responses::ToolChoiceMcp::OrHash,
184
- OpenAI::Responses::ToolChoiceCustom::OrHash
188
+ OpenAI::Responses::ToolChoiceCustom::OrHash,
189
+ OpenAI::Responses::ToolChoiceApplyPatch::OrHash,
190
+ OpenAI::Responses::ToolChoiceShell::OrHash
185
191
  )
186
192
  ),
187
193
  tools:
@@ -195,7 +201,9 @@ module OpenAI
195
201
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
196
202
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
197
203
  OpenAI::Responses::Tool::LocalShell::OrHash,
204
+ OpenAI::Responses::FunctionShellTool::OrHash,
198
205
  OpenAI::Responses::CustomTool::OrHash,
206
+ OpenAI::Responses::ApplyPatchTool::OrHash,
199
207
  OpenAI::Responses::WebSearchTool::OrHash,
200
208
  OpenAI::Responses::WebSearchPreviewTool::OrHash
201
209
  )
@@ -283,7 +291,9 @@ module OpenAI
283
291
  OpenAI::Responses::ToolChoiceTypes,
284
292
  OpenAI::Responses::ToolChoiceFunction,
285
293
  OpenAI::Responses::ToolChoiceMcp,
286
- OpenAI::Responses::ToolChoiceCustom
294
+ OpenAI::Responses::ToolChoiceCustom,
295
+ OpenAI::Responses::ToolChoiceApplyPatch,
296
+ OpenAI::Responses::ToolChoiceShell
287
297
  )
288
298
  ),
289
299
  tools:
@@ -297,7 +307,9 @@ module OpenAI
297
307
  OpenAI::Responses::Tool::CodeInterpreter,
298
308
  OpenAI::Responses::Tool::ImageGeneration,
299
309
  OpenAI::Responses::Tool::LocalShell,
310
+ OpenAI::Responses::FunctionShellTool,
300
311
  OpenAI::Responses::CustomTool,
312
+ OpenAI::Responses::ApplyPatchTool,
301
313
  OpenAI::Responses::WebSearchTool,
302
314
  OpenAI::Responses::WebSearchPreviewTool
303
315
  )
@@ -542,7 +554,9 @@ module OpenAI
542
554
  OpenAI::Responses::ToolChoiceTypes,
543
555
  OpenAI::Responses::ToolChoiceFunction,
544
556
  OpenAI::Responses::ToolChoiceMcp,
545
- OpenAI::Responses::ToolChoiceCustom
557
+ OpenAI::Responses::ToolChoiceCustom,
558
+ OpenAI::Responses::ToolChoiceApplyPatch,
559
+ OpenAI::Responses::ToolChoiceShell
546
560
  )
547
561
  end
548
562