openai 0.35.2 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +8 -0
  3. data/README.md +21 -15
  4. data/lib/openai/internal/type/enum.rb +6 -6
  5. data/lib/openai/models/batch_create_params.rb +9 -6
  6. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  7. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  8. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  9. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  10. data/lib/openai/models/chat_model.rb +5 -0
  11. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  12. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  13. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  14. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  15. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  16. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  17. data/lib/openai/models/evals/run_create_params.rb +20 -12
  18. data/lib/openai/models/evals/run_create_response.rb +20 -12
  19. data/lib/openai/models/evals/run_list_response.rb +20 -12
  20. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  21. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  22. data/lib/openai/models/reasoning.rb +10 -6
  23. data/lib/openai/models/reasoning_effort.rb +10 -5
  24. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  25. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  26. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  27. data/lib/openai/models/responses/response.rb +46 -11
  28. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  29. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  30. data/lib/openai/models/responses/response_create_params.rb +42 -9
  31. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  32. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  33. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  34. data/lib/openai/models/responses/response_input_item.rb +395 -1
  35. data/lib/openai/models/responses/response_item.rb +13 -1
  36. data/lib/openai/models/responses/response_item_list.rb +2 -2
  37. data/lib/openai/models/responses/response_output_item.rb +13 -1
  38. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  40. data/lib/openai/models/responses/tool.rb +7 -1
  41. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  42. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  43. data/lib/openai/resources/chat/completions.rb +6 -2
  44. data/lib/openai/resources/conversations/items.rb +3 -3
  45. data/lib/openai/resources/conversations.rb +1 -1
  46. data/lib/openai/resources/responses/input_items.rb +1 -1
  47. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  48. data/lib/openai/resources/responses.rb +12 -8
  49. data/lib/openai/version.rb +1 -1
  50. data/lib/openai.rb +9 -0
  51. data/rbi/openai/models/batch_create_params.rbi +17 -9
  52. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  53. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  54. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  55. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  56. data/rbi/openai/models/chat_model.rbi +7 -0
  57. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  58. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  59. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  60. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  61. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  62. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  63. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  64. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  65. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  66. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  67. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  68. data/rbi/openai/models/reasoning.rbi +18 -10
  69. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  70. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  71. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  72. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  73. data/rbi/openai/models/responses/response.rbi +73 -2
  74. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  75. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  76. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  77. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  78. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  79. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  80. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  81. data/rbi/openai/models/responses/response_item.rbi +4 -0
  82. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  83. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  84. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  85. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  86. data/rbi/openai/models/responses/tool.rbi +2 -0
  87. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  88. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  89. data/rbi/openai/resources/batches.rbi +4 -3
  90. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  91. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  92. data/rbi/openai/resources/chat/completions.rbi +38 -12
  93. data/rbi/openai/resources/conversations/items.rbi +4 -0
  94. data/rbi/openai/resources/conversations.rbi +4 -0
  95. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  96. data/rbi/openai/resources/responses.rbi +28 -2
  97. data/sig/openai/models/batch_create_params.rbs +2 -0
  98. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  99. data/sig/openai/models/chat_model.rbs +11 -1
  100. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  101. data/sig/openai/models/reasoning_effort.rbs +2 -1
  102. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  103. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  104. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  105. data/sig/openai/models/responses/response.rbs +18 -0
  106. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  107. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  108. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  109. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  110. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  111. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  112. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  113. data/sig/openai/models/responses/response_item.rbs +4 -0
  114. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  115. data/sig/openai/models/responses/tool.rbs +2 -0
  116. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  117. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  118. data/sig/openai/resources/chat/completions.rbs +2 -0
  119. data/sig/openai/resources/responses.rbs +2 -0
  120. metadata +29 -2
@@ -228,12 +228,16 @@ module OpenAI
228
228
  # @!attribute reasoning_effort
229
229
  # Constrains effort on reasoning for
230
230
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
231
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
232
- # effort can result in faster responses and fewer tokens used on reasoning in a
233
- # response.
231
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
232
+ # reasoning effort can result in faster responses and fewer tokens used on
233
+ # reasoning in a response.
234
234
  #
235
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
236
- # effort.
235
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
236
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
237
+ # calls are supported for all reasoning values in gpt-5.1.
238
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
239
+ # support `none`.
240
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
237
241
  #
238
242
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
239
243
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -588,12 +592,16 @@ module OpenAI
588
592
  # @!attribute reasoning_effort
589
593
  # Constrains effort on reasoning for
590
594
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
591
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
592
- # effort can result in faster responses and fewer tokens used on reasoning in a
593
- # response.
595
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
596
+ # reasoning effort can result in faster responses and fewer tokens used on
597
+ # reasoning in a response.
594
598
  #
595
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
596
- # effort.
599
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
600
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
601
+ # calls are supported for all reasoning values in gpt-5.1.
602
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
603
+ # support `none`.
604
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
597
605
  #
598
606
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
599
607
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -637,7 +645,7 @@ module OpenAI
637
645
  # the model to call your own code. Learn more about
638
646
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
639
647
  #
640
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
648
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
641
649
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
642
650
 
643
651
  # @!attribute top_p
@@ -661,7 +669,7 @@ module OpenAI
661
669
  #
662
670
  # @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
663
671
  #
664
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
672
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
665
673
  #
666
674
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
667
675
 
@@ -316,12 +316,16 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
320
- # effort can result in faster responses and fewer tokens used on reasoning in a
321
- # response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
+ # reasoning effort can result in faster responses and fewer tokens used on
321
+ # reasoning in a response.
322
322
  #
323
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
324
- # effort.
323
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
325
+ # calls are supported for all reasoning values in gpt-5.1.
326
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
+ # support `none`.
328
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
325
329
  #
326
330
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
327
331
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -660,12 +664,16 @@ module OpenAI
660
664
  # @!attribute reasoning_effort
661
665
  # Constrains effort on reasoning for
662
666
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
663
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
664
- # effort can result in faster responses and fewer tokens used on reasoning in a
665
- # response.
667
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
668
+ # reasoning effort can result in faster responses and fewer tokens used on
669
+ # reasoning in a response.
666
670
  #
667
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
668
- # effort.
671
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
672
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
673
+ # calls are supported for all reasoning values in gpt-5.1.
674
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
675
+ # support `none`.
676
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
669
677
  #
670
678
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
671
679
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -709,7 +717,7 @@ module OpenAI
709
717
  # the model to call your own code. Learn more about
710
718
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
711
719
  #
712
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
720
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
713
721
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
714
722
 
715
723
  # @!attribute top_p
@@ -733,7 +741,7 @@ module OpenAI
733
741
  #
734
742
  # @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
735
743
  #
736
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
744
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
737
745
  #
738
746
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
739
747
 
@@ -316,12 +316,16 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
320
- # effort can result in faster responses and fewer tokens used on reasoning in a
321
- # response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
+ # reasoning effort can result in faster responses and fewer tokens used on
321
+ # reasoning in a response.
322
322
  #
323
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
324
- # effort.
323
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
325
+ # calls are supported for all reasoning values in gpt-5.1.
326
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
+ # support `none`.
328
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
325
329
  #
326
330
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
327
331
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -660,12 +664,16 @@ module OpenAI
660
664
  # @!attribute reasoning_effort
661
665
  # Constrains effort on reasoning for
662
666
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
663
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
664
- # effort can result in faster responses and fewer tokens used on reasoning in a
665
- # response.
667
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
668
+ # reasoning effort can result in faster responses and fewer tokens used on
669
+ # reasoning in a response.
666
670
  #
667
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
668
- # effort.
671
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
672
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
673
+ # calls are supported for all reasoning values in gpt-5.1.
674
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
675
+ # support `none`.
676
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
669
677
  #
670
678
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
671
679
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -708,7 +716,7 @@ module OpenAI
708
716
  # the model to call your own code. Learn more about
709
717
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
710
718
  #
711
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
719
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
712
720
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
713
721
 
714
722
  # @!attribute top_p
@@ -732,7 +740,7 @@ module OpenAI
732
740
  #
733
741
  # @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
734
742
  #
735
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
743
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
736
744
  #
737
745
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
738
746
 
@@ -316,12 +316,16 @@ module OpenAI
316
316
  # @!attribute reasoning_effort
317
317
  # Constrains effort on reasoning for
318
318
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
319
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
320
- # effort can result in faster responses and fewer tokens used on reasoning in a
321
- # response.
319
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
320
+ # reasoning effort can result in faster responses and fewer tokens used on
321
+ # reasoning in a response.
322
322
  #
323
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
324
- # effort.
323
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
324
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
325
+ # calls are supported for all reasoning values in gpt-5.1.
326
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
+ # support `none`.
328
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
325
329
  #
326
330
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
327
331
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -664,12 +668,16 @@ module OpenAI
664
668
  # @!attribute reasoning_effort
665
669
  # Constrains effort on reasoning for
666
670
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
667
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
668
- # effort can result in faster responses and fewer tokens used on reasoning in a
669
- # response.
671
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
672
+ # reasoning effort can result in faster responses and fewer tokens used on
673
+ # reasoning in a response.
670
674
  #
671
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
672
- # effort.
675
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
676
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
677
+ # calls are supported for all reasoning values in gpt-5.1.
678
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
679
+ # support `none`.
680
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
673
681
  #
674
682
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
675
683
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -713,7 +721,7 @@ module OpenAI
713
721
  # the model to call your own code. Learn more about
714
722
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
715
723
  #
716
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
724
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
717
725
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
718
726
 
719
727
  # @!attribute top_p
@@ -737,7 +745,7 @@ module OpenAI
737
745
  #
738
746
  # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
739
747
  #
740
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
748
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
741
749
  #
742
750
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
743
751
 
@@ -222,12 +222,16 @@ module OpenAI
222
222
  # @!attribute reasoning_effort
223
223
  # Constrains effort on reasoning for
224
224
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
225
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
226
- # effort can result in faster responses and fewer tokens used on reasoning in a
227
- # response.
225
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
226
+ # reasoning effort can result in faster responses and fewer tokens used on
227
+ # reasoning in a response.
228
228
  #
229
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
230
- # effort.
229
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
230
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
231
+ # calls are supported for all reasoning values in gpt-5.1.
232
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
233
+ # support `none`.
234
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
231
235
  #
232
236
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
233
237
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -6,12 +6,16 @@ module OpenAI
6
6
  # @!attribute effort
7
7
  # Constrains effort on reasoning for
8
8
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
9
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
10
- # effort can result in faster responses and fewer tokens used on reasoning in a
11
- # response.
12
- #
13
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
14
- # effort.
9
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
10
+ # reasoning effort can result in faster responses and fewer tokens used on
11
+ # reasoning in a response.
12
+ #
13
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
14
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
15
+ # calls are supported for all reasoning values in gpt-5.1.
16
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
17
+ # support `none`.
18
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
15
19
  #
16
20
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
17
21
  optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -4,15 +4,20 @@ module OpenAI
4
4
  module Models
5
5
  # Constrains effort on reasoning for
6
6
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
7
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
8
- # effort can result in faster responses and fewer tokens used on reasoning in a
9
- # response.
7
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
8
+ # reasoning effort can result in faster responses and fewer tokens used on
9
+ # reasoning in a response.
10
10
  #
11
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
12
- # effort.
11
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
12
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
13
+ # calls are supported for all reasoning values in gpt-5.1.
14
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
15
+ # support `none`.
16
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
13
17
  module ReasoningEffort
14
18
  extend OpenAI::Internal::Type::Enum
15
19
 
20
+ NONE = :none
16
21
  MINIMAL = :minimal
17
22
  LOW = :low
18
23
  MEDIUM = :medium
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ApplyPatchTool < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute type
8
+ # The type of the tool. Always `apply_patch`.
9
+ #
10
+ # @return [Symbol, :apply_patch]
11
+ required :type, const: :apply_patch
12
+
13
+ # @!method initialize(type: :apply_patch)
14
+ # Allows the assistant to create, delete, or update files using unified diffs.
15
+ #
16
+ # @param type [Symbol, :apply_patch] The type of the tool. Always `apply_patch`.
17
+ end
18
+ end
19
+ end
20
+ end
@@ -0,0 +1,20 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class FunctionShellTool < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute type
8
+ # The type of the shell tool. Always `shell`.
9
+ #
10
+ # @return [Symbol, :shell]
11
+ required :type, const: :shell
12
+
13
+ # @!method initialize(type: :shell)
14
+ # A tool that allows the model to execute shell commands.
15
+ #
16
+ # @param type [Symbol, :shell] The type of the shell tool. Always `shell`.
17
+ end
18
+ end
19
+ end
20
+ end
@@ -24,7 +24,7 @@ module OpenAI
24
24
  # @!attribute input
25
25
  # Text, image, or file inputs to the model, used to generate a response
26
26
  #
27
- # @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
27
+ # @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
28
28
  optional :input, union: -> { OpenAI::Responses::InputTokenCountParams::Input }, nil?: true
29
29
 
30
30
  # @!attribute instructions
@@ -83,14 +83,14 @@ module OpenAI
83
83
  # response. See the `tools` parameter to see how to specify which tools the model
84
84
  # can call.
85
85
  #
86
- # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil]
86
+ # @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil]
87
87
  optional :tool_choice, union: -> { OpenAI::Responses::InputTokenCountParams::ToolChoice }, nil?: true
88
88
 
89
89
  # @!attribute tools
90
90
  # An array of tools the model may call while generating a response. You can
91
91
  # specify which tool to use by setting the `tool_choice` parameter.
92
92
  #
93
- # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
93
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
94
94
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }, nil?: true
95
95
 
96
96
  # @!attribute truncation
@@ -109,7 +109,7 @@ module OpenAI
109
109
  #
110
110
  # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
111
111
  #
112
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
112
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
113
113
  #
114
114
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
115
115
  #
@@ -123,9 +123,9 @@ module OpenAI
123
123
  #
124
124
  # @param text [OpenAI::Models::Responses::InputTokenCountParams::Text, nil] Configuration options for a text response from the model. Can be plain
125
125
  #
126
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] How the model should select which tool (or tools) to use when generating
126
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] How the model should select which tool (or tools) to use when generating
127
127
  #
128
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil] An array of tools the model may call while generating a response. You can specif
128
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil] An array of tools the model may call while generating a response. You can specif
129
129
  #
130
130
  # @param truncation [Symbol, OpenAI::Models::Responses::InputTokenCountParams::Truncation] The truncation strategy to use for the model response. - `auto`: If the input to
131
131
  #
@@ -158,7 +158,7 @@ module OpenAI
158
158
  variant -> { OpenAI::Models::Responses::InputTokenCountParams::Input::ResponseInputItemArray }
159
159
 
160
160
  # @!method self.variants
161
- # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
161
+ # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
162
162
 
163
163
  # @type [OpenAI::Internal::Type::Converter]
164
164
  ResponseInputItemArray =
@@ -259,8 +259,14 @@ module OpenAI
259
259
  # Use this option to force the model to call a specific custom tool.
260
260
  variant -> { OpenAI::Responses::ToolChoiceCustom }
261
261
 
262
+ # Forces the model to call the apply_patch tool when executing a tool call.
263
+ variant -> { OpenAI::Responses::ToolChoiceApplyPatch }
264
+
265
+ # Forces the model to call the function shell tool when a tool call is required.
266
+ variant -> { OpenAI::Responses::ToolChoiceShell }
267
+
262
268
  # @!method self.variants
263
- # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
269
+ # @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)]
264
270
  end
265
271
 
266
272
  # The truncation strategy to use for the model response. - `auto`: If the input to