openai 0.35.1 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +17 -0
  3. data/README.md +21 -15
  4. data/lib/openai/helpers/structured_output/union_of.rb +5 -1
  5. data/lib/openai/internal/transport/pooled_net_requester.rb +6 -2
  6. data/lib/openai/internal/type/enum.rb +6 -6
  7. data/lib/openai/models/batch_create_params.rb +9 -6
  8. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  9. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  10. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  11. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  12. data/lib/openai/models/chat_model.rb +5 -0
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  15. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  16. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  17. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  18. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  19. data/lib/openai/models/evals/run_create_params.rb +20 -12
  20. data/lib/openai/models/evals/run_create_response.rb +20 -12
  21. data/lib/openai/models/evals/run_list_response.rb +20 -12
  22. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  23. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  24. data/lib/openai/models/reasoning.rb +10 -6
  25. data/lib/openai/models/reasoning_effort.rb +10 -5
  26. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  27. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  28. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  29. data/lib/openai/models/responses/response.rb +46 -11
  30. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  31. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  32. data/lib/openai/models/responses/response_create_params.rb +42 -9
  33. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  34. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  35. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  36. data/lib/openai/models/responses/response_input_item.rb +395 -1
  37. data/lib/openai/models/responses/response_item.rb +13 -1
  38. data/lib/openai/models/responses/response_item_list.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item.rb +13 -1
  40. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  41. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  42. data/lib/openai/models/responses/tool.rb +7 -1
  43. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  44. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  45. data/lib/openai/resources/chat/completions.rb +6 -2
  46. data/lib/openai/resources/conversations/items.rb +3 -3
  47. data/lib/openai/resources/conversations.rb +1 -1
  48. data/lib/openai/resources/responses/input_items.rb +1 -1
  49. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  50. data/lib/openai/resources/responses.rb +12 -8
  51. data/lib/openai/version.rb +1 -1
  52. data/lib/openai.rb +10 -0
  53. data/manifest.yaml +1 -0
  54. data/rbi/openai/internal/transport/pooled_net_requester.rbi +6 -2
  55. data/rbi/openai/models/batch_create_params.rbi +17 -9
  56. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  57. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  58. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  59. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  60. data/rbi/openai/models/chat_model.rbi +7 -0
  61. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  62. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  63. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  64. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  65. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  66. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  67. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  68. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  69. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  70. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  71. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  72. data/rbi/openai/models/reasoning.rbi +18 -10
  73. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  74. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  75. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  76. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  77. data/rbi/openai/models/responses/response.rbi +73 -2
  78. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  79. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  80. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  81. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  82. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  83. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  84. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  85. data/rbi/openai/models/responses/response_item.rbi +4 -0
  86. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  87. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  88. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  89. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  90. data/rbi/openai/models/responses/tool.rbi +2 -0
  91. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  92. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  93. data/rbi/openai/resources/batches.rbi +4 -3
  94. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  95. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  96. data/rbi/openai/resources/chat/completions.rbi +38 -12
  97. data/rbi/openai/resources/conversations/items.rbi +4 -0
  98. data/rbi/openai/resources/conversations.rbi +4 -0
  99. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  100. data/rbi/openai/resources/responses.rbi +28 -2
  101. data/sig/openai/internal/transport/pooled_net_requester.rbs +4 -1
  102. data/sig/openai/models/batch_create_params.rbs +2 -0
  103. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  104. data/sig/openai/models/chat_model.rbs +11 -1
  105. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  106. data/sig/openai/models/reasoning_effort.rbs +2 -1
  107. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  108. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  109. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  110. data/sig/openai/models/responses/response.rbs +18 -0
  111. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  112. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  113. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  114. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  115. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  116. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  117. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  118. data/sig/openai/models/responses/response_item.rbs +4 -0
  119. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  120. data/sig/openai/models/responses/tool.rbs +2 -0
  121. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  122. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  123. data/sig/openai/resources/chat/completions.rbs +2 -0
  124. data/sig/openai/resources/responses.rbs +2 -0
  125. metadata +29 -2
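Taken together, 0.36.0 adds first-class shell (`FunctionShellTool`) and apply-patch (`ApplyPatchTool`) tools to the Responses API, matching `ToolChoiceShell`/`ToolChoiceApplyPatch` classes, new call and output item types for both tools, and a `prompt_cache_retention` parameter on Chat Completions and Responses. A minimal request sketch follows; the `OpenAI::Client` entry point, the plain-hash tool definitions, and the model name are assumptions rather than something shown in this diff.

```ruby
# Hypothetical sketch: requesting the new shell and apply_patch tools.
# Hash shapes assume the tool models only carry a `type` constant.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-5.1", # placeholder model name
  input: "Run the test suite, then patch any failing spec.",
  tools: [
    {type: :shell},       # assumed to coerce into OpenAI::Responses::FunctionShellTool
    {type: :apply_patch}  # assumed to coerce into OpenAI::Responses::ApplyPatchTool
  ]
)
puts response.id
```

The relevant hunks from the release follow.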
@@ -46,6 +46,18 @@ module OpenAI
  # The output of a local shell tool call.
  variant :local_shell_call_output, -> { OpenAI::Responses::ResponseItem::LocalShellCallOutput }
 
+ # A tool call that executes one or more shell commands in a managed environment.
+ variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall }
+
+ # The output of a shell tool call.
+ variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput }
+
+ # A tool call that applies file diffs by creating, deleting, or updating files.
+ variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall }
+
+ # The output emitted by an apply patch tool call.
+ variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput }
+
  # A list of tools available on an MCP server.
  variant :mcp_list_tools, -> { OpenAI::Responses::ResponseItem::McpListTools }
 
@@ -574,7 +586,7 @@ module OpenAI
  end
 
  # @!method self.variants
- # @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)]
+ # @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)]
  end
  end
  end
@@ -7,7 +7,7 @@ module OpenAI
  # @!attribute data
  # A list of items used to generate this response.
  #
- # @return [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>]
+ # @return [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>]
  required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseItem] }
 
  # @!attribute first_id
@@ -37,7 +37,7 @@ module OpenAI
  # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list)
  # A list of Response items.
  #
- # @param data [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>] A list of items used to generate this response.
+ # @param data [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>] A list of items used to generate this response.
  #
  # @param first_id [String] The ID of the first item in the list.
  #
@@ -43,6 +43,18 @@ module OpenAI
  # A tool call to run a command on the local shell.
  variant :local_shell_call, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall }
 
+ # A tool call that executes one or more shell commands in a managed environment.
+ variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall }
+
+ # The output of a shell tool call.
+ variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput }
+
+ # A tool call that applies file diffs by creating, deleting, or updating files.
+ variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall }
+
+ # The output emitted by an apply patch tool call.
+ variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput }
+
  # An invocation of a tool on an MCP server.
  variant :mcp_call, -> { OpenAI::Responses::ResponseOutputItem::McpCall }
 
@@ -468,7 +480,7 @@ module OpenAI
  end
 
  # @!method self.variants
- # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)]
+ # @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall)]
  end
  end
  end
@@ -7,7 +7,7 @@ module OpenAI
  # @!attribute item
  # The output item that was added.
  #
- # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall]
+ # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall]
  required :item, union: -> { OpenAI::Responses::ResponseOutputItem }
 
  # @!attribute output_index
@@ -34,7 +34,7 @@ module OpenAI
  #
  # Emitted when a new output item is added.
  #
- # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added.
+ # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was added.
  #
  # @param output_index [Integer] The index of the output item that was added.
  #
@@ -7,7 +7,7 @@ module OpenAI
  # @!attribute item
  # The output item that was marked done.
  #
- # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall]
+ # @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall]
  required :item, union: -> { OpenAI::Responses::ResponseOutputItem }
 
  # @!attribute output_index
@@ -34,7 +34,7 @@ module OpenAI
  #
  # Emitted when an output item is marked done.
  #
- # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done.
+ # @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall] The output item that was marked done.
  #
  # @param output_index [Integer] The index of the output item that was marked done.
  #
@@ -33,9 +33,15 @@ module OpenAI
  # A tool that allows the model to execute shell commands in a local environment.
  variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell }
 
+ # A tool that allows the model to execute shell commands.
+ variant :shell, -> { OpenAI::Responses::FunctionShellTool }
+
  # A custom tool that processes input using a specified format. Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
  variant :custom, -> { OpenAI::Responses::CustomTool }
 
+ # Allows the assistant to create, delete, or update files using unified diffs.
+ variant :apply_patch, -> { OpenAI::Responses::ApplyPatchTool }
+
  # Search the Internet for sources related to the prompt. Learn more about the
  # [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
  variant -> { OpenAI::Responses::WebSearchTool }
@@ -682,7 +688,7 @@ module OpenAI
  end
 
  # @!method self.variants
- # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool)]
+ # @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool)]
  end
  end
  end
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Responses
+ class ToolChoiceApplyPatch < OpenAI::Internal::Type::BaseModel
+ # @!attribute type
+ # The tool to call. Always `apply_patch`.
+ #
+ # @return [Symbol, :apply_patch]
+ required :type, const: :apply_patch
+
+ # @!method initialize(type: :apply_patch)
+ # Forces the model to call the apply_patch tool when executing a tool call.
+ #
+ # @param type [Symbol, :apply_patch] The tool to call. Always `apply_patch`.
+ end
+ end
+ end
+ end
@@ -0,0 +1,20 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+ module Models
+ module Responses
+ class ToolChoiceShell < OpenAI::Internal::Type::BaseModel
+ # @!attribute type
+ # The tool to call. Always `shell`.
+ #
+ # @return [Symbol, :shell]
+ required :type, const: :shell
+
+ # @!method initialize(type: :shell)
+ # Forces the model to call the function shell tool when a tool call is required.
+ #
+ # @param type [Symbol, :shell] The tool to call. Always `shell`.
+ end
+ end
+ end
+ end
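The two new tool-choice models above mirror the existing `ToolChoiceFunction`/`ToolChoiceCustom` pattern: each carries only a `type` constant and forces the corresponding tool. A hedged sketch of forcing the apply_patch tool, assuming hashes are coerced into these models the same way other request params are:

```ruby
# Hypothetical sketch: forcing a specific tool via tool_choice.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-5.1", # placeholder model name
  input: "Apply the rename described in NOTES.md to lib/widget.rb.",
  tools: [{type: :apply_patch}],
  tool_choice: {type: :apply_patch} # ToolChoiceApplyPatch; use {type: :shell} for ToolChoiceShell
)
```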
@@ -30,7 +30,7 @@ module OpenAI
  # unsupported parameters in reasoning models,
  # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
  #
- # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
+ # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
  #
  # @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
  #
@@ -66,6 +66,8 @@ module OpenAI
  #
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
  #
+ # @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+ #
  # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
@@ -275,7 +277,7 @@ module OpenAI
  # unsupported parameters in reasoning models,
  # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
  #
- # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
+ # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
  #
  # @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
  #
@@ -311,6 +313,8 @@ module OpenAI
  #
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
  #
+ # @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+ #
  # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
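Both `create` and `stream_raw` on Chat Completions now document a `prompt_cache_retention` parameter alongside `prompt_cache_key`; per the doc comment, `24h` opts into extended prompt caching. A hedged sketch (the message hash shape and model name are assumptions):

```ruby
# Hypothetical sketch: extended prompt cache retention on Chat Completions.
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat = client.chat.completions.create(
  model: "gpt-5.1", # placeholder model name
  messages: [{role: :user, content: "Summarize the 0.36.0 changes."}],
  prompt_cache_key: "changelog-summaries",
  prompt_cache_retention: :"24h" # omit (or pass nil) to keep the default retention
)
puts chat.choices.first.message.content
```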
@@ -13,7 +13,7 @@ module OpenAI
  #
  # @param conversation_id [String] Path param: The ID of the conversation to add the item to.
  #
- # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Body param: The items to add to the conversation. You may add up to 20 items at
+ # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Body param: The items to add to the conversation. You may add up to 20 items at
  #
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Query param: Additional fields to include in the response. See the `include`
  #
@@ -50,7 +50,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput]
+ # @return [OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput]
  #
  # @see OpenAI::Models::Conversations::ItemRetrieveParams
  def retrieve(item_id, params)
@@ -87,7 +87,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::ConversationCursorPage<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
+ # @return [OpenAI::Internal::ConversationCursorPage<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
  #
  # @see OpenAI::Models::Conversations::ItemListParams
  def list(conversation_id, params = {})
@@ -13,7 +13,7 @@ module OpenAI
  #
  # @overload create(items: nil, metadata: nil, request_options: {})
  #
- # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
+ # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
  #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
@@ -23,7 +23,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>]
+ # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>]
  #
  # @see OpenAI::Models::Responses::InputItemListParams
  def list(response_id, params = {})
@@ -13,7 +13,7 @@ module OpenAI
  #
  # @param conversation [String, OpenAI::Models::Responses::ResponseConversationParam, nil] The conversation that this response belongs to. Items from this conversation are
  #
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
  #
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
  #
@@ -27,9 +27,9 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::InputTokenCountParams::Text, nil] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil] How the model should select which tool (or tools) to use when generating
  #
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil] An array of tools the model may call while generating a response. You can specif
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil] An array of tools the model may call while generating a response. You can specif
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::InputTokenCountParams::Truncation] The truncation strategy to use for the model response. - `auto`: If the input to
  #
@@ -26,7 +26,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload create(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -34,7 +34,7 @@ module OpenAI
  #
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently suppo
  #
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
  #
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
  #
@@ -54,6 +54,8 @@ module OpenAI
  #
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
  #
+ # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+ #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
  #
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
@@ -68,9 +70,9 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating
  #
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
  #
  # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
  #
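Taken together, the additions above mean `responses.create` now accepts `prompt_cache_retention`, the shell and apply-patch tools, and matching tool-choice values. A hedged sketch under those assumptions (the tool hashes and the model name are illustrative, not verified payloads):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-5.1",                # placeholder model name
  input: "Run the failing spec and patch the bug.",
  tools: [
    {type: "shell"},               # assumed shape for the new shell tool
    {type: "apply_patch"}          # assumed shape for the new apply-patch tool
  ],
  tool_choice: :auto,
  prompt_cache_retention: "24h"    # extended prompt caching, per the new param
)

pp response.output
```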
@@ -125,7 +127,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload stream(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload stream(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -133,7+135,7 @@ module OpenAI
  #
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently suppo
  #
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
  #
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
  #
@@ -271,6 +273,8 @@ module OpenAI
  #
  # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
  #
+ # @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+ #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
  #
  # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
@@ -285,9 +289,9 @@ module OpenAI
  #
  # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
  #
- # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
+ # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating
  #
- # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
  #
  # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
  #
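The streaming overload gains the same `prompt_cache_retention` keyword. A brief sketch, assuming the returned stream can be iterated with `each` as elsewhere in the gem:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

stream = client.responses.stream(
  model: "gpt-5.1",                # placeholder model name
  input: "Summarize the diff above.",
  prompt_cache_retention: "24h"
)

stream.each do |event|
  pp event # server-sent events mirror the response_*_event models
end
```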
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
- VERSION = "0.35.1"
+ VERSION = "0.36.0"
  end
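To pick up these changes in an application, bump the dependency constraint in the Gemfile (standard Bundler usage) and run `bundle update openai`:

```ruby
# Gemfile
gem "openai", "~> 0.36.0"
```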
data/lib/openai.rb CHANGED
@@ -8,6 +8,7 @@ require "erb"
  require "etc"
  require "json"
  require "net/http"
+ require "openssl"
  require "pathname"
  require "rbconfig"
  require "securerandom"
@@ -527,15 +528,19 @@ require_relative "openai/models/response_format_json_schema"
  require_relative "openai/models/response_format_text"
  require_relative "openai/models/response_format_text_grammar"
  require_relative "openai/models/response_format_text_python"
+ require_relative "openai/models/responses/apply_patch_tool"
  require_relative "openai/models/responses/computer_tool"
  require_relative "openai/models/responses/custom_tool"
  require_relative "openai/models/responses/easy_input_message"
  require_relative "openai/models/responses/file_search_tool"
+ require_relative "openai/models/responses/function_shell_tool"
  require_relative "openai/models/responses/function_tool"
  require_relative "openai/models/responses/input_item_list_params"
  require_relative "openai/models/responses/input_token_count_params"
  require_relative "openai/models/responses/input_token_count_response"
  require_relative "openai/models/responses/response"
+ require_relative "openai/models/responses/response_apply_patch_tool_call"
+ require_relative "openai/models/responses/response_apply_patch_tool_call_output"
  require_relative "openai/models/responses/response_audio_delta_event"
  require_relative "openai/models/responses/response_audio_done_event"
  require_relative "openai/models/responses/response_audio_transcript_delta_event"
@@ -575,6 +580,9 @@ require_relative "openai/models/responses/response_function_call_arguments_delta
  require_relative "openai/models/responses/response_function_call_arguments_done_event"
  require_relative "openai/models/responses/response_function_call_output_item"
  require_relative "openai/models/responses/response_function_call_output_item_list"
+ require_relative "openai/models/responses/response_function_shell_call_output_content"
+ require_relative "openai/models/responses/response_function_shell_tool_call"
+ require_relative "openai/models/responses/response_function_shell_tool_call_output"
  require_relative "openai/models/responses/response_function_tool_call_item"
  require_relative "openai/models/responses/response_function_tool_call_output_item"
  require_relative "openai/models/responses/response_function_web_search"
@@ -633,10 +641,12 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_e
  require_relative "openai/models/responses/response_web_search_call_searching_event"
  require_relative "openai/models/responses/tool"
  require_relative "openai/models/responses/tool_choice_allowed"
+ require_relative "openai/models/responses/tool_choice_apply_patch"
  require_relative "openai/models/responses/tool_choice_custom"
  require_relative "openai/models/responses/tool_choice_function"
  require_relative "openai/models/responses/tool_choice_mcp"
  require_relative "openai/models/responses/tool_choice_options"
+ require_relative "openai/models/responses/tool_choice_shell"
  require_relative "openai/models/responses/tool_choice_types"
  require_relative "openai/models/responses/web_search_preview_tool"
  require_relative "openai/models/responses/web_search_tool"
data/manifest.yaml CHANGED
@@ -6,6 +6,7 @@ dependencies:
  - etc
  - json
  - net/http
+ - openssl
  - pathname
  - rbconfig
  - securerandom
@@ -26,8 +26,12 @@ module OpenAI

  class << self
  # @api private
- sig { params(url: URI::Generic).returns(Net::HTTP) }
- def connect(url)
+ sig do
+ params(cert_store: OpenSSL::X509::Store, url: URI::Generic).returns(
+ Net::HTTP
+ )
+ end
+ def connect(cert_store:, url:)
  end

  # @api private
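The private transport now threads an explicit `OpenSSL::X509::Store` into `connect`, which is why `openssl` joins the require list and the manifest above. A conceptual, stdlib-only sketch of what opening an HTTPS connection against an explicit certificate store looks like; this mirrors the idea, not the gem's private implementation:

```ruby
require "net/http"
require "openssl"
require "uri"

url = URI("https://api.openai.com/v1/models")

store = OpenSSL::X509::Store.new
store.set_default_paths # trust the system CA bundle

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true
http.cert_store = store
http.verify_mode = OpenSSL::SSL::VERIFY_PEER

# An unauthenticated HEAD request; the status (typically 401) still shows the
# TLS handshake succeeded against the configured store.
puts http.head(url.request_uri).code
```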
@@ -17,9 +17,10 @@ module OpenAI
  attr_accessor :completion_window

  # The endpoint to be used for all requests in the batch. Currently
- # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- # are supported. Note that `/v1/embeddings` batches are also restricted to a
- # maximum of 50,000 embedding inputs across all requests in the batch.
+ # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+ # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ # restricted to a maximum of 50,000 embedding inputs across all requests in the
+ # batch.
  sig { returns(OpenAI::BatchCreateParams::Endpoint::OrSymbol) }
  attr_accessor :endpoint

@@ -74,9 +75,10 @@ module OpenAI
  # is supported.
  completion_window:,
  # The endpoint to be used for all requests in the batch. Currently
- # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- # are supported. Note that `/v1/embeddings` batches are also restricted to a
- # maximum of 50,000 embedding inputs across all requests in the batch.
+ # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+ # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ # restricted to a maximum of 50,000 embedding inputs across all requests in the
+ # batch.
  endpoint:,
  # The ID of an uploaded file that contains requests for the new batch.
  #
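With `/v1/moderations` now an accepted `endpoint` value, a moderation batch can be submitted like any other. A hedged sketch: the field names follow the Batch API parameters documented above, while the file name, its JSONL contents, and the upload shape are illustrative assumptions:

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# "moderation_requests.jsonl" is a placeholder file holding one
# /v1/moderations request body per line.
file = client.files.create(
  file: Pathname.new("moderation_requests.jsonl"),
  purpose: :batch
)

batch = client.batches.create(
  input_file_id: file.id,
  endpoint: "/v1/moderations",   # newly allowed enum value in 0.36.0
  completion_window: "24h"       # currently the only supported window
)

puts batch.status
```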
@@ -145,9 +147,10 @@ module OpenAI
  end

  # The endpoint to be used for all requests in the batch. Currently
- # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
- # are supported. Note that `/v1/embeddings` batches are also restricted to a
- # maximum of 50,000 embedding inputs across all requests in the batch.
+ # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+ # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+ # restricted to a maximum of 50,000 embedding inputs across all requests in the
+ # batch.
  module Endpoint
  extend OpenAI::Internal::Type::Enum

@@ -175,6 +178,11 @@ module OpenAI
  :"/v1/completions",
  OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
  )
+ V1_MODERATIONS =
+ T.let(
+ :"/v1/moderations",
+ OpenAI::BatchCreateParams::Endpoint::TaggedSymbol
+ )

  sig do
  override.returns(