openai 0.18.1 → 0.19.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +20 -0
- data/README.md +1 -1
- data/lib/openai/client.rb +4 -0
- data/lib/openai/helpers/structured_output/boolean.rb +1 -0
- data/lib/openai/internal/conversation_cursor_page.rb +92 -0
- data/lib/openai/internal/transport/base_client.rb +1 -4
- data/lib/openai/internal/transport/pooled_net_requester.rb +1 -9
- data/lib/openai/internal/util.rb +1 -1
- data/lib/openai/models/audio/transcription.rb +1 -4
- data/lib/openai/models/audio/transcription_create_params.rb +2 -7
- data/lib/openai/models/audio/transcription_text_done_event.rb +1 -4
- data/lib/openai/models/beta/assistant_create_params.rb +6 -19
- data/lib/openai/models/beta/assistant_stream_event.rb +6 -24
- data/lib/openai/models/beta/assistant_update_params.rb +1 -4
- data/lib/openai/models/beta/message_stream_event.rb +1 -4
- data/lib/openai/models/beta/run_step_stream_event.rb +1 -4
- data/lib/openai/models/beta/thread_create_and_run_params.rb +10 -32
- data/lib/openai/models/beta/thread_create_params.rb +7 -22
- data/lib/openai/models/beta/threads/message.rb +3 -10
- data/lib/openai/models/beta/threads/message_create_params.rb +2 -7
- data/lib/openai/models/beta/threads/run.rb +2 -7
- data/lib/openai/models/beta/threads/run_create_params.rb +3 -10
- data/lib/openai/models/beta/threads/run_submit_tool_outputs_params.rb +1 -3
- data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call.rb +5 -17
- data/lib/openai/models/beta/threads/runs/code_interpreter_tool_call_delta.rb +1 -3
- data/lib/openai/models/beta/threads/runs/file_search_tool_call.rb +4 -12
- data/lib/openai/models/beta/threads/runs/run_step_delta_message_delta.rb +1 -4
- data/lib/openai/models/beta/threads/runs/tool_calls_step_details.rb +1 -4
- data/lib/openai/models/beta/threads/text.rb +1 -4
- data/lib/openai/models/chat/chat_completion_chunk.rb +1 -3
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +2 -7
- data/lib/openai/models/conversations/computer_screenshot_content.rb +38 -0
- data/lib/openai/models/conversations/container_file_citation_body.rb +58 -0
- data/lib/openai/models/conversations/conversation.rb +51 -0
- data/lib/openai/models/conversations/conversation_create_params.rb +39 -0
- data/lib/openai/models/conversations/conversation_delete_params.rb +16 -0
- data/lib/openai/models/conversations/conversation_deleted.rb +29 -0
- data/lib/openai/models/conversations/conversation_deleted_resource.rb +30 -0
- data/lib/openai/models/conversations/conversation_item.rb +568 -0
- data/lib/openai/models/conversations/conversation_item_list.rb +55 -0
- data/lib/openai/models/conversations/conversation_retrieve_params.rb +16 -0
- data/lib/openai/models/conversations/conversation_update_params.rb +31 -0
- data/lib/openai/models/conversations/file_citation_body.rb +42 -0
- data/lib/openai/models/conversations/input_file_content.rb +42 -0
- data/lib/openai/models/conversations/input_image_content.rb +62 -0
- data/lib/openai/models/conversations/input_text_content.rb +26 -0
- data/lib/openai/models/conversations/item_create_params.rb +37 -0
- data/lib/openai/models/conversations/item_delete_params.rb +22 -0
- data/lib/openai/models/conversations/item_list_params.rb +84 -0
- data/lib/openai/models/conversations/item_retrieve_params.rb +36 -0
- data/lib/openai/models/conversations/lob_prob.rb +35 -0
- data/lib/openai/models/conversations/message.rb +115 -0
- data/lib/openai/models/conversations/output_text_content.rb +57 -0
- data/lib/openai/models/conversations/refusal_content.rb +26 -0
- data/lib/openai/models/conversations/summary_text_content.rb +23 -0
- data/lib/openai/models/conversations/text_content.rb +23 -0
- data/lib/openai/models/conversations/top_log_prob.rb +29 -0
- data/lib/openai/models/conversations/url_citation_body.rb +50 -0
- data/lib/openai/models/eval_create_params.rb +6 -20
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +29 -53
- data/lib/openai/models/evals/create_eval_jsonl_run_data_source.rb +1 -3
- data/lib/openai/models/evals/run_create_params.rb +18 -54
- data/lib/openai/models/fine_tuning/reinforcement_hyperparameters.rb +1 -4
- data/lib/openai/models/moderation.rb +5 -15
- data/lib/openai/models/responses/input_item_list_params.rb +1 -9
- data/lib/openai/models/responses/response.rb +26 -1
- data/lib/openai/models/responses/response_computer_tool_call.rb +2 -6
- data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +1 -3
- data/lib/openai/models/responses/response_conversation_param.rb +20 -0
- data/lib/openai/models/responses/response_create_params.rb +34 -1
- data/lib/openai/models/responses/response_input_item.rb +2 -7
- data/lib/openai/models/responses/response_input_message_item.rb +1 -4
- data/lib/openai/models/responses/response_output_item.rb +1 -3
- data/lib/openai/models/responses/response_output_message.rb +1 -3
- data/lib/openai/models/responses/response_output_text.rb +3 -10
- data/lib/openai/models/responses/response_stream_event.rb +4 -16
- data/lib/openai/models/responses/response_text_delta_event.rb +1 -3
- data/lib/openai/models/responses/response_text_done_event.rb +1 -3
- data/lib/openai/models/responses/tool.rb +145 -34
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/conversations/items.rb +141 -0
- data/lib/openai/resources/conversations.rb +112 -0
- data/lib/openai/resources/responses/input_items.rb +1 -3
- data/lib/openai/resources/responses.rb +6 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +31 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +5 -5
- data/rbi/openai/internal/conversation_cursor_page.rbi +25 -0
- data/rbi/openai/models/conversations/computer_screenshot_content.rbi +60 -0
- data/rbi/openai/models/conversations/container_file_citation_body.rbi +82 -0
- data/rbi/openai/models/conversations/conversation.rbi +76 -0
- data/rbi/openai/models/conversations/conversation_create_params.rbi +144 -0
- data/rbi/openai/models/conversations/conversation_delete_params.rbi +32 -0
- data/rbi/openai/models/conversations/conversation_deleted.rbi +40 -0
- data/rbi/openai/models/conversations/conversation_deleted_resource.rbi +40 -0
- data/rbi/openai/models/conversations/conversation_item.rbi +835 -0
- data/rbi/openai/models/conversations/conversation_item_list.rbi +101 -0
- data/rbi/openai/models/conversations/conversation_retrieve_params.rbi +32 -0
- data/rbi/openai/models/conversations/conversation_update_params.rbi +56 -0
- data/rbi/openai/models/conversations/file_citation_body.rbi +61 -0
- data/rbi/openai/models/conversations/input_file_content.rbi +72 -0
- data/rbi/openai/models/conversations/input_image_content.rbi +113 -0
- data/rbi/openai/models/conversations/input_text_content.rbi +38 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +150 -0
- data/rbi/openai/models/conversations/item_delete_params.rbi +40 -0
- data/rbi/openai/models/conversations/item_list_params.rbi +174 -0
- data/rbi/openai/models/conversations/item_retrieve_params.rbi +70 -0
- data/rbi/openai/models/conversations/lob_prob.rbi +50 -0
- data/rbi/openai/models/conversations/message.rbi +196 -0
- data/rbi/openai/models/conversations/output_text_content.rbi +110 -0
- data/rbi/openai/models/conversations/refusal_content.rbi +38 -0
- data/rbi/openai/models/conversations/summary_text_content.rbi +31 -0
- data/rbi/openai/models/conversations/text_content.rbi +28 -0
- data/rbi/openai/models/conversations/top_log_prob.rbi +41 -0
- data/rbi/openai/models/conversations/url_citation_body.rbi +74 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +33 -33
- data/rbi/openai/models/responses/input_item_list_params.rbi +0 -11
- data/rbi/openai/models/responses/response.rbi +49 -0
- data/rbi/openai/models/responses/response_conversation_param.rbi +33 -0
- data/rbi/openai/models/responses/response_create_params.rbi +54 -0
- data/rbi/openai/models/responses/tool.rbi +243 -31
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/conversations/items.rbi +152 -0
- data/rbi/openai/resources/conversations.rbi +110 -0
- data/rbi/openai/resources/responses/input_items.rbi +0 -3
- data/rbi/openai/resources/responses.rbi +26 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/internal/conversation_cursor_page.rbs +15 -0
- data/sig/openai/models/conversations/computer_screenshot_content.rbs +28 -0
- data/sig/openai/models/conversations/container_file_citation_body.rbs +47 -0
- data/sig/openai/models/conversations/conversation.rbs +37 -0
- data/sig/openai/models/conversations/conversation_create_params.rbs +33 -0
- data/sig/openai/models/conversations/conversation_delete_params.rbs +17 -0
- data/sig/openai/models/conversations/conversation_deleted.rbs +28 -0
- data/sig/openai/models/conversations/conversation_deleted_resource.rbs +28 -0
- data/sig/openai/models/conversations/conversation_item.rbs +403 -0
- data/sig/openai/models/conversations/conversation_item_list.rbs +44 -0
- data/sig/openai/models/conversations/conversation_retrieve_params.rbs +17 -0
- data/sig/openai/models/conversations/conversation_update_params.rbs +26 -0
- data/sig/openai/models/conversations/file_citation_body.rbs +37 -0
- data/sig/openai/models/conversations/input_file_content.rbs +41 -0
- data/sig/openai/models/conversations/input_image_content.rbs +49 -0
- data/sig/openai/models/conversations/input_text_content.rbs +17 -0
- data/sig/openai/models/conversations/item_create_params.rbs +37 -0
- data/sig/openai/models/conversations/item_delete_params.rbs +25 -0
- data/sig/openai/models/conversations/item_list_params.rbs +66 -0
- data/sig/openai/models/conversations/item_retrieve_params.rbs +37 -0
- data/sig/openai/models/conversations/lob_prob.rbs +37 -0
- data/sig/openai/models/conversations/message.rbs +95 -0
- data/sig/openai/models/conversations/output_text_content.rbs +52 -0
- data/sig/openai/models/conversations/refusal_content.rbs +17 -0
- data/sig/openai/models/conversations/summary_text_content.rbs +17 -0
- data/sig/openai/models/conversations/text_content.rbs +17 -0
- data/sig/openai/models/conversations/top_log_prob.rbs +28 -0
- data/sig/openai/models/conversations/url_citation_body.rbs +42 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +22 -22
- data/sig/openai/models/responses/input_item_list_params.rbs +0 -7
- data/sig/openai/models/responses/response.rbs +15 -0
- data/sig/openai/models/responses/response_conversation_param.rbs +15 -0
- data/sig/openai/models/responses/response_create_params.rbs +14 -0
- data/sig/openai/models/responses/tool.rbs +83 -18
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/conversations/items.rbs +38 -0
- data/sig/openai/resources/conversations.rbs +31 -0
- data/sig/openai/resources/responses/input_items.rbs +0 -1
- data/sig/openai/resources/responses.rbs +2 -0
- metadata +95 -2
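
The bulk of this release is a new Conversations surface: `OpenAI::Resources::Conversations` and `OpenAI::Resources::Conversations::Items`, a `ConversationCursorPage` pager, and a `conversation` parameter added to Responses (`response_create_params.rb`, `response_conversation_param.rb`). A minimal usage sketch follows; it assumes the new resources use the same calling conventions as the rest of the SDK and takes the method names (`create`, `retrieve`, `update`, `delete`, `items.list`) from the `@see` references in the diff, so exact signatures may differ. The model name is a placeholder.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Create a conversation, then point a Responses call at it via the new
# `conversation` parameter added in this release.
conversation = client.conversations.create
response = client.responses.create(
  model: "gpt-4.1",              # placeholder model name
  input: "Hello!",
  conversation: conversation.id
)

# List the items now stored on the conversation.
page = client.conversations.items.list(conversation.id)
page.data.each { |item| puts item.class }
```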
data/lib/openai/models/conversations/conversation_item.rb

@@ -0,0 +1,568 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Conversations
+      # A single item within a conversation. The set of possible types are the same as
+      # the `output` type of a
+      # [Response object](https://platform.openai.com/docs/api-reference/responses/object#responses/object-output).
+      #
+      # @see OpenAI::Resources::Conversations::Items#retrieve
+      module ConversationItem
+        extend OpenAI::Internal::Type::Union
+
+        discriminator :type
+
+        variant :message, -> { OpenAI::Conversations::Message }
+
+        # A tool call to run a function. See the
+        # [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
+        variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCallItem }
+
+        variant :function_call_output, -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem }
+
+        # The results of a file search tool call. See the
+        # [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
+        variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall }
+
+        # The results of a web search tool call. See the
+        # [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
+        variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch }
+
+        # An image generation request made by the model.
+        variant :image_generation_call, -> { OpenAI::Conversations::ConversationItem::ImageGenerationCall }
+
+        # A tool call to a computer use tool. See the
+        # [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
+        variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall }
+
+        variant :computer_call_output, -> { OpenAI::Responses::ResponseComputerToolCallOutputItem }
+
+        # A description of the chain of thought used by a reasoning model while generating
+        # a response. Be sure to include these items in your `input` to the Responses API
+        # for subsequent turns of a conversation if you are manually
+        # [managing context](https://platform.openai.com/docs/guides/conversation-state).
+        variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem }
+
+        # A tool call to run code.
+        variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall }
+
+        # A tool call to run a command on the local shell.
+        variant :local_shell_call, -> { OpenAI::Conversations::ConversationItem::LocalShellCall }
+
+        # The output of a local shell tool call.
+        variant :local_shell_call_output, -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput }
+
+        # A list of tools available on an MCP server.
+        variant :mcp_list_tools, -> { OpenAI::Conversations::ConversationItem::McpListTools }
+
+        # A request for human approval of a tool invocation.
+        variant :mcp_approval_request, -> { OpenAI::Conversations::ConversationItem::McpApprovalRequest }
+
+        # A response to an MCP approval request.
+        variant :mcp_approval_response, -> { OpenAI::Conversations::ConversationItem::McpApprovalResponse }
+
+        # An invocation of a tool on an MCP server.
+        variant :mcp_call, -> { OpenAI::Conversations::ConversationItem::McpCall }
+
+        # A call to a custom tool created by the model.
+        variant :custom_tool_call, -> { OpenAI::Responses::ResponseCustomToolCall }
+
+        # The output of a custom tool call from your code, being sent back to the model.
+        variant :custom_tool_call_output, -> { OpenAI::Responses::ResponseCustomToolCallOutput }
+
+        class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the image generation call.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute result
+          # The generated image encoded in base64.
+          #
+          # @return [String, nil]
+          required :result, String, nil?: true
+
+          # @!attribute status
+          # The status of the image generation call.
+          #
+          # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::Status]
+          required :status, enum: -> { OpenAI::Conversations::ConversationItem::ImageGenerationCall::Status }
+
+          # @!attribute type
+          # The type of the image generation call. Always `image_generation_call`.
+          #
+          # @return [Symbol, :image_generation_call]
+          required :type, const: :image_generation_call
+
+          # @!method initialize(id:, result:, status:, type: :image_generation_call)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall} for more
+          # details.
+          #
+          # An image generation request made by the model.
+          #
+          # @param id [String] The unique ID of the image generation call.
+          #
+          # @param result [String, nil] The generated image encoded in base64.
+          #
+          # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall::Status] The status of the image generation call.
+          #
+          # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`.
+
+          # The status of the image generation call.
+          #
+          # @see OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall#status
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS = :in_progress
+            COMPLETED = :completed
+            GENERATING = :generating
+            FAILED = :failed
+
+            # @!method self.values
+            # @return [Array<Symbol>]
+          end
+        end
+
+        class LocalShellCall < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the local shell call.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute action
+          # Execute a shell command on the server.
+          #
+          # @return [OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action]
+          required :action, -> { OpenAI::Conversations::ConversationItem::LocalShellCall::Action }
+
+          # @!attribute call_id
+          # The unique ID of the local shell tool call generated by the model.
+          #
+          # @return [String]
+          required :call_id, String
+
+          # @!attribute status
+          # The status of the local shell call.
+          #
+          # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Status]
+          required :status, enum: -> { OpenAI::Conversations::ConversationItem::LocalShellCall::Status }
+
+          # @!attribute type
+          # The type of the local shell call. Always `local_shell_call`.
+          #
+          # @return [Symbol, :local_shell_call]
+          required :type, const: :local_shell_call
+
+          # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::LocalShellCall} for more
+          # details.
+          #
+          # A tool call to run a command on the local shell.
+          #
+          # @param id [String] The unique ID of the local shell call.
+          #
+          # @param action [OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action] Execute a shell command on the server.
+          #
+          # @param call_id [String] The unique ID of the local shell tool call generated by the model.
+          #
+          # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Status] The status of the local shell call.
+          #
+          # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`.
+
+          # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCall#action
+          class Action < OpenAI::Internal::Type::BaseModel
+            # @!attribute command
+            # The command to run.
+            #
+            # @return [Array<String>]
+            required :command, OpenAI::Internal::Type::ArrayOf[String]
+
+            # @!attribute env
+            # Environment variables to set for the command.
+            #
+            # @return [Hash{Symbol=>String}]
+            required :env, OpenAI::Internal::Type::HashOf[String]
+
+            # @!attribute type
+            # The type of the local shell action. Always `exec`.
+            #
+            # @return [Symbol, :exec]
+            required :type, const: :exec
+
+            # @!attribute timeout_ms
+            # Optional timeout in milliseconds for the command.
+            #
+            # @return [Integer, nil]
+            optional :timeout_ms, Integer, nil?: true
+
+            # @!attribute user
+            # Optional user to run the command as.
+            #
+            # @return [String, nil]
+            optional :user, String, nil?: true
+
+            # @!attribute working_directory
+            # Optional working directory to run the command in.
+            #
+            # @return [String, nil]
+            optional :working_directory, String, nil?: true
+
+            # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec)
+            # Some parameter documentations has been truncated, see
+            # {OpenAI::Models::Conversations::ConversationItem::LocalShellCall::Action} for
+            # more details.
+            #
+            # Execute a shell command on the server.
+            #
+            # @param command [Array<String>] The command to run.
+            #
+            # @param env [Hash{Symbol=>String}] Environment variables to set for the command.
+            #
+            # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command.
+            #
+            # @param user [String, nil] Optional user to run the command as.
+            #
+            # @param working_directory [String, nil] Optional working directory to run the command in.
+            #
+            # @param type [Symbol, :exec] The type of the local shell action. Always `exec`.
+          end
+
+          # The status of the local shell call.
+          #
+          # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCall#status
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS = :in_progress
+            COMPLETED = :completed
+            INCOMPLETE = :incomplete
+
+            # @!method self.values
+            # @return [Array<Symbol>]
+          end
+        end
+
+        class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the local shell tool call generated by the model.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute output
+          # A JSON string of the output of the local shell tool call.
+          #
+          # @return [String]
+          required :output, String
+
+          # @!attribute type
+          # The type of the local shell tool call output. Always `local_shell_call_output`.
+          #
+          # @return [Symbol, :local_shell_call_output]
+          required :type, const: :local_shell_call_output
+
+          # @!attribute status
+          # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+          #
+          # @return [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::Status, nil]
+          optional :status,
+                   enum: -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput::Status },
+                   nil?: true
+
+          # @!method initialize(id:, output:, status: nil, type: :local_shell_call_output)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput} for more
+          # details.
+          #
+          # The output of a local shell tool call.
+          #
+          # @param id [String] The unique ID of the local shell tool call generated by the model.
+          #
+          # @param output [String] A JSON string of the output of the local shell tool call.
+          #
+          # @param status [Symbol, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+          #
+          # @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`.
+
+          # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+          #
+          # @see OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput#status
+          module Status
+            extend OpenAI::Internal::Type::Enum
+
+            IN_PROGRESS = :in_progress
+            COMPLETED = :completed
+            INCOMPLETE = :incomplete
+
+            # @!method self.values
+            # @return [Array<Symbol>]
+          end
+        end
+
+        class McpListTools < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the list.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute server_label
+          # The label of the MCP server.
+          #
+          # @return [String]
+          required :server_label, String
+
+          # @!attribute tools
+          # The tools available on the server.
+          #
+          # @return [Array<OpenAI::Models::Conversations::ConversationItem::McpListTools::Tool>]
+          required :tools,
+                   -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Conversations::ConversationItem::McpListTools::Tool] }
+
+          # @!attribute type
+          # The type of the item. Always `mcp_list_tools`.
+          #
+          # @return [Symbol, :mcp_list_tools]
+          required :type, const: :mcp_list_tools
+
+          # @!attribute error
+          # Error message if the server could not list tools.
+          #
+          # @return [String, nil]
+          optional :error, String, nil?: true
+
+          # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::McpListTools} for more
+          # details.
+          #
+          # A list of tools available on an MCP server.
+          #
+          # @param id [String] The unique ID of the list.
+          #
+          # @param server_label [String] The label of the MCP server.
+          #
+          # @param tools [Array<OpenAI::Models::Conversations::ConversationItem::McpListTools::Tool>] The tools available on the server.
+          #
+          # @param error [String, nil] Error message if the server could not list tools.
+          #
+          # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`.
+
+          class Tool < OpenAI::Internal::Type::BaseModel
+            # @!attribute input_schema
+            # The JSON schema describing the tool's input.
+            #
+            # @return [Object]
+            required :input_schema, OpenAI::Internal::Type::Unknown
+
+            # @!attribute name
+            # The name of the tool.
+            #
+            # @return [String]
+            required :name, String
+
+            # @!attribute annotations
+            # Additional annotations about the tool.
+            #
+            # @return [Object, nil]
+            optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true
+
+            # @!attribute description
+            # The description of the tool.
+            #
+            # @return [String, nil]
+            optional :description, String, nil?: true
+
+            # @!method initialize(input_schema:, name:, annotations: nil, description: nil)
+            # Some parameter documentations has been truncated, see
+            # {OpenAI::Models::Conversations::ConversationItem::McpListTools::Tool} for more
+            # details.
+            #
+            # A tool available on an MCP server.
+            #
+            # @param input_schema [Object] The JSON schema describing the tool's input.
+            #
+            # @param name [String] The name of the tool.
+            #
+            # @param annotations [Object, nil] Additional annotations about the tool.
+            #
+            # @param description [String, nil] The description of the tool.
+          end
+        end
+
+        class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the approval request.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute arguments
+          # A JSON string of arguments for the tool.
+          #
+          # @return [String]
+          required :arguments, String
+
+          # @!attribute name
+          # The name of the tool to run.
+          #
+          # @return [String]
+          required :name, String
+
+          # @!attribute server_label
+          # The label of the MCP server making the request.
+          #
+          # @return [String]
+          required :server_label, String
+
+          # @!attribute type
+          # The type of the item. Always `mcp_approval_request`.
+          #
+          # @return [Symbol, :mcp_approval_request]
+          required :type, const: :mcp_approval_request
+
+          # @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest} for more
+          # details.
+          #
+          # A request for human approval of a tool invocation.
+          #
+          # @param id [String] The unique ID of the approval request.
+          #
+          # @param arguments [String] A JSON string of arguments for the tool.
+          #
+          # @param name [String] The name of the tool to run.
+          #
+          # @param server_label [String] The label of the MCP server making the request.
+          #
+          # @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`.
+        end
+
+        class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the approval response
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute approval_request_id
+          # The ID of the approval request being answered.
+          #
+          # @return [String]
+          required :approval_request_id, String
+
+          # @!attribute approve
+          # Whether the request was approved.
+          #
+          # @return [Boolean]
+          required :approve, OpenAI::Internal::Type::Boolean
+
+          # @!attribute type
+          # The type of the item. Always `mcp_approval_response`.
+          #
+          # @return [Symbol, :mcp_approval_response]
+          required :type, const: :mcp_approval_response
+
+          # @!attribute reason
+          # Optional reason for the decision.
+          #
+          # @return [String, nil]
+          optional :reason, String, nil?: true
+
+          # @!method initialize(id:, approval_request_id:, approve:, reason: nil, type: :mcp_approval_response)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse} for more
+          # details.
+          #
+          # A response to an MCP approval request.
+          #
+          # @param id [String] The unique ID of the approval response
+          #
+          # @param approval_request_id [String] The ID of the approval request being answered.
+          #
+          # @param approve [Boolean] Whether the request was approved.
+          #
+          # @param reason [String, nil] Optional reason for the decision.
+          #
+          # @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`.
+        end
+
+        class McpCall < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          # The unique ID of the tool call.
+          #
+          # @return [String]
+          required :id, String
+
+          # @!attribute arguments
+          # A JSON string of the arguments passed to the tool.
+          #
+          # @return [String]
+          required :arguments, String
+
+          # @!attribute name
+          # The name of the tool that was run.
+          #
+          # @return [String]
+          required :name, String
+
+          # @!attribute server_label
+          # The label of the MCP server running the tool.
+          #
+          # @return [String]
+          required :server_label, String
+
+          # @!attribute type
+          # The type of the item. Always `mcp_call`.
+          #
+          # @return [Symbol, :mcp_call]
+          required :type, const: :mcp_call
+
+          # @!attribute error
+          # The error from the tool call, if any.
+          #
+          # @return [String, nil]
+          optional :error, String, nil?: true
+
+          # @!attribute output
+          # The output from the tool call.
+          #
+          # @return [String, nil]
+          optional :output, String, nil?: true
+
+          # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call)
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::Conversations::ConversationItem::McpCall} for more details.
+          #
+          # An invocation of a tool on an MCP server.
+          #
+          # @param id [String] The unique ID of the tool call.
+          #
+          # @param arguments [String] A JSON string of the arguments passed to the tool.
+          #
+          # @param name [String] The name of the tool that was run.
+          #
+          # @param server_label [String] The label of the MCP server running the tool.
+          #
+          # @param error [String, nil] The error from the tool call, if any.
+          #
+          # @param output [String, nil] The output from the tool call.
+          #
+          # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
+        end
+
+        # @!method self.variants
+        # @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)]
+      end
+    end
+
+    ConversationItem = Conversations::ConversationItem
+  end
+end
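
Because `ConversationItem` is a `type`-discriminated union, items come back from the API as instances of the concrete model classes above. A hedged sketch of dispatching on them; the class names and attribute readers (`name`, `server_label`, `output`, `error`, `action.command`, `status`) are taken from this file, while `client`, `conversation_id`, and the `items.list` call are assumed to follow the SDK's usual conventions:

```ruby
# `client` and `conversation_id` are placeholders from the earlier sketch.
client.conversations.items.list(conversation_id).data.each do |item|
  case item
  when OpenAI::Models::Conversations::Message
    puts "message item #{item.class.name}"
  when OpenAI::Models::Conversations::ConversationItem::McpCall
    puts "MCP tool #{item.name} on #{item.server_label}: #{item.output || item.error}"
  when OpenAI::Models::Conversations::ConversationItem::LocalShellCall
    puts "shell: #{item.action.command.join(' ')} (#{item.status})"
  else
    puts "other item type: #{item.type}"
  end
end
```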
data/lib/openai/models/conversations/conversation_item_list.rb

@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Conversations
+      # @see OpenAI::Resources::Conversations::Items#create
+      class ConversationItemList < OpenAI::Internal::Type::BaseModel
+        # @!attribute data
+        # A list of conversation items.
+        #
+        # @return [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
+        required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem] }
+
+        # @!attribute first_id
+        # The ID of the first item in the list.
+        #
+        # @return [String]
+        required :first_id, String
+
+        # @!attribute has_more
+        # Whether there are more items available.
+        #
+        # @return [Boolean]
+        required :has_more, OpenAI::Internal::Type::Boolean
+
+        # @!attribute last_id
+        # The ID of the last item in the list.
+        #
+        # @return [String]
+        required :last_id, String
+
+        # @!attribute object
+        # The type of object returned, must be `list`.
+        #
+        # @return [Symbol, :list]
+        required :object, const: :list
+
+        # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list)
+        # A list of Conversation items.
+        #
+        # @param data [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>] A list of conversation items.
+        #
+        # @param first_id [String] The ID of the first item in the list.
+        #
+        # @param has_more [Boolean] Whether there are more items available.
+        #
+        # @param last_id [String] The ID of the last item in the list.
+        #
+        # @param object [Symbol, :list] The type of object returned, must be `list`.
+      end
+    end
+
+    ConversationItemList = Conversations::ConversationItemList
+  end
+end
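
`ConversationItemList` is the raw list envelope, and the file list above also adds an `OpenAI::Internal::ConversationCursorPage`, so item listings are presumably cursor-paged like the SDK's other list endpoints. A sketch of walking pages by hand, assuming the returned page exposes the `data`, `has_more`, and `last_id` fields defined above and that the list call accepts `after:` and `limit:` parameters (the `ItemListParams` fields are not shown in this excerpt; `handle` is your own method):

```ruby
cursor = nil
loop do
  page = client.conversations.items.list(conversation_id, after: cursor, limit: 20)
  page.data.each { |item| handle(item) }
  break unless page.has_more
  cursor = page.last_id
end
```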
data/lib/openai/models/conversations/conversation_retrieve_params.rb

@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Conversations
+      # @see OpenAI::Resources::Conversations#retrieve
+      class ConversationRetrieveParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        # @!method initialize(request_options: {})
+        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+      end
+    end
+  end
+end
data/lib/openai/models/conversations/conversation_update_params.rb

@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Conversations
+      # @see OpenAI::Resources::Conversations#update
+      class ConversationUpdateParams < OpenAI::Internal::Type::BaseModel
+        extend OpenAI::Internal::Type::RequestParameters::Converter
+        include OpenAI::Internal::Type::RequestParameters
+
+        # @!attribute metadata
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard. Keys are strings with a maximum
+        # length of 64 characters. Values are strings with a maximum length of 512
+        # characters.
+        #
+        # @return [Hash{Symbol=>String}]
+        required :metadata, OpenAI::Internal::Type::HashOf[String]
+
+        # @!method initialize(metadata:, request_options: {})
+        # Some parameter documentations has been truncated, see
+        # {OpenAI::Models::Conversations::ConversationUpdateParams} for more details.
+        #
+        # @param metadata [Hash{Symbol=>String}] Set of 16 key-value pairs that can be attached to an object. This can be
+        #
+        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+      end
+    end
+  end
+end
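
`ConversationUpdateParams` makes `metadata` the only body field, so updating a conversation is just a metadata write, while retrieve and delete take the (empty) params classes added alongside it. A sketch, assuming the usual `method(id, **params)` calling convention and a placeholder conversation ID; metadata values must be strings per the model above:

```ruby
client.conversations.update(
  conversation_id,                                        # e.g. "conv_..." (placeholder)
  metadata: { project: "support-bot", stage: "triage" }   # String keys/values per the 64/512-char limits
)

client.conversations.retrieve(conversation_id)
client.conversations.delete(conversation_id)
```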