openai 0.22.1 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +1 -1
- data/lib/openai/models/realtime/audio_transcription.rb +60 -0
- data/lib/openai/models/realtime/client_secret_create_params.rb +18 -9
- data/lib/openai/models/realtime/client_secret_create_response.rb +11 -250
- data/lib/openai/models/realtime/conversation_item.rb +1 -1
- data/lib/openai/models/realtime/conversation_item_added.rb +14 -1
- data/lib/openai/models/realtime/conversation_item_done.rb +3 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rb +10 -8
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rb +14 -5
- data/lib/openai/models/realtime/conversation_item_truncate_event.rb +2 -2
- data/lib/openai/models/realtime/input_audio_buffer_append_event.rb +10 -5
- data/lib/openai/models/realtime/models.rb +58 -0
- data/lib/openai/models/realtime/noise_reduction_type.rb +20 -0
- data/lib/openai/models/realtime/realtime_audio_config.rb +6 -427
- data/lib/openai/models/realtime/realtime_audio_config_input.rb +89 -0
- data/lib/openai/models/realtime/realtime_audio_config_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_audio_formats.rb +121 -0
- data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_client_event.rb +31 -23
- data/lib/openai/models/realtime/realtime_conversation_item_assistant_message.rb +43 -10
- data/lib/openai/models/realtime/realtime_conversation_item_function_call.rb +16 -7
- data/lib/openai/models/realtime/realtime_conversation_item_function_call_output.rb +15 -7
- data/lib/openai/models/realtime/realtime_conversation_item_system_message.rb +18 -6
- data/lib/openai/models/realtime/realtime_conversation_item_user_message.rb +62 -13
- data/lib/openai/models/realtime/realtime_response.rb +117 -107
- data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +100 -0
- data/lib/openai/models/realtime/realtime_response_create_mcp_tool.rb +310 -0
- data/lib/openai/models/realtime/realtime_response_create_params.rb +225 -0
- data/lib/openai/models/realtime/realtime_response_status.rb +1 -1
- data/lib/openai/models/realtime/realtime_response_usage.rb +5 -2
- data/lib/openai/models/realtime/realtime_response_usage_input_token_details.rb +58 -8
- data/lib/openai/models/realtime/realtime_server_event.rb +21 -5
- data/lib/openai/models/realtime/realtime_session.rb +9 -125
- data/lib/openai/models/realtime/realtime_session_client_secret.rb +36 -0
- data/lib/openai/models/realtime/realtime_session_create_request.rb +50 -71
- data/lib/openai/models/realtime/realtime_session_create_response.rb +621 -219
- data/lib/openai/models/realtime/realtime_tools_config_union.rb +2 -53
- data/lib/openai/models/realtime/realtime_tracing_config.rb +7 -6
- data/lib/openai/models/realtime/realtime_transcription_session_audio.rb +19 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +90 -0
- data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +131 -0
- data/lib/openai/models/realtime/realtime_transcription_session_client_secret.rb +38 -0
- data/lib/openai/models/realtime/realtime_transcription_session_create_request.rb +12 -270
- data/lib/openai/models/realtime/realtime_transcription_session_create_response.rb +78 -0
- data/lib/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rb +66 -0
- data/lib/openai/models/realtime/realtime_transcription_session_turn_detection.rb +57 -0
- data/lib/openai/models/realtime/realtime_truncation.rb +8 -40
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +34 -0
- data/lib/openai/models/realtime/response_cancel_event.rb +3 -1
- data/lib/openai/models/realtime/response_create_event.rb +18 -348
- data/lib/openai/models/realtime/response_done_event.rb +7 -0
- data/lib/openai/models/realtime/session_created_event.rb +20 -4
- data/lib/openai/models/realtime/session_update_event.rb +36 -12
- data/lib/openai/models/realtime/session_updated_event.rb +20 -4
- data/lib/openai/models/realtime/transcription_session_created.rb +8 -243
- data/lib/openai/models/realtime/transcription_session_update.rb +179 -3
- data/lib/openai/models/realtime/transcription_session_updated_event.rb +8 -243
- data/lib/openai/resources/realtime/client_secrets.rb +2 -3
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -1
- data/rbi/openai/models/realtime/audio_transcription.rbi +132 -0
- data/rbi/openai/models/realtime/client_secret_create_params.rbi +25 -11
- data/rbi/openai/models/realtime/client_secret_create_response.rbi +2 -587
- data/rbi/openai/models/realtime/conversation_item_added.rbi +14 -1
- data/rbi/openai/models/realtime/conversation_item_done.rbi +3 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rbi +11 -8
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rbi +15 -5
- data/rbi/openai/models/realtime/conversation_item_truncate_event.rbi +2 -2
- data/rbi/openai/models/realtime/input_audio_buffer_append_event.rbi +10 -5
- data/rbi/openai/models/realtime/models.rbi +97 -0
- data/rbi/openai/models/realtime/noise_reduction_type.rbi +31 -0
- data/rbi/openai/models/realtime/realtime_audio_config.rbi +8 -956
- data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +221 -0
- data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +222 -0
- data/rbi/openai/models/realtime/realtime_audio_formats.rbi +329 -0
- data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +262 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_assistant_message.rbi +51 -10
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call.rbi +16 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call_output.rbi +14 -7
- data/rbi/openai/models/realtime/realtime_conversation_item_system_message.rbi +16 -6
- data/rbi/openai/models/realtime/realtime_conversation_item_user_message.rbi +110 -12
- data/rbi/openai/models/realtime/realtime_response.rbi +287 -212
- data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +250 -0
- data/rbi/openai/models/realtime/realtime_response_create_mcp_tool.rbi +616 -0
- data/rbi/openai/models/realtime/realtime_response_create_params.rbi +529 -0
- data/rbi/openai/models/realtime/realtime_response_usage.rbi +8 -2
- data/rbi/openai/models/realtime/realtime_response_usage_input_token_details.rbi +106 -7
- data/rbi/openai/models/realtime/realtime_server_event.rbi +4 -1
- data/rbi/openai/models/realtime/realtime_session.rbi +12 -262
- data/rbi/openai/models/realtime/realtime_session_client_secret.rbi +49 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +112 -133
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +1229 -405
- data/rbi/openai/models/realtime/realtime_tools_config_union.rbi +1 -117
- data/rbi/openai/models/realtime/realtime_tracing_config.rbi +11 -10
- data/rbi/openai/models/realtime/realtime_transcription_session_audio.rbi +50 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +226 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +259 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_client_secret.rbi +51 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_create_request.rbi +25 -597
- data/rbi/openai/models/realtime/realtime_transcription_session_create_response.rbi +195 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbi +144 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_turn_detection.rbi +94 -0
- data/rbi/openai/models/realtime/realtime_truncation.rbi +5 -56
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +45 -0
- data/rbi/openai/models/realtime/response_cancel_event.rbi +3 -1
- data/rbi/openai/models/realtime/response_create_event.rbi +19 -786
- data/rbi/openai/models/realtime/response_done_event.rbi +7 -0
- data/rbi/openai/models/realtime/session_created_event.rbi +42 -9
- data/rbi/openai/models/realtime/session_update_event.rbi +57 -19
- data/rbi/openai/models/realtime/session_updated_event.rbi +42 -9
- data/rbi/openai/models/realtime/transcription_session_created.rbi +17 -591
- data/rbi/openai/models/realtime/transcription_session_update.rbi +425 -7
- data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +14 -591
- data/rbi/openai/resources/realtime/client_secrets.rbi +5 -3
- data/sig/openai/models/realtime/audio_transcription.rbs +57 -0
- data/sig/openai/models/realtime/client_secret_create_response.rbs +1 -251
- data/sig/openai/models/realtime/models.rbs +57 -0
- data/sig/openai/models/realtime/noise_reduction_type.rbs +16 -0
- data/sig/openai/models/realtime/realtime_audio_config.rbs +12 -331
- data/sig/openai/models/realtime/realtime_audio_config_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_config_output.rbs +72 -0
- data/sig/openai/models/realtime/realtime_audio_formats.rbs +128 -0
- data/sig/openai/models/realtime/realtime_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_conversation_item_assistant_message.rbs +17 -2
- data/sig/openai/models/realtime/realtime_conversation_item_user_message.rbs +30 -1
- data/sig/openai/models/realtime/realtime_response.rbs +103 -82
- data/sig/openai/models/realtime/realtime_response_create_audio_output.rbs +84 -0
- data/sig/openai/models/realtime/realtime_response_create_mcp_tool.rbs +218 -0
- data/sig/openai/models/realtime/realtime_response_create_params.rbs +148 -0
- data/sig/openai/models/realtime/realtime_response_usage_input_token_details.rbs +50 -1
- data/sig/openai/models/realtime/realtime_session.rbs +16 -106
- data/sig/openai/models/realtime/realtime_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +27 -43
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +389 -187
- data/sig/openai/models/realtime/realtime_tools_config_union.rbs +1 -53
- data/sig/openai/models/realtime/realtime_transcription_session_audio.rbs +24 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input.rbs +72 -0
- data/sig/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbs +99 -0
- data/sig/openai/models/realtime/realtime_transcription_session_client_secret.rbs +20 -0
- data/sig/openai/models/realtime/realtime_transcription_session_create_request.rbs +11 -203
- data/sig/openai/models/realtime/realtime_transcription_session_create_response.rbs +69 -0
- data/sig/openai/models/realtime/realtime_transcription_session_input_audio_transcription.rbs +59 -0
- data/sig/openai/models/realtime/realtime_transcription_session_turn_detection.rbs +47 -0
- data/sig/openai/models/realtime/realtime_truncation.rbs +1 -28
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +21 -0
- data/sig/openai/models/realtime/response_create_event.rbs +6 -249
- data/sig/openai/models/realtime/session_created_event.rbs +14 -4
- data/sig/openai/models/realtime/session_update_event.rbs +14 -4
- data/sig/openai/models/realtime/session_updated_event.rbs +14 -4
- data/sig/openai/models/realtime/transcription_session_created.rbs +4 -254
- data/sig/openai/models/realtime/transcription_session_update.rbs +154 -4
- data/sig/openai/models/realtime/transcription_session_updated_event.rbs +4 -254
- metadata +59 -5
- data/lib/openai/models/realtime/realtime_client_secret_config.rb +0 -64
- data/rbi/openai/models/realtime/realtime_client_secret_config.rbi +0 -147
- data/sig/openai/models/realtime/realtime_client_secret_config.rbs +0 -60
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
# frozen_string_literal: true

module OpenAI
  module Models
    module Realtime
      class RealtimeResponseCreateMcpTool < OpenAI::Internal::Type::BaseModel
        # @!attribute server_label
        #   A label for this MCP server, used to identify it in tool calls.
        #
        #   @return [String]
        required :server_label, String

        # @!attribute type
        #   The type of the MCP tool. Always `mcp`.
        #
        #   @return [Symbol, :mcp]
        required :type, const: :mcp

        # @!attribute allowed_tools
        #   List of allowed tool names or a filter object.
        #
        #   @return [Array<String>, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::McpToolFilter, nil]
        optional :allowed_tools,
                 union: -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::AllowedTools },
                 nil?: true

        # @!attribute authorization
        #   An OAuth access token that can be used with a remote MCP server, either with a
        #   custom MCP server URL or a service connector. Your application must handle the
        #   OAuth authorization flow and provide the token here.
        #
        #   @return [String, nil]
        optional :authorization, String

        # @!attribute connector_id
        #   Identifier for service connectors, like those available in ChatGPT. One of
        #   `server_url` or `connector_id` must be provided. Learn more about service
        #   connectors
        #   [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
        #
        #   Currently supported `connector_id` values are:
        #
        #   - Dropbox: `connector_dropbox`
        #   - Gmail: `connector_gmail`
        #   - Google Calendar: `connector_googlecalendar`
        #   - Google Drive: `connector_googledrive`
        #   - Microsoft Teams: `connector_microsoftteams`
        #   - Outlook Calendar: `connector_outlookcalendar`
        #   - Outlook Email: `connector_outlookemail`
        #   - SharePoint: `connector_sharepoint`
        #
        #   @return [Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::ConnectorID, nil]
        optional :connector_id, enum: -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::ConnectorID }

        # @!attribute headers
        #   Optional HTTP headers to send to the MCP server. Use for authentication or other
        #   purposes.
        #
        #   @return [Hash{Symbol=>String}, nil]
        optional :headers, OpenAI::Internal::Type::HashOf[String], nil?: true

        # @!attribute require_approval
        #   Specify which of the MCP server's tools require approval.
        #
        #   @return [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalSetting, nil]
        optional :require_approval,
                 union: -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::RequireApproval },
                 nil?: true

        # @!attribute server_description
        #   Optional description of the MCP server, used to provide more context.
        #
        #   @return [String, nil]
        optional :server_description, String

        # @!attribute server_url
        #   The URL for the MCP server. One of `server_url` or `connector_id` must be
        #   provided.
        #
        #   @return [String, nil]
        optional :server_url, String

        # @!method initialize(server_label:, allowed_tools: nil, authorization: nil, connector_id: nil, headers: nil, require_approval: nil, server_description: nil, server_url: nil, type: :mcp)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool} for more details.
        #
        #   Give the model access to additional tools via remote Model Context Protocol
        #   (MCP) servers.
        #   [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
        #
        #   @param server_label [String] A label for this MCP server, used to identify it in tool calls.
        #
        #   @param allowed_tools [Array<String>, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::McpToolFilter, nil] List of allowed tool names or a filter object.
        #
        #   @param authorization [String] An OAuth access token that can be used with a remote MCP server, either
        #
        #   @param connector_id [Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::ConnectorID] Identifier for service connectors, like those available in ChatGPT. One of
        #
        #   @param headers [Hash{Symbol=>String}, nil] Optional HTTP headers to send to the MCP server. Use for authentication
        #
        #   @param require_approval [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalSetting, nil] Specify which of the MCP server's tools require approval.
        #
        #   @param server_description [String] Optional description of the MCP server, used to provide more context.
        #
        #   @param server_url [String] The URL for the MCP server. One of `server_url` or `connector_id` must be
        #
        #   @param type [Symbol, :mcp] The type of the MCP tool. Always `mcp`.

        # List of allowed tool names or a filter object.
        #
        # @see OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool#allowed_tools
        module AllowedTools
          extend OpenAI::Internal::Type::Union

          # A string array of allowed tool names
          variant -> { OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::StringArray }

          # A filter object to specify which tools are allowed.
          variant -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::McpToolFilter }

          class McpToolFilter < OpenAI::Internal::Type::BaseModel
            # @!attribute read_only
            #   Indicates whether or not a tool modifies data or is read-only. If an MCP server
            #   is
            #   [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
            #   it will match this filter.
            #
            #   @return [Boolean, nil]
            optional :read_only, OpenAI::Internal::Type::Boolean

            # @!attribute tool_names
            #   List of allowed tool names.
            #
            #   @return [Array<String>, nil]
            optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

            # @!method initialize(read_only: nil, tool_names: nil)
            #   Some parameter documentation has been truncated, see
            #   {OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::McpToolFilter}
            #   for more details.
            #
            #   A filter object to specify which tools are allowed.
            #
            #   @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
            #
            #   @param tool_names [Array<String>] List of allowed tool names.
          end

          # @!method self.variants
          #   @return [Array(Array<String>, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::AllowedTools::McpToolFilter)]

          # @type [OpenAI::Internal::Type::Converter]
          StringArray = OpenAI::Internal::Type::ArrayOf[String]
        end

        # Identifier for service connectors, like those available in ChatGPT. One of
        # `server_url` or `connector_id` must be provided. Learn more about service
        # connectors
        # [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
        #
        # Currently supported `connector_id` values are:
        #
        # - Dropbox: `connector_dropbox`
        # - Gmail: `connector_gmail`
        # - Google Calendar: `connector_googlecalendar`
        # - Google Drive: `connector_googledrive`
        # - Microsoft Teams: `connector_microsoftteams`
        # - Outlook Calendar: `connector_outlookcalendar`
        # - Outlook Email: `connector_outlookemail`
        # - SharePoint: `connector_sharepoint`
        #
        # @see OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool#connector_id
        module ConnectorID
          extend OpenAI::Internal::Type::Enum

          CONNECTOR_DROPBOX = :connector_dropbox
          CONNECTOR_GMAIL = :connector_gmail
          CONNECTOR_GOOGLECALENDAR = :connector_googlecalendar
          CONNECTOR_GOOGLEDRIVE = :connector_googledrive
          CONNECTOR_MICROSOFTTEAMS = :connector_microsoftteams
          CONNECTOR_OUTLOOKCALENDAR = :connector_outlookcalendar
          CONNECTOR_OUTLOOKEMAIL = :connector_outlookemail
          CONNECTOR_SHAREPOINT = :connector_sharepoint

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # Specify which of the MCP server's tools require approval.
        #
        # @see OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool#require_approval
        module RequireApproval
          extend OpenAI::Internal::Type::Union

          # Specify which of the MCP server's tools require approval. Can be
          # `always`, `never`, or a filter object associated with tools
          # that require approval.
          variant -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter }

          # Specify a single approval policy for all tools. One of `always` or
          # `never`. When set to `always`, all tools will require approval. When
          # set to `never`, all tools will not require approval.
          variant enum: -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalSetting }

          class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel
            # @!attribute always
            #   A filter object to specify which tools are allowed.
            #
            #   @return [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Always, nil]
            optional :always,
                     -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Always }

            # @!attribute never
            #   A filter object to specify which tools are allowed.
            #
            #   @return [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Never, nil]
            optional :never,
                     -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Never }

            # @!method initialize(always: nil, never: nil)
            #   Some parameter documentation has been truncated, see
            #   {OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter}
            #   for more details.
            #
            #   Specify which of the MCP server's tools require approval. Can be `always`,
            #   `never`, or a filter object associated with tools that require approval.
            #
            #   @param always [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Always] A filter object to specify which tools are allowed.
            #
            #   @param never [OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Never] A filter object to specify which tools are allowed.

            # @see OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter#always
            class Always < OpenAI::Internal::Type::BaseModel
              # @!attribute read_only
              #   Indicates whether or not a tool modifies data or is read-only. If an MCP server
              #   is
              #   [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
              #   it will match this filter.
              #
              #   @return [Boolean, nil]
              optional :read_only, OpenAI::Internal::Type::Boolean

              # @!attribute tool_names
              #   List of allowed tool names.
              #
              #   @return [Array<String>, nil]
              optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

              # @!method initialize(read_only: nil, tool_names: nil)
              #   Some parameter documentation has been truncated, see
              #   {OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Always}
              #   for more details.
              #
              #   A filter object to specify which tools are allowed.
              #
              #   @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
              #
              #   @param tool_names [Array<String>] List of allowed tool names.
            end

            # @see OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter#never
            class Never < OpenAI::Internal::Type::BaseModel
              # @!attribute read_only
              #   Indicates whether or not a tool modifies data or is read-only. If an MCP server
              #   is
              #   [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
              #   it will match this filter.
              #
              #   @return [Boolean, nil]
              optional :read_only, OpenAI::Internal::Type::Boolean

              # @!attribute tool_names
              #   List of allowed tool names.
              #
              #   @return [Array<String>, nil]
              optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]

              # @!method initialize(read_only: nil, tool_names: nil)
              #   Some parameter documentation has been truncated, see
              #   {OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter::Never}
              #   for more details.
              #
              #   A filter object to specify which tools are allowed.
              #
              #   @param read_only [Boolean] Indicates whether or not a tool modifies data or is read-only. If an
              #
              #   @param tool_names [Array<String>] List of allowed tool names.
            end
          end

          # Specify a single approval policy for all tools. One of `always` or `never`. When
          # set to `always`, all tools will require approval. When set to `never`, all tools
          # will not require approval.
          module McpToolApprovalSetting
            extend OpenAI::Internal::Type::Enum

            ALWAYS = :always
            NEVER = :never

            # @!method self.values
            #   @return [Array<Symbol>]
          end

          # @!method self.variants
          #   @return [Array(OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool::RequireApproval::McpToolApprovalSetting)]
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module OpenAI
|
|
4
|
+
module Models
|
|
5
|
+
module Realtime
|
|
6
|
+
class RealtimeResponseCreateParams < OpenAI::Internal::Type::BaseModel
|
|
7
|
+
# @!attribute audio
|
|
8
|
+
# Configuration for audio input and output.
|
|
9
|
+
#
|
|
10
|
+
# @return [OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput, nil]
|
|
11
|
+
optional :audio, -> { OpenAI::Realtime::RealtimeResponseCreateAudioOutput }
|
|
12
|
+
|
|
13
|
+
# @!attribute conversation
|
|
14
|
+
# Controls which conversation the response is added to. Currently supports `auto`
|
|
15
|
+
# and `none`, with `auto` as the default value. The `auto` value means that the
|
|
16
|
+
# contents of the response will be added to the default conversation. Set this to
|
|
17
|
+
# `none` to create an out-of-band response which will not add items to default
|
|
18
|
+
# conversation.
|
|
19
|
+
#
|
|
20
|
+
# @return [String, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateParams::Conversation, nil]
|
|
21
|
+
optional :conversation, union: -> { OpenAI::Realtime::RealtimeResponseCreateParams::Conversation }
|
|
22
|
+
|
|
23
|
+
# @!attribute input
|
|
24
|
+
# Input items to include in the prompt for the model. Using this field creates a
|
|
25
|
+
# new context for this Response instead of using the default conversation. An
|
|
26
|
+
# empty array `[]` will clear the context for this Response. Note that this can
|
|
27
|
+
# include references to items that previously appeared in the session using their
|
|
28
|
+
# id.
|
|
29
|
+
#
|
|
30
|
+
# @return [Array<OpenAI::Models::Realtime::RealtimeConversationItemSystemMessage, OpenAI::Models::Realtime::RealtimeConversationItemUserMessage, OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage, OpenAI::Models::Realtime::RealtimeConversationItemFunctionCall, OpenAI::Models::Realtime::RealtimeConversationItemFunctionCallOutput, OpenAI::Models::Realtime::RealtimeMcpApprovalResponse, OpenAI::Models::Realtime::RealtimeMcpListTools, OpenAI::Models::Realtime::RealtimeMcpToolCall, OpenAI::Models::Realtime::RealtimeMcpApprovalRequest>, nil]
|
|
31
|
+
optional :input, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Realtime::ConversationItem] }
|
|
32
|
+
|
|
33
|
+
# @!attribute instructions
|
|
34
|
+
# The default system instructions (i.e. system message) prepended to model calls.
|
|
35
|
+
# This field allows the client to guide the model on desired responses. The model
|
|
36
|
+
# can be instructed on response content and format, (e.g. "be extremely succinct",
|
|
37
|
+
# "act friendly", "here are examples of good responses") and on audio behavior
|
|
38
|
+
# (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
|
39
|
+
# instructions are not guaranteed to be followed by the model, but they provide
|
|
40
|
+
# guidance to the model on the desired behavior. Note that the server sets default
|
|
41
|
+
# instructions which will be used if this field is not set and are visible in the
|
|
42
|
+
# `session.created` event at the start of the session.
|
|
43
|
+
#
|
|
44
|
+
# @return [String, nil]
|
|
45
|
+
optional :instructions, String
|
|
46
|
+
|
|
47
|
+
# @!attribute max_output_tokens
|
|
48
|
+
# Maximum number of output tokens for a single assistant response, inclusive of
|
|
49
|
+
# tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
|
50
|
+
# `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
|
51
|
+
#
|
|
52
|
+
# @return [Integer, Symbol, :inf, nil]
|
|
53
|
+
optional :max_output_tokens, union: -> { OpenAI::Realtime::RealtimeResponseCreateParams::MaxOutputTokens }
|
|
54
|
+
|
|
55
|
+
# @!attribute metadata
|
|
56
|
+
# Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
57
|
+
# for storing additional information about the object in a structured format, and
|
|
58
|
+
# querying for objects via API or the dashboard.
|
|
59
|
+
#
|
|
60
|
+
# Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
61
|
+
# a maximum length of 512 characters.
|
|
62
|
+
#
|
|
63
|
+
# @return [Hash{Symbol=>String}, nil]
|
|
64
|
+
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
|
|
65
|
+
|
|
66
|
+
# @!attribute output_modalities
|
|
67
|
+
# The set of modalities the model used to respond, currently the only possible
|
|
68
|
+
# values are `[\"audio\"]`, `[\"text\"]`. Audio output always include a text
|
|
69
|
+
# transcript. Setting the output to mode `text` will disable audio output from the
|
|
70
|
+
# model.
|
|
71
|
+
#
|
|
72
|
+
# @return [Array<Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateParams::OutputModality>, nil]
|
|
73
|
+
optional :output_modalities,
|
|
74
|
+
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Realtime::RealtimeResponseCreateParams::OutputModality] }
|
|
75
|
+
|
|
76
|
+
# @!attribute prompt
|
|
77
|
+
# Reference to a prompt template and its variables.
|
|
78
|
+
# [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
|
|
79
|
+
#
|
|
80
|
+
# @return [OpenAI::Models::Responses::ResponsePrompt, nil]
|
|
81
|
+
optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
|
|
82
|
+
|
|
83
|
+
# @!attribute tool_choice
|
|
84
|
+
# How the model chooses tools. Provide one of the string modes or force a specific
|
|
85
|
+
# function/MCP tool.
|
|
86
|
+
#
|
|
87
|
+
# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil]
|
|
88
|
+
optional :tool_choice, union: -> { OpenAI::Realtime::RealtimeResponseCreateParams::ToolChoice }
|
|
89
|
+
|
|
90
|
+
# @!attribute tools
|
|
91
|
+
# Tools available to the model.
|
|
92
|
+
#
|
|
93
|
+
# @return [Array<OpenAI::Models::Realtime::Models, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool>, nil]
|
|
94
|
+
optional :tools,
|
|
95
|
+
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Realtime::RealtimeResponseCreateParams::Tool] }
|
|
96
|
+
|
|
97
|
+
# @!method initialize(audio: nil, conversation: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, output_modalities: nil, prompt: nil, tool_choice: nil, tools: nil)
|
|
98
|
+
# Some parameter documentations has been truncated, see
|
|
99
|
+
# {OpenAI::Models::Realtime::RealtimeResponseCreateParams} for more details.
|
|
100
|
+
#
|
|
101
|
+
# Create a new Realtime response with these parameters
|
|
102
|
+
#
|
|
103
|
+
# @param audio [OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput] Configuration for audio input and output.
|
|
104
|
+
#
|
|
105
|
+
# @param conversation [String, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateParams::Conversation] Controls which conversation the response is added to. Currently supports
|
|
106
|
+
#
|
|
107
|
+
# @param input [Array<OpenAI::Models::Realtime::RealtimeConversationItemSystemMessage, OpenAI::Models::Realtime::RealtimeConversationItemUserMessage, OpenAI::Models::Realtime::RealtimeConversationItemAssistantMessage, OpenAI::Models::Realtime::RealtimeConversationItemFunctionCall, OpenAI::Models::Realtime::RealtimeConversationItemFunctionCallOutput, OpenAI::Models::Realtime::RealtimeMcpApprovalResponse, OpenAI::Models::Realtime::RealtimeMcpListTools, OpenAI::Models::Realtime::RealtimeMcpToolCall, OpenAI::Models::Realtime::RealtimeMcpApprovalRequest>] Input items to include in the prompt for the model. Using this field
|
|
108
|
+
#
|
|
109
|
+
# @param instructions [String] The default system instructions (i.e. system message) prepended to model calls.
|
|
110
|
+
#
|
|
111
|
+
# @param max_output_tokens [Integer, Symbol, :inf] Maximum number of output tokens for a single assistant response,
|
|
112
|
+
#
|
|
113
|
+
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
|
|
114
|
+
#
|
|
115
|
+
# @param output_modalities [Array<Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateParams::OutputModality>] The set of modalities the model used to respond, currently the only possible val
|
|
116
|
+
#
|
|
117
|
+
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
|
|
118
|
+
#
|
|
119
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model chooses tools. Provide one of the string modes or force a specific
|
|
120
|
+
#
|
|
121
|
+
# @param tools [Array<OpenAI::Models::Realtime::Models, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool>] Tools available to the model.
|
|
122
|
+
|
|
123
|
+
# Controls which conversation the response is added to. Currently supports `auto`
|
|
124
|
+
# and `none`, with `auto` as the default value. The `auto` value means that the
|
|
125
|
+
# contents of the response will be added to the default conversation. Set this to
|
|
126
|
+
# `none` to create an out-of-band response which will not add items to default
|
|
127
|
+
# conversation.
|
|
128
|
+
#
|
|
129
|
+
# @see OpenAI::Models::Realtime::RealtimeResponseCreateParams#conversation
|
|
130
|
+
module Conversation
|
|
131
|
+
extend OpenAI::Internal::Type::Union
|
|
132
|
+
|
|
133
|
+
variant String
|
|
134
|
+
|
|
135
|
+
variant const: -> { OpenAI::Models::Realtime::RealtimeResponseCreateParams::Conversation::AUTO }
|
|
136
|
+
|
|
137
|
+
variant const: -> { OpenAI::Models::Realtime::RealtimeResponseCreateParams::Conversation::NONE }
|
|
138
|
+
|
|
139
|
+
# @!method self.variants
|
|
140
|
+
# @return [Array(String, Symbol)]
|
|
141
|
+
|
|
142
|
+
define_sorbet_constant!(:Variants) do
|
|
143
|
+
T.type_alias { T.any(String, OpenAI::Realtime::RealtimeResponseCreateParams::Conversation::TaggedSymbol) }
|
|
144
|
+
end
|
|
145
|
+
|
|
146
|
+
# @!group
|
|
147
|
+
|
|
148
|
+
AUTO = :auto
|
|
149
|
+
NONE = :none
|
|
150
|
+
|
|
151
|
+
# @!endgroup
|
|
152
|
+
end
|
|
153
|
+
|
|
154
|
+
# Maximum number of output tokens for a single assistant response, inclusive of
|
|
155
|
+
# tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
|
156
|
+
# `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
|
157
|
+
#
|
|
158
|
+
# @see OpenAI::Models::Realtime::RealtimeResponseCreateParams#max_output_tokens
|
|
159
|
+
module MaxOutputTokens
|
|
160
|
+
extend OpenAI::Internal::Type::Union
|
|
161
|
+
|
|
162
|
+
variant Integer
|
|
163
|
+
|
|
164
|
+
variant const: :inf
|
|
165
|
+
|
|
166
|
+
# @!method self.variants
|
|
167
|
+
# @return [Array(Integer, Symbol, :inf)]
|
|
168
|
+
end
|
|
169
|
+
|
|
170
|
+
module OutputModality
|
|
171
|
+
extend OpenAI::Internal::Type::Enum
|
|
172
|
+
|
|
173
|
+
TEXT = :text
|
|
174
|
+
AUDIO = :audio
|
|
175
|
+
|
|
176
|
+
# @!method self.values
|
|
177
|
+
# @return [Array<Symbol>]
|
|
178
|
+
end
|
|
179
|
+
|
|
180
|
+
# How the model chooses tools. Provide one of the string modes or force a specific
|
|
181
|
+
# function/MCP tool.
|
|
182
|
+
#
|
|
183
|
+
# @see OpenAI::Models::Realtime::RealtimeResponseCreateParams#tool_choice
|
|
184
|
+
module ToolChoice
|
|
185
|
+
extend OpenAI::Internal::Type::Union
|
|
186
|
+
|
|
187
|
+
# Controls which (if any) tool is called by the model.
|
|
188
|
+
#
|
|
189
|
+
# `none` means the model will not call any tool and instead generates a message.
|
|
190
|
+
#
|
|
191
|
+
# `auto` means the model can pick between generating a message or calling one or
|
|
192
|
+
# more tools.
|
|
193
|
+
#
|
|
194
|
+
# `required` means the model must call one or more tools.
|
|
195
|
+
variant enum: -> { OpenAI::Responses::ToolChoiceOptions }
|
|
196
|
+
|
|
197
|
+
# Use this option to force the model to call a specific function.
|
|
198
|
+
variant -> { OpenAI::Responses::ToolChoiceFunction }
|
|
199
|
+
|
|
200
|
+
# Use this option to force the model to call a specific tool on a remote MCP server.
|
|
201
|
+
variant -> { OpenAI::Responses::ToolChoiceMcp }
|
|
202
|
+
|
|
203
|
+
# @!method self.variants
|
|
204
|
+
# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
|
|
205
|
+
end
|
|
206
|
+
|
|
207
|
+
# Give the model access to additional tools via remote Model Context Protocol
|
|
208
|
+
# (MCP) servers.
|
|
209
|
+
# [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
|
|
210
|
+
module Tool
|
|
211
|
+
extend OpenAI::Internal::Type::Union
|
|
212
|
+
|
|
213
|
+
variant -> { OpenAI::Realtime::Models }
|
|
214
|
+
|
|
215
|
+
# Give the model access to additional tools via remote Model Context Protocol
|
|
216
|
+
# (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
|
|
217
|
+
variant -> { OpenAI::Realtime::RealtimeResponseCreateMcpTool }
|
|
218
|
+
|
|
219
|
+
# @!method self.variants
|
|
220
|
+
# @return [Array(OpenAI::Models::Realtime::Models, OpenAI::Models::Realtime::RealtimeResponseCreateMcpTool)]
|
|
221
|
+
end
|
|
222
|
+
end
|
|
223
|
+
end
|
|
224
|
+
end
|
|
225
|
+
end
|
|
@@ -36,7 +36,7 @@ module OpenAI
|
|
|
36
36
|
#
|
|
37
37
|
# @param error [OpenAI::Models::Realtime::RealtimeResponseStatus::Error] A description of the error that caused the response to fail,
|
|
38
38
|
#
|
|
39
|
-
# @param reason [Symbol, OpenAI::Models::Realtime::RealtimeResponseStatus::Reason] The reason the Response did not complete. For a `cancelled` Response,
|
|
39
|
+
# @param reason [Symbol, OpenAI::Models::Realtime::RealtimeResponseStatus::Reason] The reason the Response did not complete. For a `cancelled` Response, one of `t
|
|
40
40
|
#
|
|
41
41
|
# @param type [Symbol, OpenAI::Models::Realtime::RealtimeResponseStatus::Type] The type of error that caused the response to fail, corresponding
|
|
42
42
|
|
|
@@ -5,7 +5,10 @@ module OpenAI
|
|
|
5
5
|
module Realtime
|
|
6
6
|
class RealtimeResponseUsage < OpenAI::Internal::Type::BaseModel
|
|
7
7
|
# @!attribute input_token_details
|
|
8
|
-
# Details about the input tokens used in the Response.
|
|
8
|
+
# Details about the input tokens used in the Response. Cached tokens are tokens
|
|
9
|
+
# from previous turns in the conversation that are included as context for the
|
|
10
|
+
# current response. Cached tokens here are counted as a subset of input tokens,
|
|
11
|
+
# meaning input tokens will include cached and uncached tokens.
|
|
9
12
|
#
|
|
10
13
|
# @return [OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails, nil]
|
|
11
14
|
optional :input_token_details, -> { OpenAI::Realtime::RealtimeResponseUsageInputTokenDetails }
|
|
@@ -46,7 +49,7 @@ module OpenAI
|
|
|
46
49
|
# Conversation, thus output from previous turns (text and audio tokens) will
|
|
47
50
|
# become the input for later turns.
|
|
48
51
|
#
|
|
49
|
-
# @param input_token_details [OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails] Details about the input tokens used in the Response.
|
|
52
|
+
# @param input_token_details [OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails] Details about the input tokens used in the Response. Cached tokens are tokens fr
|
|
50
53
|
#
|
|
51
54
|
# @param input_tokens [Integer] The number of input tokens used in the Response, including text and
|
|
52
55
|
#
|
|
@@ -5,31 +5,81 @@ module OpenAI
|
|
|
5
5
|
module Realtime
|
|
6
6
|
class RealtimeResponseUsageInputTokenDetails < OpenAI::Internal::Type::BaseModel
|
|
7
7
|
# @!attribute audio_tokens
|
|
8
|
-
# The number of audio tokens used
|
|
8
|
+
# The number of audio tokens used as input for the Response.
|
|
9
9
|
#
|
|
10
10
|
# @return [Integer, nil]
|
|
11
11
|
optional :audio_tokens, Integer
|
|
12
12
|
|
|
13
13
|
# @!attribute cached_tokens
|
|
14
|
-
# The number of cached tokens used
|
|
14
|
+
# The number of cached tokens used as input for the Response.
|
|
15
15
|
#
|
|
16
16
|
# @return [Integer, nil]
|
|
17
17
|
optional :cached_tokens, Integer
|
|
18
18
|
|
|
19
|
+
# @!attribute cached_tokens_details
|
|
20
|
+
# Details about the cached tokens used as input for the Response.
|
|
21
|
+
#
|
|
22
|
+
# @return [OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails::CachedTokensDetails, nil]
|
|
23
|
+
optional :cached_tokens_details,
|
|
24
|
+
-> { OpenAI::Realtime::RealtimeResponseUsageInputTokenDetails::CachedTokensDetails }
|
|
25
|
+
|
|
26
|
+
# @!attribute image_tokens
|
|
27
|
+
# The number of image tokens used as input for the Response.
|
|
28
|
+
#
|
|
29
|
+
# @return [Integer, nil]
|
|
30
|
+
optional :image_tokens, Integer
|
|
31
|
+
|
|
19
32
|
# @!attribute text_tokens
|
|
20
|
-
# The number of text tokens used
|
|
33
|
+
# The number of text tokens used as input for the Response.
|
|
21
34
|
#
|
|
22
35
|
# @return [Integer, nil]
|
|
23
36
|
optional :text_tokens, Integer
|
|
24
37
|
|
|
25
|
-
# @!method initialize(audio_tokens: nil, cached_tokens: nil, text_tokens: nil)
|
|
26
|
-
# Details about the input tokens used in the Response.
|
|
38
|
+
# @!method initialize(audio_tokens: nil, cached_tokens: nil, cached_tokens_details: nil, image_tokens: nil, text_tokens: nil)
|
|
39
|
+
# Details about the input tokens used in the Response. Cached tokens are tokens
|
|
40
|
+
# from previous turns in the conversation that are included as context for the
|
|
41
|
+
# current response. Cached tokens here are counted as a subset of input tokens,
|
|
42
|
+
# meaning input tokens will include cached and uncached tokens.
|
|
43
|
+
#
|
|
44
|
+
# @param audio_tokens [Integer] The number of audio tokens used as input for the Response.
|
|
27
45
|
#
|
|
28
|
-
# @param
|
|
46
|
+
# @param cached_tokens [Integer] The number of cached tokens used as input for the Response.
|
|
29
47
|
#
|
|
30
|
-
# @param
|
|
48
|
+
# @param cached_tokens_details [OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails::CachedTokensDetails] Details about the cached tokens used as input for the Response.
|
|
31
49
|
#
|
|
32
|
-
# @param
|
|
50
|
+
# @param image_tokens [Integer] The number of image tokens used as input for the Response.
|
|
51
|
+
#
|
|
52
|
+
# @param text_tokens [Integer] The number of text tokens used as input for the Response.
|
|
53
|
+
|
|
54
|
+
# @see OpenAI::Models::Realtime::RealtimeResponseUsageInputTokenDetails#cached_tokens_details
|
|
55
|
+
class CachedTokensDetails < OpenAI::Internal::Type::BaseModel
|
|
56
|
+
# @!attribute audio_tokens
|
|
57
|
+
# The number of cached audio tokens used as input for the Response.
|
|
58
|
+
#
|
|
59
|
+
# @return [Integer, nil]
|
|
60
|
+
optional :audio_tokens, Integer
|
|
61
|
+
|
|
62
|
+
# @!attribute image_tokens
|
|
63
|
+
# The number of cached image tokens used as input for the Response.
|
|
64
|
+
#
|
|
65
|
+
# @return [Integer, nil]
|
|
66
|
+
optional :image_tokens, Integer
|
|
67
|
+
|
|
68
|
+
# @!attribute text_tokens
|
|
69
|
+
# The number of cached text tokens used as input for the Response.
|
|
70
|
+
#
|
|
71
|
+
# @return [Integer, nil]
|
|
72
|
+
optional :text_tokens, Integer
|
|
73
|
+
|
|
74
|
+
# @!method initialize(audio_tokens: nil, image_tokens: nil, text_tokens: nil)
|
|
75
|
+
# Details about the cached tokens used as input for the Response.
|
|
76
|
+
#
|
|
77
|
+
# @param audio_tokens [Integer] The number of cached audio tokens used as input for the Response.
|
|
78
|
+
#
|
|
79
|
+
# @param image_tokens [Integer] The number of cached image tokens used as input for the Response.
|
|
80
|
+
#
|
|
81
|
+
# @param text_tokens [Integer] The number of cached text tokens used as input for the Response.
|
|
82
|
+
end
|
|
33
83
|
end
|
|
34
84
|
end
|
|
35
85
|
end
|