openai 0.33.0 → 0.34.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +13 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/conversations/conversation_item.rb +0 -4
  5. data/lib/openai/models/conversations/item_list_params.rb +1 -1
  6. data/lib/openai/models/custom_tool_input_format.rb +0 -6
  7. data/lib/openai/models/image_edit_params.rb +1 -1
  8. data/lib/openai/models/responses/custom_tool.rb +0 -6
  9. data/lib/openai/models/responses/input_token_count_params.rb +283 -0
  10. data/lib/openai/models/responses/input_token_count_response.rb +24 -0
  11. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +4 -4
  12. data/lib/openai/models/responses/response_computer_tool_call.rb +13 -18
  13. data/lib/openai/models/responses/response_computer_tool_call_output_item.rb +7 -7
  14. data/lib/openai/models/responses/response_create_params.rb +1 -1
  15. data/lib/openai/models/responses/response_includable.rb +5 -3
  16. data/lib/openai/models/responses/response_input_item.rb +0 -4
  17. data/lib/openai/models/responses/response_item.rb +0 -4
  18. data/lib/openai/models/responses/response_output_item.rb +0 -4
  19. data/lib/openai/models/responses/tool.rb +2 -12
  20. data/lib/openai/resources/conversations/items.rb +1 -1
  21. data/lib/openai/resources/images.rb +2 -2
  22. data/lib/openai/resources/responses/input_tokens.rb +61 -0
  23. data/lib/openai/resources/responses.rb +6 -2
  24. data/lib/openai/version.rb +1 -1
  25. data/lib/openai.rb +3 -0
  26. data/rbi/openai/models/custom_tool_input_format.rbi +0 -2
  27. data/rbi/openai/models/responses/custom_tool.rbi +0 -2
  28. data/rbi/openai/models/responses/input_token_count_params.rbi +601 -0
  29. data/rbi/openai/models/responses/input_token_count_response.rbi +35 -0
  30. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +4 -4
  31. data/rbi/openai/models/responses/response_computer_tool_call.rbi +19 -13
  32. data/rbi/openai/models/responses/response_computer_tool_call_output_item.rbi +16 -8
  33. data/rbi/openai/models/responses/response_includable.rbi +18 -8
  34. data/rbi/openai/models/responses/tool.rbi +0 -1
  35. data/rbi/openai/resources/responses/input_tokens.rbi +120 -0
  36. data/rbi/openai/resources/responses.rbi +3 -0
  37. data/sig/openai/models/responses/input_token_count_params.rbs +165 -0
  38. data/sig/openai/models/responses/input_token_count_response.rbs +24 -0
  39. data/sig/openai/models/responses/response_computer_tool_call.rbs +9 -5
  40. data/sig/openai/models/responses/response_computer_tool_call_output_item.rbs +9 -5
  41. data/sig/openai/models/responses/response_includable.rbs +11 -7
  42. data/sig/openai/resources/responses/input_tokens.rbs +24 -0
  43. data/sig/openai/resources/responses.rbs +2 -0
  44. metadata +11 -2
@@ -173,8 +173,7 @@ module OpenAI
173
173
  end
174
174
  attr_accessor :button
175
175
 
176
- # Specifies the event type. For a click action, this property is always set to
177
- # `click`.
176
+ # Specifies the event type. For a click action, this property is always `click`.
178
177
  sig { returns(Symbol) }
179
178
  attr_accessor :type
180
179
 
@@ -204,8 +203,7 @@ module OpenAI
204
203
  x:,
205
204
  # The y-coordinate where the click occurred.
206
205
  y_:,
207
- # Specifies the event type. For a click action, this property is always set to
208
- # `click`.
206
+ # Specifies the event type. For a click action, this property is always `click`.
209
207
  type: :click
210
208
  )
211
209
  end
@@ -410,7 +408,7 @@ module OpenAI
410
408
  sig { returns(Integer) }
411
409
  attr_accessor :y_
412
410
 
413
- # A series of x/y coordinate pairs in the drag path.
411
+ # An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.
414
412
  sig { params(x: Integer, y_: Integer).returns(T.attached_class) }
415
413
  def self.new(
416
414
  # The x-coordinate.
@@ -695,31 +693,39 @@ module OpenAI
695
693
  attr_accessor :id
696
694
 
697
695
  # The type of the pending safety check.
698
- sig { returns(String) }
696
+ sig { returns(T.nilable(String)) }
699
697
  attr_accessor :code
700
698
 
701
699
  # Details about the pending safety check.
702
- sig { returns(String) }
700
+ sig { returns(T.nilable(String)) }
703
701
  attr_accessor :message
704
702
 
705
703
  # A pending safety check for the computer call.
706
704
  sig do
707
- params(id: String, code: String, message: String).returns(
708
- T.attached_class
709
- )
705
+ params(
706
+ id: String,
707
+ code: T.nilable(String),
708
+ message: T.nilable(String)
709
+ ).returns(T.attached_class)
710
710
  end
711
711
  def self.new(
712
712
  # The ID of the pending safety check.
713
713
  id:,
714
714
  # The type of the pending safety check.
715
- code:,
715
+ code: nil,
716
716
  # Details about the pending safety check.
717
- message:
717
+ message: nil
718
718
  )
719
719
  end
720
720
 
721
721
  sig do
722
- override.returns({ id: String, code: String, message: String })
722
+ override.returns(
723
+ {
724
+ id: String,
725
+ code: T.nilable(String),
726
+ message: T.nilable(String)
727
+ }
728
+ )
723
729
  end
724
730
  def to_hash
725
731
  end
@@ -147,31 +147,39 @@ module OpenAI
147
147
  attr_accessor :id
148
148
 
149
149
  # The type of the pending safety check.
150
- sig { returns(String) }
150
+ sig { returns(T.nilable(String)) }
151
151
  attr_accessor :code
152
152
 
153
153
  # Details about the pending safety check.
154
- sig { returns(String) }
154
+ sig { returns(T.nilable(String)) }
155
155
  attr_accessor :message
156
156
 
157
157
  # A pending safety check for the computer call.
158
158
  sig do
159
- params(id: String, code: String, message: String).returns(
160
- T.attached_class
161
- )
159
+ params(
160
+ id: String,
161
+ code: T.nilable(String),
162
+ message: T.nilable(String)
163
+ ).returns(T.attached_class)
162
164
  end
163
165
  def self.new(
164
166
  # The ID of the pending safety check.
165
167
  id:,
166
168
  # The type of the pending safety check.
167
- code:,
169
+ code: nil,
168
170
  # Details about the pending safety check.
169
- message:
171
+ message: nil
170
172
  )
171
173
  end
172
174
 
173
175
  sig do
174
- override.returns({ id: String, code: String, message: String })
176
+ override.returns(
177
+ {
178
+ id: String,
179
+ code: T.nilable(String),
180
+ message: T.nilable(String)
181
+ }
182
+ )
175
183
  end
176
184
  def to_hash
177
185
  end
@@ -28,19 +28,19 @@ module OpenAI
28
28
  T.type_alias { T.all(Symbol, OpenAI::Responses::ResponseIncludable) }
29
29
  OrSymbol = T.type_alias { T.any(Symbol, String) }
30
30
 
31
- CODE_INTERPRETER_CALL_OUTPUTS =
31
+ FILE_SEARCH_CALL_RESULTS =
32
32
  T.let(
33
- :"code_interpreter_call.outputs",
33
+ :"file_search_call.results",
34
34
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
35
35
  )
36
- COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL =
36
+ WEB_SEARCH_CALL_RESULTS =
37
37
  T.let(
38
- :"computer_call_output.output.image_url",
38
+ :"web_search_call.results",
39
39
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
40
40
  )
41
- FILE_SEARCH_CALL_RESULTS =
41
+ WEB_SEARCH_CALL_ACTION_SOURCES =
42
42
  T.let(
43
- :"file_search_call.results",
43
+ :"web_search_call.action.sources",
44
44
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
45
45
  )
46
46
  MESSAGE_INPUT_IMAGE_IMAGE_URL =
@@ -48,9 +48,14 @@ module OpenAI
48
48
  :"message.input_image.image_url",
49
49
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
50
50
  )
51
- MESSAGE_OUTPUT_TEXT_LOGPROBS =
51
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL =
52
52
  T.let(
53
- :"message.output_text.logprobs",
53
+ :"computer_call_output.output.image_url",
54
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
55
+ )
56
+ CODE_INTERPRETER_CALL_OUTPUTS =
57
+ T.let(
58
+ :"code_interpreter_call.outputs",
54
59
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
55
60
  )
56
61
  REASONING_ENCRYPTED_CONTENT =
@@ -58,6 +63,11 @@ module OpenAI
58
63
  :"reasoning.encrypted_content",
59
64
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
60
65
  )
66
+ MESSAGE_OUTPUT_TEXT_LOGPROBS =
67
+ T.let(
68
+ :"message.output_text.logprobs",
69
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
70
+ )
61
71
 
62
72
  sig do
63
73
  override.returns(
@@ -1335,7 +1335,6 @@ module OpenAI
1335
1335
  sig { returns(Symbol) }
1336
1336
  attr_accessor :type
1337
1337
 
1338
- # A tool that allows the model to execute shell commands in a local environment.
1339
1338
  sig { params(type: Symbol).returns(T.attached_class) }
1340
1339
  def self.new(
1341
1340
  # The type of the local shell tool. Always `local_shell`.
@@ -0,0 +1,120 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Resources
5
+ class Responses
6
+ class InputTokens
7
+ # Get input token counts
8
+ sig do
9
+ params(
10
+ conversation:
11
+ T.nilable(
12
+ T.any(
13
+ String,
14
+ OpenAI::Responses::ResponseConversationParam::OrHash
15
+ )
16
+ ),
17
+ input:
18
+ T.nilable(
19
+ OpenAI::Responses::InputTokenCountParams::Input::Variants
20
+ ),
21
+ instructions: T.nilable(String),
22
+ model: T.nilable(String),
23
+ parallel_tool_calls: T.nilable(T::Boolean),
24
+ previous_response_id: T.nilable(String),
25
+ reasoning: T.nilable(OpenAI::Reasoning::OrHash),
26
+ text:
27
+ T.nilable(OpenAI::Responses::InputTokenCountParams::Text::OrHash),
28
+ tool_choice:
29
+ T.nilable(
30
+ T.any(
31
+ OpenAI::Responses::ToolChoiceOptions::OrSymbol,
32
+ OpenAI::Responses::ToolChoiceAllowed::OrHash,
33
+ OpenAI::Responses::ToolChoiceTypes::OrHash,
34
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
35
+ OpenAI::Responses::ToolChoiceMcp::OrHash,
36
+ OpenAI::Responses::ToolChoiceCustom::OrHash
37
+ )
38
+ ),
39
+ tools:
40
+ T.nilable(
41
+ T::Array[
42
+ T.any(
43
+ OpenAI::Responses::FunctionTool::OrHash,
44
+ OpenAI::Responses::FileSearchTool::OrHash,
45
+ OpenAI::Responses::ComputerTool::OrHash,
46
+ OpenAI::Responses::Tool::Mcp::OrHash,
47
+ OpenAI::Responses::Tool::CodeInterpreter::OrHash,
48
+ OpenAI::Responses::Tool::ImageGeneration::OrHash,
49
+ OpenAI::Responses::Tool::LocalShell::OrHash,
50
+ OpenAI::Responses::CustomTool::OrHash,
51
+ OpenAI::Responses::WebSearchTool::OrHash,
52
+ OpenAI::Responses::WebSearchPreviewTool::OrHash
53
+ )
54
+ ]
55
+ ),
56
+ truncation:
57
+ OpenAI::Responses::InputTokenCountParams::Truncation::OrSymbol,
58
+ request_options: OpenAI::RequestOptions::OrHash
59
+ ).returns(OpenAI::Models::Responses::InputTokenCountResponse)
60
+ end
61
+ def count(
62
+ # The conversation that this response belongs to. Items from this conversation are
63
+ # prepended to `input_items` for this response request. Input items and output
64
+ # items from this response are automatically added to this conversation after this
65
+ # response completes.
66
+ conversation: nil,
67
+ # Text, image, or file inputs to the model, used to generate a response
68
+ input: nil,
69
+ # A system (or developer) message inserted into the model's context. When used
70
+ # along with `previous_response_id`, the instructions from a previous response
71
+ # will not be carried over to the next response. This makes it simple to swap out
72
+ # system (or developer) messages in new responses.
73
+ instructions: nil,
74
+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
75
+ # wide range of models with different capabilities, performance characteristics,
76
+ # and price points. Refer to the
77
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
78
+ # available models.
79
+ model: nil,
80
+ # Whether to allow the model to run tool calls in parallel.
81
+ parallel_tool_calls: nil,
82
+ # The unique ID of the previous response to the model. Use this to create
83
+ # multi-turn conversations. Learn more about
84
+ # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
85
+ # Cannot be used in conjunction with `conversation`.
86
+ previous_response_id: nil,
87
+ # **gpt-5 and o-series models only** Configuration options for
88
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
89
+ reasoning: nil,
90
+ # Configuration options for a text response from the model. Can be plain text or
91
+ # structured JSON data. Learn more:
92
+ #
93
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
94
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
95
+ text: nil,
96
+ # How the model should select which tool (or tools) to use when generating a
97
+ # response. See the `tools` parameter to see how to specify which tools the model
98
+ # can call.
99
+ tool_choice: nil,
100
+ # An array of tools the model may call while generating a response. You can
101
+ # specify which tool to use by setting the `tool_choice` parameter.
102
+ tools: nil,
103
+ # The truncation strategy to use for the model response. - `auto`: If the input to
104
+ # this Response exceeds the model's context window size, the model will truncate
105
+ # the response to fit the context window by dropping items from the beginning of
106
+ # the conversation. - `disabled` (default): If the input size will exceed the
107
+ # context window size for a model, the request will fail with a 400 error.
108
+ truncation: nil,
109
+ request_options: {}
110
+ )
111
+ end
112
+
113
+ # @api private
114
+ sig { params(client: OpenAI::Client).returns(T.attached_class) }
115
+ def self.new(client:)
116
+ end
117
+ end
118
+ end
119
+ end
120
+ end
@@ -6,6 +6,9 @@ module OpenAI
6
6
  sig { returns(OpenAI::Resources::Responses::InputItems) }
7
7
  attr_reader :input_items
8
8
 
9
+ sig { returns(OpenAI::Resources::Responses::InputTokens) }
10
+ attr_reader :input_tokens
11
+
9
12
  # See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart.
10
13
  #
11
14
  # Creates a model response. Provide
@@ -0,0 +1,165 @@
1
+ module OpenAI
2
+ module Models
3
+ module Responses
4
+ type input_token_count_params =
5
+ {
6
+ conversation: OpenAI::Models::Responses::InputTokenCountParams::conversation?,
7
+ input: OpenAI::Models::Responses::InputTokenCountParams::input?,
8
+ instructions: String?,
9
+ model: String?,
10
+ parallel_tool_calls: bool?,
11
+ previous_response_id: String?,
12
+ reasoning: OpenAI::Reasoning?,
13
+ text: OpenAI::Responses::InputTokenCountParams::Text?,
14
+ tool_choice: OpenAI::Models::Responses::InputTokenCountParams::tool_choice?,
15
+ tools: ::Array[OpenAI::Models::Responses::tool]?,
16
+ truncation: OpenAI::Models::Responses::InputTokenCountParams::truncation
17
+ }
18
+ & OpenAI::Internal::Type::request_parameters
19
+
20
+ class InputTokenCountParams < OpenAI::Internal::Type::BaseModel
21
+ extend OpenAI::Internal::Type::RequestParameters::Converter
22
+ include OpenAI::Internal::Type::RequestParameters
23
+
24
+ attr_accessor conversation: OpenAI::Models::Responses::InputTokenCountParams::conversation?
25
+
26
+ attr_accessor input: OpenAI::Models::Responses::InputTokenCountParams::input?
27
+
28
+ attr_accessor instructions: String?
29
+
30
+ attr_accessor model: String?
31
+
32
+ attr_accessor parallel_tool_calls: bool?
33
+
34
+ attr_accessor previous_response_id: String?
35
+
36
+ attr_accessor reasoning: OpenAI::Reasoning?
37
+
38
+ attr_accessor text: OpenAI::Responses::InputTokenCountParams::Text?
39
+
40
+ attr_accessor tool_choice: OpenAI::Models::Responses::InputTokenCountParams::tool_choice?
41
+
42
+ attr_accessor tools: ::Array[OpenAI::Models::Responses::tool]?
43
+
44
+ attr_reader truncation: OpenAI::Models::Responses::InputTokenCountParams::truncation?
45
+
46
+ def truncation=: (
47
+ OpenAI::Models::Responses::InputTokenCountParams::truncation
48
+ ) -> OpenAI::Models::Responses::InputTokenCountParams::truncation
49
+
50
+ def initialize: (
51
+ ?conversation: OpenAI::Models::Responses::InputTokenCountParams::conversation?,
52
+ ?input: OpenAI::Models::Responses::InputTokenCountParams::input?,
53
+ ?instructions: String?,
54
+ ?model: String?,
55
+ ?parallel_tool_calls: bool?,
56
+ ?previous_response_id: String?,
57
+ ?reasoning: OpenAI::Reasoning?,
58
+ ?text: OpenAI::Responses::InputTokenCountParams::Text?,
59
+ ?tool_choice: OpenAI::Models::Responses::InputTokenCountParams::tool_choice?,
60
+ ?tools: ::Array[OpenAI::Models::Responses::tool]?,
61
+ ?truncation: OpenAI::Models::Responses::InputTokenCountParams::truncation,
62
+ ?request_options: OpenAI::request_opts
63
+ ) -> void
64
+
65
+ def to_hash: -> {
66
+ conversation: OpenAI::Models::Responses::InputTokenCountParams::conversation?,
67
+ input: OpenAI::Models::Responses::InputTokenCountParams::input?,
68
+ instructions: String?,
69
+ model: String?,
70
+ parallel_tool_calls: bool?,
71
+ previous_response_id: String?,
72
+ reasoning: OpenAI::Reasoning?,
73
+ text: OpenAI::Responses::InputTokenCountParams::Text?,
74
+ tool_choice: OpenAI::Models::Responses::InputTokenCountParams::tool_choice?,
75
+ tools: ::Array[OpenAI::Models::Responses::tool]?,
76
+ truncation: OpenAI::Models::Responses::InputTokenCountParams::truncation,
77
+ request_options: OpenAI::RequestOptions
78
+ }
79
+
80
+ type conversation =
81
+ String | OpenAI::Responses::ResponseConversationParam
82
+
83
+ module Conversation
84
+ extend OpenAI::Internal::Type::Union
85
+
86
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::InputTokenCountParams::conversation]
87
+ end
88
+
89
+ type input =
90
+ String | ::Array[OpenAI::Models::Responses::response_input_item]
91
+
92
+ module Input
93
+ extend OpenAI::Internal::Type::Union
94
+
95
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::InputTokenCountParams::input]
96
+
97
+ ResponseInputItemArray: OpenAI::Internal::Type::Converter
98
+ end
99
+
100
+ type text =
101
+ {
102
+ format_: OpenAI::Models::Responses::response_format_text_config,
103
+ verbosity: OpenAI::Models::Responses::InputTokenCountParams::Text::verbosity?
104
+ }
105
+
106
+ class Text < OpenAI::Internal::Type::BaseModel
107
+ attr_reader format_: OpenAI::Models::Responses::response_format_text_config?
108
+
109
+ def format_=: (
110
+ OpenAI::Models::Responses::response_format_text_config
111
+ ) -> OpenAI::Models::Responses::response_format_text_config
112
+
113
+ attr_accessor verbosity: OpenAI::Models::Responses::InputTokenCountParams::Text::verbosity?
114
+
115
+ def initialize: (
116
+ ?format_: OpenAI::Models::Responses::response_format_text_config,
117
+ ?verbosity: OpenAI::Models::Responses::InputTokenCountParams::Text::verbosity?
118
+ ) -> void
119
+
120
+ def to_hash: -> {
121
+ format_: OpenAI::Models::Responses::response_format_text_config,
122
+ verbosity: OpenAI::Models::Responses::InputTokenCountParams::Text::verbosity?
123
+ }
124
+
125
+ type verbosity = :low | :medium | :high
126
+
127
+ module Verbosity
128
+ extend OpenAI::Internal::Type::Enum
129
+
130
+ LOW: :low
131
+ MEDIUM: :medium
132
+ HIGH: :high
133
+
134
+ def self?.values: -> ::Array[OpenAI::Models::Responses::InputTokenCountParams::Text::verbosity]
135
+ end
136
+ end
137
+
138
+ type tool_choice =
139
+ OpenAI::Models::Responses::tool_choice_options
140
+ | OpenAI::Responses::ToolChoiceAllowed
141
+ | OpenAI::Responses::ToolChoiceTypes
142
+ | OpenAI::Responses::ToolChoiceFunction
143
+ | OpenAI::Responses::ToolChoiceMcp
144
+ | OpenAI::Responses::ToolChoiceCustom
145
+
146
+ module ToolChoice
147
+ extend OpenAI::Internal::Type::Union
148
+
149
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::InputTokenCountParams::tool_choice]
150
+ end
151
+
152
+ type truncation = :auto | :disabled
153
+
154
+ module Truncation
155
+ extend OpenAI::Internal::Type::Enum
156
+
157
+ AUTO: :auto
158
+ DISABLED: :disabled
159
+
160
+ def self?.values: -> ::Array[OpenAI::Models::Responses::InputTokenCountParams::truncation]
161
+ end
162
+ end
163
+ end
164
+ end
165
+ end
@@ -0,0 +1,24 @@
1
+ module OpenAI
2
+ module Models
3
+ module Responses
4
+ type input_token_count_response =
5
+ { input_tokens: Integer, object: :"response.input_tokens" }
6
+
7
+ class InputTokenCountResponse < OpenAI::Internal::Type::BaseModel
8
+ attr_accessor input_tokens: Integer
9
+
10
+ attr_accessor object: :"response.input_tokens"
11
+
12
+ def initialize: (
13
+ input_tokens: Integer,
14
+ ?object: :"response.input_tokens"
15
+ ) -> void
16
+
17
+ def to_hash: -> {
18
+ input_tokens: Integer,
19
+ object: :"response.input_tokens"
20
+ }
21
+ end
22
+ end
23
+ end
24
+ end
@@ -253,18 +253,22 @@ module OpenAI
253
253
  end
254
254
 
255
255
  type pending_safety_check =
256
- { id: String, code: String, message: String }
256
+ { id: String, code: String?, message: String? }
257
257
 
258
258
  class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel
259
259
  attr_accessor id: String
260
260
 
261
- attr_accessor code: String
261
+ attr_accessor code: String?
262
262
 
263
- attr_accessor message: String
263
+ attr_accessor message: String?
264
264
 
265
- def initialize: (id: String, code: String, message: String) -> void
265
+ def initialize: (
266
+ id: String,
267
+ ?code: String?,
268
+ ?message: String?
269
+ ) -> void
266
270
 
267
- def to_hash: -> { id: String, code: String, message: String }
271
+ def to_hash: -> { id: String, code: String?, message: String? }
268
272
  end
269
273
 
270
274
  type status = :in_progress | :completed | :incomplete
@@ -51,18 +51,22 @@ module OpenAI
51
51
  }
52
52
 
53
53
  type acknowledged_safety_check =
54
- { id: String, code: String, message: String }
54
+ { id: String, code: String?, message: String? }
55
55
 
56
56
  class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
57
57
  attr_accessor id: String
58
58
 
59
- attr_accessor code: String
59
+ attr_accessor code: String?
60
60
 
61
- attr_accessor message: String
61
+ attr_accessor message: String?
62
62
 
63
- def initialize: (id: String, code: String, message: String) -> void
63
+ def initialize: (
64
+ id: String,
65
+ ?code: String?,
66
+ ?message: String?
67
+ ) -> void
64
68
 
65
- def to_hash: -> { id: String, code: String, message: String }
69
+ def to_hash: -> { id: String, code: String?, message: String? }
66
70
  end
67
71
 
68
72
  type status = :in_progress | :completed | :incomplete
@@ -2,22 +2,26 @@ module OpenAI
2
2
  module Models
3
3
  module Responses
4
4
  type response_includable =
5
- :"code_interpreter_call.outputs"
6
- | :"computer_call_output.output.image_url"
7
- | :"file_search_call.results"
5
+ :"file_search_call.results"
6
+ | :"web_search_call.results"
7
+ | :"web_search_call.action.sources"
8
8
  | :"message.input_image.image_url"
9
- | :"message.output_text.logprobs"
9
+ | :"computer_call_output.output.image_url"
10
+ | :"code_interpreter_call.outputs"
10
11
  | :"reasoning.encrypted_content"
12
+ | :"message.output_text.logprobs"
11
13
 
12
14
  module ResponseIncludable
13
15
  extend OpenAI::Internal::Type::Enum
14
16
 
15
- CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"
16
- COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
17
17
  FILE_SEARCH_CALL_RESULTS: :"file_search_call.results"
18
+ WEB_SEARCH_CALL_RESULTS: :"web_search_call.results"
19
+ WEB_SEARCH_CALL_ACTION_SOURCES: :"web_search_call.action.sources"
18
20
  MESSAGE_INPUT_IMAGE_IMAGE_URL: :"message.input_image.image_url"
19
- MESSAGE_OUTPUT_TEXT_LOGPROBS: :"message.output_text.logprobs"
21
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL: :"computer_call_output.output.image_url"
22
+ CODE_INTERPRETER_CALL_OUTPUTS: :"code_interpreter_call.outputs"
20
23
  REASONING_ENCRYPTED_CONTENT: :"reasoning.encrypted_content"
24
+ MESSAGE_OUTPUT_TEXT_LOGPROBS: :"message.output_text.logprobs"
21
25
 
22
26
  def self?.values: -> ::Array[OpenAI::Models::Responses::response_includable]
23
27
  end
@@ -0,0 +1,24 @@
1
+ module OpenAI
2
+ module Resources
3
+ class Responses
4
+ class InputTokens
5
+ def count: (
6
+ ?conversation: OpenAI::Models::Responses::InputTokenCountParams::conversation?,
7
+ ?input: OpenAI::Models::Responses::InputTokenCountParams::input?,
8
+ ?instructions: String?,
9
+ ?model: String?,
10
+ ?parallel_tool_calls: bool?,
11
+ ?previous_response_id: String?,
12
+ ?reasoning: OpenAI::Reasoning?,
13
+ ?text: OpenAI::Responses::InputTokenCountParams::Text?,
14
+ ?tool_choice: OpenAI::Models::Responses::InputTokenCountParams::tool_choice?,
15
+ ?tools: ::Array[OpenAI::Models::Responses::tool]?,
16
+ ?truncation: OpenAI::Models::Responses::InputTokenCountParams::truncation,
17
+ ?request_options: OpenAI::request_opts
18
+ ) -> OpenAI::Models::Responses::InputTokenCountResponse
19
+
20
+ def initialize: (client: OpenAI::Client) -> void
21
+ end
22
+ end
23
+ end
24
+ end
@@ -3,6 +3,8 @@ module OpenAI
3
3
  class Responses
4
4
  attr_reader input_items: OpenAI::Resources::Responses::InputItems
5
5
 
6
+ attr_reader input_tokens: OpenAI::Resources::Responses::InputTokens
7
+
6
8
  def create: (
7
9
  ?background: bool?,
8
10
  ?conversation: OpenAI::Models::Responses::ResponseCreateParams::conversation?,