openai 0.27.1 → 0.28.1
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +25 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +7 -10
- data/lib/openai/internal/type/base_stream.rb +0 -17
- data/lib/openai/models/batch.rb +23 -1
- data/lib/openai/models/batch_usage.rb +84 -0
- data/lib/openai/models/conversations/conversation_item.rb +37 -1
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -4
- data/lib/openai/models/responses/response_function_call_arguments_done_event.rb +9 -1
- data/lib/openai/models/responses/response_function_call_output_item.rb +26 -0
- data/lib/openai/models/responses/response_function_call_output_item_list.rb +11 -0
- data/lib/openai/models/responses/response_function_tool_call_output_item.rb +47 -4
- data/lib/openai/models/responses/response_input_file_content.rb +52 -0
- data/lib/openai/models/responses/response_input_image_content.rb +65 -0
- data/lib/openai/models/responses/response_input_item.rb +56 -5
- data/lib/openai/models/responses/response_input_text_content.rb +28 -0
- data/lib/openai/models/responses/response_item.rb +37 -1
- data/lib/openai/models/responses/response_output_item.rb +37 -1
- data/lib/openai/models.rb +2 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/internal/type/base_stream.rbi +0 -15
- data/rbi/openai/models/batch.rbi +35 -1
- data/rbi/openai/models/batch_usage.rbi +139 -0
- data/rbi/openai/models/conversations/conversation_item.rbi +90 -1
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +77 -5
- data/rbi/openai/models/responses/response_function_call_arguments_done_event.rbi +8 -0
- data/rbi/openai/models/responses/response_function_call_output_item.rbi +31 -0
- data/rbi/openai/models/responses/response_function_call_output_item_list.rbi +15 -0
- data/rbi/openai/models/responses/response_function_tool_call_output_item.rbi +72 -5
- data/rbi/openai/models/responses/response_input_file_content.rbi +75 -0
- data/rbi/openai/models/responses/response_input_image_content.rbi +125 -0
- data/rbi/openai/models/responses/response_input_item.rbi +126 -6
- data/rbi/openai/models/responses/response_input_text_content.rbi +39 -0
- data/rbi/openai/models/responses/response_item.rbi +86 -1
- data/rbi/openai/models/responses/response_output_item.rbi +90 -1
- data/rbi/openai/models.rbi +2 -0
- data/sig/openai/internal/type/base_stream.rbs +0 -4
- data/sig/openai/models/batch.rbs +16 -2
- data/sig/openai/models/batch_usage.rbs +60 -0
- data/sig/openai/models/conversations/conversation_item.rbs +31 -2
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +27 -4
- data/sig/openai/models/responses/response_function_call_arguments_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_function_call_output_item.rbs +16 -0
- data/sig/openai/models/responses/response_function_call_output_item_list.rbs +10 -0
- data/sig/openai/models/responses/response_function_tool_call_output_item.rbs +27 -4
- data/sig/openai/models/responses/response_input_file_content.rbs +42 -0
- data/sig/openai/models/responses/response_input_image_content.rbs +49 -0
- data/sig/openai/models/responses/response_input_item.rbs +45 -6
- data/sig/openai/models/responses/response_input_text_content.rbs +17 -0
- data/sig/openai/models/responses/response_item.rbs +31 -2
- data/sig/openai/models/responses/response_output_item.rbs +31 -2
- data/sig/openai/models.rbs +2 -0
- metadata +20 -2
@@ -291,10 +291,10 @@ module OpenAI
 required :call_id, String

 # @!attribute output
-#
+# Text, image, or file output of the function tool call.
 #
-# @return [String]
-required :output,
+# @return [String, Array<OpenAI::Models::Responses::ResponseInputTextContent, OpenAI::Models::Responses::ResponseInputImageContent, OpenAI::Models::Responses::ResponseInputFileContent>]
+required :output, union: -> { OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Output }

 # @!attribute type
 # The type of the function tool call output. Always `function_call_output`.
@@ -327,7 +327,7 @@ module OpenAI
 #
 # @param call_id [String] The unique ID of the function tool call generated by the model.
 #
-# @param output [String]
+# @param output [String, Array<OpenAI::Models::Responses::ResponseInputTextContent, OpenAI::Models::Responses::ResponseInputImageContent, OpenAI::Models::Responses::ResponseInputFileContent>] Text, image, or file output of the function tool call.
 #
 # @param id [String, nil] The unique ID of the function tool call output. Populated when this item is retu
 #
@@ -335,6 +335,21 @@ module OpenAI
 #
 # @param type [Symbol, :function_call_output] The type of the function tool call output. Always `function_call_output`.

+# Text, image, or file output of the function tool call.
+#
+# @see OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput#output
+module Output
+  extend OpenAI::Internal::Type::Union
+
+  # A JSON string of the output of the function tool call.
+  variant String
+
+  variant -> { OpenAI::Responses::ResponseFunctionCallOutputItemList }
+
+  # @!method self.variants
+  # @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputTextContent, OpenAI::Models::Responses::ResponseInputImageContent, OpenAI::Models::Responses::ResponseInputFileContent>)]
+end
+
 # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
 # Populated when items are returned via API.
 #
@@ -803,6 +818,14 @@ module OpenAI
 # @return [Symbol, :mcp_call]
 required :type, const: :mcp_call

+# @!attribute approval_request_id
+# Unique identifier for the MCP tool call approval request. Include this value in
+# a subsequent `mcp_approval_response` input to approve or reject the
+# corresponding tool call.
+#
+# @return [String, nil]
+optional :approval_request_id, String, nil?: true
+
 # @!attribute error
 # The error from the tool call, if any.
 #
@@ -815,7 +838,14 @@ module OpenAI
 # @return [String, nil]
 optional :output, String, nil?: true

-# @!
+# @!attribute status
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::McpCall::Status, nil]
+optional :status, enum: -> { OpenAI::Responses::ResponseInputItem::McpCall::Status }
+
+# @!method initialize(id:, arguments:, name:, server_label:, approval_request_id: nil, error: nil, output: nil, status: nil, type: :mcp_call)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::ResponseInputItem::McpCall} for more details.
 #
@@ -829,11 +859,32 @@ module OpenAI
 #
 # @param server_label [String] The label of the MCP server running the tool.
 #
+# @param approval_request_id [String, nil] Unique identifier for the MCP tool call approval request.
+#
 # @param error [String, nil] The error from the tool call, if any.
 #
 # @param output [String, nil] The output from the tool call.
 #
+# @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::McpCall::Status] The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `c
+#
 # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
+
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @see OpenAI::Models::Responses::ResponseInputItem::McpCall#status
+module Status
+  extend OpenAI::Internal::Type::Enum
+
+  IN_PROGRESS = :in_progress
+  COMPLETED = :completed
+  INCOMPLETE = :incomplete
+  CALLING = :calling
+  FAILED = :failed
+
+  # @!method self.values
+  # @return [Array<Symbol>]
+end
 end

 class ItemReference < OpenAI::Internal::Type::BaseModel
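Taken together, the hunks above let a `function_call_output` input item carry structured content (text, image, or file parts) instead of only a JSON string. The following is a hypothetical usage sketch, not taken from the package: the client setup, model name, and call ID are assumptions, and the content-part hash simply mirrors the new `ResponseInputTextContent` shape.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Return the result of an earlier function call back to the model.
# With 0.28.1, `output:` may be an array of content parts rather than a JSON string.
response = client.responses.create(
  model: "gpt-5-2025-08-07",
  input: [
    {
      type: :function_call_output,
      call_id: "call_abc123", # hypothetical ID from a prior function_call item
      output: [
        { type: :input_text, text: "Forecast: 21 degrees C and clear skies" }
      ]
    }
  ]
)

pp response.output
```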
@@ -0,0 +1,28 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseInputTextContent < OpenAI::Internal::Type::BaseModel
+        # @!attribute text
+        # The text input to the model.
+        #
+        # @return [String]
+        required :text, String
+
+        # @!attribute type
+        # The type of the input item. Always `input_text`.
+        #
+        # @return [Symbol, :input_text]
+        required :type, const: :input_text
+
+        # @!method initialize(text:, type: :input_text)
+        # A text input to the model.
+        #
+        # @param text [String] The text input to the model.
+        #
+        # @param type [Symbol, :input_text] The type of the input item. Always `input_text`.
+      end
+    end
+  end
+end
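The new `ResponseInputTextContent` model above documents `initialize(text:, type: :input_text)`, so a content part can also be built from the model class rather than a raw hash. A small sketch, assuming the instance can stand in for the equivalent hash the same way other input models do:

```ruby
text_part = OpenAI::Models::Responses::ResponseInputTextContent.new(
  text: "All tests passed."
)

text_part.text # => "All tests passed."
text_part.type # => :input_text

# Used in place of a hash when building a function_call_output item's output array.
output_item = {
  type: :function_call_output,
  call_id: "call_abc123", # hypothetical
  output: [text_part]
}
```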
@@ -508,6 +508,14 @@ module OpenAI
 # @return [Symbol, :mcp_call]
 required :type, const: :mcp_call

+# @!attribute approval_request_id
+# Unique identifier for the MCP tool call approval request. Include this value in
+# a subsequent `mcp_approval_response` input to approve or reject the
+# corresponding tool call.
+#
+# @return [String, nil]
+optional :approval_request_id, String, nil?: true
+
 # @!attribute error
 # The error from the tool call, if any.
 #
@@ -520,7 +528,14 @@ module OpenAI
 # @return [String, nil]
 optional :output, String, nil?: true

-# @!
+# @!attribute status
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseItem::McpCall::Status, nil]
+optional :status, enum: -> { OpenAI::Responses::ResponseItem::McpCall::Status }
+
+# @!method initialize(id:, arguments:, name:, server_label:, approval_request_id: nil, error: nil, output: nil, status: nil, type: :mcp_call)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::ResponseItem::McpCall} for more details.
 #
@@ -534,11 +549,32 @@ module OpenAI
 #
 # @param server_label [String] The label of the MCP server running the tool.
 #
+# @param approval_request_id [String, nil] Unique identifier for the MCP tool call approval request.
+#
 # @param error [String, nil] The error from the tool call, if any.
 #
 # @param output [String, nil] The output from the tool call.
 #
+# @param status [Symbol, OpenAI::Models::Responses::ResponseItem::McpCall::Status] The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `c
+#
 # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
+
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @see OpenAI::Models::Responses::ResponseItem::McpCall#status
+module Status
+  extend OpenAI::Internal::Type::Enum
+
+  IN_PROGRESS = :in_progress
+  COMPLETED = :completed
+  INCOMPLETE = :incomplete
+  CALLING = :calling
+  FAILED = :failed
+
+  # @!method self.values
+  # @return [Array<Symbol>]
+end
 end

 # @!method self.variants
@@ -263,6 +263,14 @@ module OpenAI
 # @return [Symbol, :mcp_call]
 required :type, const: :mcp_call

+# @!attribute approval_request_id
+# Unique identifier for the MCP tool call approval request. Include this value in
+# a subsequent `mcp_approval_response` input to approve or reject the
+# corresponding tool call.
+#
+# @return [String, nil]
+optional :approval_request_id, String, nil?: true
+
 # @!attribute error
 # The error from the tool call, if any.
 #
@@ -275,7 +283,14 @@ module OpenAI
 # @return [String, nil]
 optional :output, String, nil?: true

-# @!
+# @!attribute status
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseOutputItem::McpCall::Status, nil]
+optional :status, enum: -> { OpenAI::Responses::ResponseOutputItem::McpCall::Status }
+
+# @!method initialize(id:, arguments:, name:, server_label:, approval_request_id: nil, error: nil, output: nil, status: nil, type: :mcp_call)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::ResponseOutputItem::McpCall} for more details.
 #
@@ -289,11 +304,32 @@ module OpenAI
 #
 # @param server_label [String] The label of the MCP server running the tool.
 #
+# @param approval_request_id [String, nil] Unique identifier for the MCP tool call approval request.
+#
 # @param error [String, nil] The error from the tool call, if any.
 #
 # @param output [String, nil] The output from the tool call.
 #
+# @param status [Symbol, OpenAI::Models::Responses::ResponseOutputItem::McpCall::Status] The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `c
+#
 # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
+
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+#
+# @see OpenAI::Models::Responses::ResponseOutputItem::McpCall#status
+module Status
+  extend OpenAI::Internal::Type::Enum
+
+  IN_PROGRESS = :in_progress
+  COMPLETED = :completed
+  INCOMPLETE = :incomplete
+  CALLING = :calling
+  FAILED = :failed
+
+  # @!method self.values
+  # @return [Array<Symbol>]
+end
 end

 class McpListTools < OpenAI::Internal::Type::BaseModel
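The same `approval_request_id` and `status` fields are added to the MCP call item across `ResponseInputItem`, `ResponseItem`, and `ResponseOutputItem`. A hedged sketch of inspecting returned output items; the `response` variable comes from an earlier `responses.create` call as in the first example, and the suggestion about approval responses is based only on the field documentation above:

```ruby
response.output.each do |item|
  next unless item.is_a?(OpenAI::Models::Responses::ResponseOutputItem::McpCall)

  puts "MCP call #{item.name}: status=#{item.status.inspect}"

  # Per the new docs, approval_request_id can be echoed back in a
  # subsequent `mcp_approval_response` input item to approve or reject the call.
  if item.approval_request_id
    puts "needs approval: #{item.approval_request_id}"
  end
end
```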
data/lib/openai/models.rb CHANGED
data/lib/openai/version.rb CHANGED
data/lib/openai.rb CHANGED
@@ -99,6 +99,7 @@ require_relative "openai/models/batch_error"
 require_relative "openai/models/batch_list_params"
 require_relative "openai/models/batch_request_counts"
 require_relative "openai/models/batch_retrieve_params"
+require_relative "openai/models/batch_usage"
 require_relative "openai/models/beta/assistant"
 require_relative "openai/models/beta/assistant_create_params"
 require_relative "openai/models/beta/assistant_deleted"
@@ -537,6 +538,8 @@ require_relative "openai/models/responses/response_format_text_config"
 require_relative "openai/models/responses/response_format_text_json_schema_config"
 require_relative "openai/models/responses/response_function_call_arguments_delta_event"
 require_relative "openai/models/responses/response_function_call_arguments_done_event"
+require_relative "openai/models/responses/response_function_call_output_item"
+require_relative "openai/models/responses/response_function_call_output_item_list"
 require_relative "openai/models/responses/response_function_tool_call_item"
 require_relative "openai/models/responses/response_function_tool_call_output_item"
 require_relative "openai/models/responses/response_function_web_search"
@@ -550,9 +553,12 @@ require_relative "openai/models/responses/response_in_progress_event"
 require_relative "openai/models/responses/response_input"
 require_relative "openai/models/responses/response_input_audio"
 require_relative "openai/models/responses/response_input_content"
+require_relative "openai/models/responses/response_input_file_content"
+require_relative "openai/models/responses/response_input_image_content"
 require_relative "openai/models/responses/response_input_item"
 require_relative "openai/models/responses/response_input_message_content_list"
 require_relative "openai/models/responses/response_input_message_item"
+require_relative "openai/models/responses/response_input_text_content"
 require_relative "openai/models/responses/response_item"
 require_relative "openai/models/responses/response_item_list"
 require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
@@ -12,21 +12,6 @@ module OpenAI
 Message = type_member(:in)
 Elem = type_member(:out)

-class << self
-  # Attempt to close the underlying transport when the stream itself is garbage
-  # collected.
-  #
-  # This should not be relied upon for resource clean up, as the garbage collector
-  # is not guaranteed to run.
-  sig do
-    params(stream: T::Enumerable[T.anything]).returns(
-      T.proc.params(arg0: Integer).void
-    )
-  end
-  def defer_closing(stream)
-  end
-end
-
 sig { returns(Integer) }
 attr_reader :status

data/rbi/openai/models/batch.rbi CHANGED
@@ -110,6 +110,17 @@ module OpenAI
 sig { returns(T.nilable(T::Hash[Symbol, String])) }
 attr_accessor :metadata

+# Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a
+# wide range of models with different capabilities, performance characteristics,
+# and price points. Refer to the
+# [model guide](https://platform.openai.com/docs/models) to browse and compare
+# available models.
+sig { returns(T.nilable(String)) }
+attr_reader :model
+
+sig { params(model: String).void }
+attr_writer :model
+
 # The ID of the file containing the outputs of successfully executed requests.
 sig { returns(T.nilable(String)) }
 attr_reader :output_file_id
@@ -124,6 +135,15 @@ module OpenAI
 sig { params(request_counts: OpenAI::BatchRequestCounts::OrHash).void }
 attr_writer :request_counts

+# Represents token usage details including input tokens, output tokens, a
+# breakdown of output tokens, and the total tokens used. Only populated on batches
+# created after September 7, 2025.
+sig { returns(T.nilable(OpenAI::BatchUsage)) }
+attr_reader :usage
+
+sig { params(usage: OpenAI::BatchUsage::OrHash).void }
+attr_writer :usage
+
 sig do
   params(
     id: String,
@@ -143,8 +163,10 @@ module OpenAI
 finalizing_at: Integer,
 in_progress_at: Integer,
 metadata: T.nilable(T::Hash[Symbol, String]),
+model: String,
 output_file_id: String,
 request_counts: OpenAI::BatchRequestCounts::OrHash,
+usage: OpenAI::BatchUsage::OrHash,
 object: Symbol
 ).returns(T.attached_class)
 end
@@ -186,10 +208,20 @@ module OpenAI
 # Keys are strings with a maximum length of 64 characters. Values are strings with
 # a maximum length of 512 characters.
 metadata: nil,
+# Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI offers a
+# wide range of models with different capabilities, performance characteristics,
+# and price points. Refer to the
+# [model guide](https://platform.openai.com/docs/models) to browse and compare
+# available models.
+model: nil,
 # The ID of the file containing the outputs of successfully executed requests.
 output_file_id: nil,
 # The request counts for different statuses within the batch.
 request_counts: nil,
+# Represents token usage details including input tokens, output tokens, a
+# breakdown of output tokens, and the total tokens used. Only populated on batches
+# created after September 7, 2025.
+usage: nil,
 # The object type, which is always `batch`.
 object: :batch
 )
@@ -216,8 +248,10 @@ module OpenAI
 finalizing_at: Integer,
 in_progress_at: Integer,
 metadata: T.nilable(T::Hash[Symbol, String]),
+model: String,
 output_file_id: String,
-request_counts: OpenAI::BatchRequestCounts
+request_counts: OpenAI::BatchRequestCounts,
+usage: OpenAI::BatchUsage
 }
 )
 end
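On the Batch side, the RBI changes above expose two new optional readers, `model` and `usage`. A minimal sketch of reading them, reusing the `client` from the earlier sketch; the batch ID is a placeholder, and per the docs above `usage` is only populated on batches created after September 7, 2025:

```ruby
batch = client.batches.retrieve("batch_abc123") # hypothetical batch ID

puts "processed with model: #{batch.model}" if batch.model
puts "total tokens: #{batch.usage.total_tokens}" if batch.usage
```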
@@ -0,0 +1,139 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    class BatchUsage < OpenAI::Internal::Type::BaseModel
+      OrHash =
+        T.type_alias { T.any(OpenAI::BatchUsage, OpenAI::Internal::AnyHash) }
+
+      # The number of input tokens.
+      sig { returns(Integer) }
+      attr_accessor :input_tokens
+
+      # A detailed breakdown of the input tokens.
+      sig { returns(OpenAI::BatchUsage::InputTokensDetails) }
+      attr_reader :input_tokens_details
+
+      sig do
+        params(
+          input_tokens_details: OpenAI::BatchUsage::InputTokensDetails::OrHash
+        ).void
+      end
+      attr_writer :input_tokens_details
+
+      # The number of output tokens.
+      sig { returns(Integer) }
+      attr_accessor :output_tokens
+
+      # A detailed breakdown of the output tokens.
+      sig { returns(OpenAI::BatchUsage::OutputTokensDetails) }
+      attr_reader :output_tokens_details
+
+      sig do
+        params(
+          output_tokens_details: OpenAI::BatchUsage::OutputTokensDetails::OrHash
+        ).void
+      end
+      attr_writer :output_tokens_details
+
+      # The total number of tokens used.
+      sig { returns(Integer) }
+      attr_accessor :total_tokens
+
+      # Represents token usage details including input tokens, output tokens, a
+      # breakdown of output tokens, and the total tokens used. Only populated on batches
+      # created after September 7, 2025.
+      sig do
+        params(
+          input_tokens: Integer,
+          input_tokens_details: OpenAI::BatchUsage::InputTokensDetails::OrHash,
+          output_tokens: Integer,
+          output_tokens_details:
+            OpenAI::BatchUsage::OutputTokensDetails::OrHash,
+          total_tokens: Integer
+        ).returns(T.attached_class)
+      end
+      def self.new(
+        # The number of input tokens.
+        input_tokens:,
+        # A detailed breakdown of the input tokens.
+        input_tokens_details:,
+        # The number of output tokens.
+        output_tokens:,
+        # A detailed breakdown of the output tokens.
+        output_tokens_details:,
+        # The total number of tokens used.
+        total_tokens:
+      )
+      end
+
+      sig do
+        override.returns(
+          {
+            input_tokens: Integer,
+            input_tokens_details: OpenAI::BatchUsage::InputTokensDetails,
+            output_tokens: Integer,
+            output_tokens_details: OpenAI::BatchUsage::OutputTokensDetails,
+            total_tokens: Integer
+          }
+        )
+      end
+      def to_hash
+      end
+
+      class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::BatchUsage::InputTokensDetails,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The number of tokens that were retrieved from the cache.
+        # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+        sig { returns(Integer) }
+        attr_accessor :cached_tokens
+
+        # A detailed breakdown of the input tokens.
+        sig { params(cached_tokens: Integer).returns(T.attached_class) }
+        def self.new(
+          # The number of tokens that were retrieved from the cache.
+          # [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+          cached_tokens:
+        )
+        end
+
+        sig { override.returns({ cached_tokens: Integer }) }
+        def to_hash
+        end
+      end
+
+      class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::BatchUsage::OutputTokensDetails,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The number of reasoning tokens.
+        sig { returns(Integer) }
+        attr_accessor :reasoning_tokens
+
+        # A detailed breakdown of the output tokens.
+        sig { params(reasoning_tokens: Integer).returns(T.attached_class) }
+        def self.new(
+          # The number of reasoning tokens.
+          reasoning_tokens:
+        )
+        end
+
+        sig { override.returns({ reasoning_tokens: Integer }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
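The `BatchUsage` interface above carries nested `input_tokens_details` and `output_tokens_details` breakdowns plus a total. A hedged sketch of drilling into those fields, assuming a `batch` retrieved as in the previous example with `usage` populated:

```ruby
usage = batch.usage

puts "input tokens:  #{usage.input_tokens}"
puts "  cached:      #{usage.input_tokens_details.cached_tokens}"
puts "output tokens: #{usage.output_tokens}"
puts "  reasoning:   #{usage.output_tokens_details.reasoning_tokens}"
puts "total tokens:  #{usage.total_tokens}"
```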
@@ -767,6 +767,12 @@ module OpenAI
 sig { returns(Symbol) }
 attr_accessor :type

+# Unique identifier for the MCP tool call approval request. Include this value in
+# a subsequent `mcp_approval_response` input to approve or reject the
+# corresponding tool call.
+sig { returns(T.nilable(String)) }
+attr_accessor :approval_request_id
+
 # The error from the tool call, if any.
 sig { returns(T.nilable(String)) }
 attr_accessor :error
@@ -775,6 +781,25 @@ module OpenAI
 sig { returns(T.nilable(String)) }
 attr_accessor :output

+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+sig do
+  returns(
+    T.nilable(
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+  )
+end
+attr_reader :status
+
+sig do
+  params(
+    status:
+      OpenAI::Conversations::ConversationItem::McpCall::Status::OrSymbol
+  ).void
+end
+attr_writer :status
+
 # An invocation of a tool on an MCP server.
 sig do
 params(
@@ -782,8 +807,11 @@ module OpenAI
 arguments: String,
 name: String,
 server_label: String,
+approval_request_id: T.nilable(String),
 error: T.nilable(String),
 output: T.nilable(String),
+status:
+  OpenAI::Conversations::ConversationItem::McpCall::Status::OrSymbol,
 type: Symbol
 ).returns(T.attached_class)
 end
@@ -796,10 +824,17 @@ module OpenAI
 name:,
 # The label of the MCP server running the tool.
 server_label:,
+# Unique identifier for the MCP tool call approval request. Include this value in
+# a subsequent `mcp_approval_response` input to approve or reject the
+# corresponding tool call.
+approval_request_id: nil,
 # The error from the tool call, if any.
 error: nil,
 # The output from the tool call.
 output: nil,
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+status: nil,
 # The type of the item. Always `mcp_call`.
 type: :mcp_call
 )
@@ -813,13 +848,67 @@ module OpenAI
 name: String,
 server_label: String,
 type: Symbol,
+approval_request_id: T.nilable(String),
 error: T.nilable(String),
-output: T.nilable(String)
+output: T.nilable(String),
+status:
+  OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
 }
 )
 end
 def to_hash
 end
+
+# The status of the tool call. One of `in_progress`, `completed`, `incomplete`,
+# `calling`, or `failed`.
+module Status
+  extend OpenAI::Internal::Type::Enum
+
+  TaggedSymbol =
+    T.type_alias do
+      T.all(
+        Symbol,
+        OpenAI::Conversations::ConversationItem::McpCall::Status
+      )
+    end
+  OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+  IN_PROGRESS =
+    T.let(
+      :in_progress,
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+  COMPLETED =
+    T.let(
+      :completed,
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+  INCOMPLETE =
+    T.let(
+      :incomplete,
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+  CALLING =
+    T.let(
+      :calling,
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+  FAILED =
+    T.let(
+      :failed,
+      OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+    )
+
+  sig do
+    override.returns(
+      T::Array[
+        OpenAI::Conversations::ConversationItem::McpCall::Status::TaggedSymbol
+      ]
+    )
+  end
+  def self.values
+  end
+end
 end

 sig do
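The `ConversationItem::McpCall` changes expose the same fields through Sorbet as typed enum symbols. A speculative sketch of filtering conversation items by status; the `conversations.items.list` call and the conversation ID are assumptions about the client surface and are not shown in this diff:

```ruby
items = client.conversations.items.list("conv_abc123") # hypothetical conversation ID

items.each do |item|
  next unless item.is_a?(OpenAI::Models::Conversations::ConversationItem::McpCall)

  # `status` is a plain Symbol at runtime, so it compares directly against the enum values.
  warn "MCP call #{item.name} failed: #{item.error}" if item.status == :failed
end
```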