openai 0.14.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +18 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_params.rb +4 -1
  11. data/lib/openai/models/image_generate_params.rb +4 -1
  12. data/lib/openai/models/images_response.rb +2 -5
  13. data/lib/openai/models/responses/response.rb +2 -2
  14. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  15. data/lib/openai/models/responses/response_create_params.rb +2 -2
  16. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  17. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  18. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  19. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  20. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  21. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  22. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  23. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  24. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  25. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  26. data/lib/openai/resources/images.rb +6 -6
  27. data/lib/openai/resources/responses.rb +2 -2
  28. data/lib/openai/version.rb +1 -1
  29. data/lib/openai.rb +0 -2
  30. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  31. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  32. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  33. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  34. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  35. data/rbi/openai/models/function_definition.rbi +2 -2
  36. data/rbi/openai/models/image_edit_params.rbi +6 -0
  37. data/rbi/openai/models/image_generate_params.rbi +6 -0
  38. data/rbi/openai/models/images_response.rbi +2 -2
  39. data/rbi/openai/models/responses/response.rbi +3 -3
  40. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  41. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  42. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  43. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  44. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  45. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  46. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  47. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  48. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  49. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  50. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  51. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  52. data/rbi/openai/resources/chat/completions.rbi +2 -2
  53. data/rbi/openai/resources/images.rbi +22 -10
  54. data/rbi/openai/resources/responses.rbi +2 -2
  55. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  56. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  57. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  58. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  59. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  60. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  61. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  62. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  63. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  64. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  65. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  66. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  67. metadata +2 -8
  68. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  69. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  70. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  71. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  72. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  73. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -22,6 +22,13 @@ module OpenAI
  # @return [String]
  required :item_id, String

+ # @!attribute logprobs
+ # The log probabilities of the tokens in the delta.
+ #
+ # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob>]
+ required :logprobs,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob] }
+
  # @!attribute output_index
  # The index of the output item that the text delta was added to.
  #
@@ -40,7 +47,7 @@ module OpenAI
  # @return [Symbol, :"response.output_text.delta"]
  required :type, const: :"response.output_text.delta"

- # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.output_text.delta")
+ # @!method initialize(content_index:, delta:, item_id:, logprobs:, output_index:, sequence_number:, type: :"response.output_text.delta")
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::ResponseTextDeltaEvent} for more details.
  #
@@ -52,11 +59,69 @@ module OpenAI
  #
  # @param item_id [String] The ID of the output item that the text delta was added to.
  #
+ # @param logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob>] The log probabilities of the tokens in the delta.
+ #
  # @param output_index [Integer] The index of the output item that the text delta was added to.
  #
  # @param sequence_number [Integer] The sequence number for this event.
  #
  # @param type [Symbol, :"response.output_text.delta"] The type of the event. Always `response.output_text.delta`.
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+ # @!attribute token
+ # A possible text token.
+ #
+ # @return [String]
+ required :token, String
+
+ # @!attribute logprob
+ # The log probability of this token.
+ #
+ # @return [Float]
+ required :logprob, Float
+
+ # @!attribute top_logprobs
+ # The log probability of the top 20 most likely tokens.
+ #
+ # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
+ optional :top_logprobs,
+ -> {
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+ }
+
+ # @!method initialize(token:, logprob:, top_logprobs: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob} for more details.
+ #
+ # A logprob is the logarithmic probability that the model assigns to producing a
+ # particular token at a given position in the sequence. Less-negative (higher)
+ # logprob values indicate greater model confidence in that token choice.
+ #
+ # @param token [String] A possible text token.
+ #
+ # @param logprob [Float] The log probability of this token.
+ #
+ # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
+
+ class TopLogprob < OpenAI::Internal::Type::BaseModel
+ # @!attribute token
+ # A possible text token.
+ #
+ # @return [String, nil]
+ optional :token, String
+
+ # @!attribute logprob
+ # The log probability of this token.
+ #
+ # @return [Float, nil]
+ optional :logprob, Float
+
+ # @!method initialize(token: nil, logprob: nil)
+ # @param token [String] A possible text token.
+ #
+ # @param logprob [Float] The log probability of this token.
+ end
+ end
  end
  end
  end
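Both streaming text events gain a required `logprobs` array of `Logprob` models (`token`, `logprob`, optional `top_logprobs`). A minimal sketch of reading it from the delta event, assuming a configured client, an illustrative model name, and a request that actually populates logprobs:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    stream = client.responses.stream_raw(
      model: "gpt-4.1",                        # assumed model name
      input: "Say hello in one short sentence."
    )

    stream.each do |event|
      next unless event.is_a?(OpenAI::Models::Responses::ResponseTextDeltaEvent)

      # `logprobs` is a required array of Logprob models on every delta event.
      event.logprobs.each do |lp|
        printf("%-14s %8.4f\n", lp.token.inspect, lp.logprob)
      end
    end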
@@ -16,6 +16,13 @@ module OpenAI
  # @return [String]
  required :item_id, String

+ # @!attribute logprobs
+ # The log probabilities of the tokens in the delta.
+ #
+ # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob>]
+ required :logprobs,
+ -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob] }
+
  # @!attribute output_index
  # The index of the output item that the text content is finalized.
  #
@@ -40,7 +47,7 @@ module OpenAI
  # @return [Symbol, :"response.output_text.done"]
  required :type, const: :"response.output_text.done"

- # @!method initialize(content_index:, item_id:, output_index:, sequence_number:, text:, type: :"response.output_text.done")
+ # @!method initialize(content_index:, item_id:, logprobs:, output_index:, sequence_number:, text:, type: :"response.output_text.done")
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Responses::ResponseTextDoneEvent} for more details.
  #
@@ -50,6 +57,8 @@ module OpenAI
  #
  # @param item_id [String] The ID of the output item that the text content is finalized.
  #
+ # @param logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob>] The log probabilities of the tokens in the delta.
+ #
  # @param output_index [Integer] The index of the output item that the text content is finalized.
  #
  # @param sequence_number [Integer] The sequence number for this event.
@@ -57,6 +66,62 @@ module OpenAI
  # @param text [String] The text content that is finalized.
  #
  # @param type [Symbol, :"response.output_text.done"] The type of the event. Always `response.output_text.done`.
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+ # @!attribute token
+ # A possible text token.
+ #
+ # @return [String]
+ required :token, String
+
+ # @!attribute logprob
+ # The log probability of this token.
+ #
+ # @return [Float]
+ required :logprob, Float
+
+ # @!attribute top_logprobs
+ # The log probability of the top 20 most likely tokens.
+ #
+ # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
+ optional :top_logprobs,
+ -> {
+ OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+ }
+
+ # @!method initialize(token:, logprob:, top_logprobs: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob} for more details.
+ #
+ # A logprob is the logarithmic probability that the model assigns to producing a
+ # particular token at a given position in the sequence. Less-negative (higher)
+ # logprob values indicate greater model confidence in that token choice.
+ #
+ # @param token [String] A possible text token.
+ #
+ # @param logprob [Float] The log probability of this token.
+ #
+ # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
+
+ class TopLogprob < OpenAI::Internal::Type::BaseModel
+ # @!attribute token
+ # A possible text token.
+ #
+ # @return [String, nil]
+ optional :token, String
+
+ # @!attribute logprob
+ # The log probability of this token.
+ #
+ # @return [Float, nil]
+ optional :logprob, Float
+
+ # @!method initialize(token: nil, logprob: nil)
+ # @param token [String] A possible text token.
+ #
+ # @param logprob [Float] The log probability of this token.
+ end
+ end
  end
  end
  end
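Because the done event carries logprobs for the finalized text, the joint probability of the emitted tokens can be approximated by summing the per-token values. A small sketch, assuming `done_event` is a `ResponseTextDoneEvent` captured from a stream as above:

    total_logprob = done_event.logprobs.sum(&:logprob)
    sequence_probability = Math.exp(total_logprob)

    puts "tokens:                    #{done_event.logprobs.size}"
    puts "sum of logprobs:           #{total_logprob.round(4)}"
    puts "approx. joint probability: #{sequence_probability.round(6)}"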
@@ -39,7 +39,7 @@ module OpenAI
  )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
  #
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageEditParams} for more details.
@@ -85,7 +85,7 @@ module OpenAI
  def edit(params)
  parsed, options = OpenAI::ImageEditParams.dump_request(params)
  if parsed[:stream]
- message = "Please use `#stream_raw` for the streaming use case."
+ message = "Please use `#edit_stream_raw` for the streaming use case."
  raise ArgumentError.new(message)
  end
  @client.request(
@@ -106,7 +106,7 @@ module OpenAI
  # Creates an edited or extended image given one or more source images and a
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
  #
- # @overload stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
+ # @overload edit_stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
  #
  # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>] The image(s) to edit. Must be a supported image file or an array of images.
  #
@@ -159,7 +159,7 @@ module OpenAI
  )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart.
  #
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageGenerateParams} for more details.
@@ -203,7 +203,7 @@ module OpenAI
  def generate(params)
  parsed, options = OpenAI::ImageGenerateParams.dump_request(params)
  if parsed[:stream]
- message = "Please use `#stream_raw` for the streaming use case."
+ message = "Please use `#generate_stream_raw` for the streaming use case."
  raise ArgumentError.new(message)
  end
  @client.request(
@@ -223,7 +223,7 @@ module OpenAI
  # Creates an image given a prompt.
  # [Learn more](https://platform.openai.com/docs/guides/images).
  #
- # @overload stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
+ # @overload generate_stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
  #
  # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte
  #
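The Images resource now has separate streaming entry points: `#edit_stream_raw` and `#generate_stream_raw` replace the previous `#stream_raw`, and passing `stream: true` to `#edit` or `#generate` raises an `ArgumentError` naming the new method. A short sketch of the generate side, with prompt and model as illustrative assumptions:

    # Non-streaming call, unchanged in 0.15.0:
    client.images.generate(prompt: "a watercolor fox", model: "gpt-image-1")

    # Passing stream: true to #generate now raises:
    #   ArgumentError: Please use `#generate_stream_raw` for the streaming use case.
    # The streaming counterpart is called by its new name instead:
    stream = client.images.generate_stream_raw(
      prompt: "a watercolor fox",
      model: "gpt-image-1"
    )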
@@ -270,7 +270,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
+ # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
  #
  # @see OpenAI::Models::Responses::ResponseCreateParams
  def stream_raw(params = {})
@@ -344,7 +344,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
+ # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
  #
  # @see OpenAI::Models::Responses::ResponseRetrieveParams
  def retrieve_streaming(response_id, params = {})
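`ResponseReasoningDeltaEvent` and `ResponseReasoningDoneEvent` are removed from the stream union (their model files are deleted in this release), so pattern matches on those constants now raise `NameError`. A hedged sketch of a handler updated for 0.15.0; which events a given request actually emits is an assumption:

    stream = client.responses.stream_raw(model: "gpt-4.1", input: "...")

    stream.each do |event|
      case event
      when OpenAI::Models::Responses::ResponseTextDeltaEvent
        print event.delta
      when OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent,
           OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent
        # still part of the stream union in 0.15.0
      # when OpenAI::Models::Responses::ResponseReasoningDeltaEvent  # removed in 0.15.0
      # when OpenAI::Models::Responses::ResponseReasoningDoneEvent   # removed in 0.15.0
      end
    end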
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
- VERSION = "0.14.0"
+ VERSION = "0.15.0"
  end
data/lib/openai.rb CHANGED
@@ -425,8 +425,6 @@ require_relative "openai/models/responses/response_output_text"
  require_relative "openai/models/responses/response_output_text_annotation_added_event"
  require_relative "openai/models/responses/response_prompt"
  require_relative "openai/models/responses/response_queued_event"
- require_relative "openai/models/responses/response_reasoning_delta_event"
- require_relative "openai/models/responses/response_reasoning_done_event"
  require_relative "openai/models/responses/response_reasoning_item"
  require_relative "openai/models/responses/response_reasoning_summary_delta_event"
  require_relative "openai/models/responses/response_reasoning_summary_done_event"
@@ -216,15 +216,6 @@ module OpenAI
  )
  ECHO =
  T.let(:echo, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- FABLE =
- T.let(
- :fable,
- OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
- )
- ONYX =
- T.let(:onyx, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- NOVA =
- T.let(:nova, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SAGE =
  T.let(:sage, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SHIMMER =
@@ -37,7 +37,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -103,7 +103,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -368,7 +368,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -176,21 +176,6 @@ module OpenAI
  :echo,
  OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
  )
- FABLE =
- T.let(
- :fable,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- ONYX =
- T.let(
- :onyx,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- NOVA =
- T.let(
- :nova,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
  SAGE =
  T.let(
  :sage,
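The `fable`, `onyx`, and `nova` constants are dropped from both voice enums (`Audio::SpeechCreateParams::Voice` and `Chat::ChatCompletionAudioParam::Voice`); whether the API itself still accepts those strings is outside this diff. A sketch of selecting a voice that still has a typed constant, with the model name and response handling as assumptions:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    audio = client.audio.speech.create(
      model: "gpt-4o-mini-tts",  # assumed model name
      voice: :sage,              # :fable, :onyx and :nova no longer have typed constants
      input: "The 0.15.0 release notes, read aloud."
    )

    File.binwrite("notes.mp3", audio.read)  # assumed response handling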
@@ -39,7 +39,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -118,7 +118,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -788,7 +788,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -275,7 +275,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -632,7 +632,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -1017,7 +1017,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -38,7 +38,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  sig { returns(T.nilable(T::Boolean)) }
  attr_accessor :strict

@@ -69,7 +69,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  strict: nil
  )
  end
@@ -81,6 +81,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :partial_images

@@ -182,6 +185,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -63,6 +63,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :partial_images

@@ -165,6 +168,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
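The updated `partial_images` docs note that fewer partial frames than requested may arrive when the final image finishes quickly. A sketch of requesting partial frames over the streaming endpoint; the concrete image stream event classes are not part of this diff, so the events are only counted here:

    stream = client.images.generate_stream_raw(
      prompt: "a watercolor fox",
      model: "gpt-image-1",
      partial_images: 3  # must be between 0 and 3; 0 means a single image event
    )

    events = stream.to_a
    # Fewer than 3 partial frames may arrive if the final image is generated quickly.
    puts "received #{events.size} streaming events"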
@@ -224,7 +224,7 @@ module OpenAI
  end
  attr_writer :input_tokens_details

- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  sig { returns(Integer) }
  attr_accessor :output_tokens

@@ -247,7 +247,7 @@ module OpenAI
  input_tokens:,
  # The input tokens detailed information for the image generation.
  input_tokens_details:,
- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  output_tokens:,
  # The total number of tokens (images and text) used for the image generation.
  total_tokens:
@@ -180,7 +180,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -427,7 +427,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -666,7 +666,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -40,7 +40,8 @@ module OpenAI
  end
  attr_accessor :outputs

- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  sig do
  returns(
  OpenAI::Responses::ResponseCodeInterpreterToolCall::Status::OrSymbol
@@ -82,7 +83,8 @@ module OpenAI
  # The outputs generated by the code interpreter, such as logs or images. Can be
  # null if no outputs are available.
  outputs:,
- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  status:,
  # The type of the code interpreter tool call. Always `code_interpreter_call`.
  type: :code_interpreter_call
@@ -200,7 +202,8 @@ module OpenAI
  end
  end

- # The status of the code interpreter tool call.
+ # The status of the code interpreter tool call. Valid values are `in_progress`,
+ # `completed`, `incomplete`, `interpreting`, and `failed`.
  module Status
  extend OpenAI::Internal::Type::Enum

@@ -163,7 +163,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -472,7 +472,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -640,7 +640,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -12,8 +12,9 @@ module OpenAI
  )
  end

- # The partial update to the arguments for the MCP tool call.
- sig { returns(T.anything) }
+ # A JSON string containing the partial update to the arguments for the MCP tool
+ # call.
+ sig { returns(String) }
  attr_accessor :delta

  # The unique identifier of the MCP tool call item being processed.
@@ -36,7 +37,7 @@ module OpenAI
  # call.
  sig do
  params(
- delta: T.anything,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -44,7 +45,8 @@ module OpenAI
  ).returns(T.attached_class)
  end
  def self.new(
- # The partial update to the arguments for the MCP tool call.
+ # A JSON string containing the partial update to the arguments for the MCP tool
+ # call.
  delta:,
  # The unique identifier of the MCP tool call item being processed.
  item_id:,
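`delta` on `response.mcp_call_arguments.delta` events is now typed as a JSON `String` fragment instead of `T.anything`. A hedged sketch of buffering fragments per `item_id` and parsing once the matching done event arrives, reusing `stream` from the earlier sketches and assuming the done event exposes the same `item_id` attribute:

    require "json"

    buffers = Hash.new { |h, k| h[k] = +"" }

    stream.each do |event|
      case event
      when OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent
        buffers[event.item_id] << event.delta            # JSON string fragments now
      when OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent
        args = JSON.parse(buffers.delete(event.item_id)) # parse once complete
        puts "MCP call #{event.item_id} arguments: #{args.inspect}"
      end
    end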
@@ -60,7 +62,7 @@ module OpenAI
  sig do
  override.returns(
  {
- delta: T.anything,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,