openai 0.14.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +41 -0
  3. data/README.md +3 -3
  4. data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
  5. data/lib/openai/helpers/structured_output/union_of.rb +11 -1
  6. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion.rb +2 -2
  8. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  9. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  10. data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
  11. data/lib/openai/models/chat/completion_create_params.rb +33 -7
  12. data/lib/openai/models/function_definition.rb +1 -1
  13. data/lib/openai/models/image_edit_params.rb +4 -1
  14. data/lib/openai/models/image_generate_params.rb +4 -1
  15. data/lib/openai/models/images_response.rb +2 -5
  16. data/lib/openai/models/responses/response.rb +52 -6
  17. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  18. data/lib/openai/models/responses/response_create_params.rb +33 -7
  19. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  20. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  21. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  22. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  23. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  24. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  26. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  27. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  28. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  29. data/lib/openai/resources/chat/completions.rb +12 -4
  30. data/lib/openai/resources/images.rb +6 -6
  31. data/lib/openai/resources/responses.rb +42 -17
  32. data/lib/openai/version.rb +1 -1
  33. data/lib/openai.rb +0 -2
  34. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  35. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  36. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  37. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  38. data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
  39. data/rbi/openai/models/chat/completion_create_params.rbi +47 -9
  40. data/rbi/openai/models/function_definition.rbi +2 -2
  41. data/rbi/openai/models/image_edit_params.rbi +6 -0
  42. data/rbi/openai/models/image_generate_params.rbi +6 -0
  43. data/rbi/openai/models/images_response.rbi +2 -2
  44. data/rbi/openai/models/responses/response.rbi +47 -9
  45. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  46. data/rbi/openai/models/responses/response_create_params.rbi +47 -9
  47. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  48. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  49. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  50. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  51. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  52. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  53. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  54. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  55. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  56. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  57. data/rbi/openai/resources/chat/completions.rbi +36 -8
  58. data/rbi/openai/resources/images.rbi +22 -10
  59. data/rbi/openai/resources/responses.rbi +36 -8
  60. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  61. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  62. data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
  63. data/sig/openai/models/chat/completion_create_params.rbs +14 -0
  64. data/sig/openai/models/responses/response.rbs +14 -0
  65. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  66. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  67. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  68. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  69. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  70. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  71. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  72. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  73. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  74. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  75. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  76. data/sig/openai/resources/chat/completions.rbs +4 -0
  77. data/sig/openai/resources/responses.rbs +4 -0
  78. metadata +2 -8
  79. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  80. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  81. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  82. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  83. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  84. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
data/lib/openai/resources/images.rb CHANGED
@@ -39,7 +39,7 @@ module OpenAI
   )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
  #
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageEditParams} for more details.
@@ -85,7 +85,7 @@ module OpenAI
  def edit(params)
  parsed, options = OpenAI::ImageEditParams.dump_request(params)
  if parsed[:stream]
- message = "Please use `#stream_raw` for the streaming use case."
+ message = "Please use `#edit_stream_raw` for the streaming use case."
  raise ArgumentError.new(message)
  end
  @client.request(
@@ -106,7 +106,7 @@ module OpenAI
  # Creates an edited or extended image given one or more source images and a
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
  #
- # @overload stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
+ # @overload edit_stream_raw(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
  #
  # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>] The image(s) to edit. Must be a supported image file or an array of images.
  #
@@ -159,7 +159,7 @@ module OpenAI
   )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart.
  #
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::ImageGenerateParams} for more details.
@@ -203,7 +203,7 @@ module OpenAI
  def generate(params)
  parsed, options = OpenAI::ImageGenerateParams.dump_request(params)
  if parsed[:stream]
- message = "Please use `#stream_raw` for the streaming use case."
+ message = "Please use `#generate_stream_raw` for the streaming use case."
  raise ArgumentError.new(message)
  end
  @client.request(
@@ -223,7 +223,7 @@ module OpenAI
  # Creates an image given a prompt.
  # [Learn more](https://platform.openai.com/docs/guides/images).
  #
- # @overload stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
+ # @overload generate_stream_raw(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
  #
  # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte
  #
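The rename above splits the single image `stream_raw` entry point into one method per operation. A minimal sketch of calling the new generation variant, assuming a client configured from `OPENAI_API_KEY`; the prompt and event handling are illustrative only:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Streaming image generation now goes through #generate_stream_raw
# (and streaming edits through #edit_stream_raw).
stream = client.images.generate_stream_raw(
  prompt: "a watercolor lighthouse at dusk",
  model: :"gpt-image-1",
  partial_images: 2 # up to 2 partial frames before the final image
)

stream.each do |event|
  puts event.type # partial-image events, then the completed event
end
```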
data/lib/openai/resources/responses.rb CHANGED
@@ -23,7 +23,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -47,8 +47,12 @@ module OpenAI
  #
  # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
  #
+ # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+ #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
+ # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+ #
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -67,7 +71,7 @@ module OpenAI
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
  #
- # @param user [String] A stable identifier for your end-users.
+ # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
@@ -170,20 +174,22 @@ module OpenAI
  end
  model, tool_models = get_structured_output_models(parsed)

+ unwrap = ->(raw) do
+ if raw[:type] == "response.completed" && raw[:response]
+ parse_structured_outputs!(raw[:response], model, tool_models)
+ end
+ raw
+ end
+
  if previous_response_id
- retrieve_params = {}
- retrieve_params[:include] = params[:include] if params[:include]
- retrieve_params[:request_options] = params[:request_options] if params[:request_options]
+ retrieve_params = params.slice(:include, :request_options)

- raw_stream = retrieve_streaming(previous_response_id, retrieve_params)
+ raw_stream = retrieve_streaming_internal(
+ previous_response_id,
+ params: retrieve_params,
+ unwrap: unwrap
+ )
  else
- unwrap = ->(raw) do
- if raw[:type] == "response.completed" && raw[:response]
- parse_structured_outputs!(raw[:response], model, tool_models)
- end
- raw
- end
-
  parsed[:stream] = true

  raw_stream = @client.request(
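The hunk above hoists the `unwrap` callback out of the else-branch, so structured outputs are also parsed when a stream is resumed from `previous_response_id`. A hedged sketch of the caller-side effect, assuming this is the SDK's `stream` helper and using a placeholder response ID:

```ruby
# Resuming a stored response now runs the same response.completed
# post-processing as a freshly created stream.
stream = client.responses.stream(previous_response_id: "resp_123") # hypothetical ID
stream.each { |event| puts event.type }
```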
@@ -222,7 +228,7 @@ module OpenAI
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
  # your own data as input for the model's response.
  #
- # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+ # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
  #
  # @param background [Boolean, nil] Whether to run the model response in the background.
  #
@@ -246,8 +252,12 @@ module OpenAI
  #
  # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
  #
+ # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+ #
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
  #
+ # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+ #
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -266,11 +276,11 @@ module OpenAI
  #
  # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
  #
- # @param user [String] A stable identifier for your end-users.
+ # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
+ # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
  #
  # @see OpenAI::Models::Responses::ResponseCreateParams
  def stream_raw(params = {})
@@ -344,7 +354,7 @@ module OpenAI
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
- # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
+ # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
  #
  # @see OpenAI::Models::Responses::ResponseRetrieveParams
  def retrieve_streaming(response_id, params = {})
@@ -365,6 +375,21 @@ module OpenAI
  )
  end

+ private def retrieve_streaming_internal(response_id, params:, unwrap:)
+ parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
+ parsed.store(:stream, true)
+ @client.request(
+ method: :get,
+ path: ["responses/%1$s", response_id],
+ query: parsed,
+ headers: {"accept" => "text/event-stream"},
+ stream: OpenAI::Internal::Stream,
+ model: OpenAI::Responses::ResponseStreamEvent,
+ options: options,
+ unwrap: unwrap
+ )
+ end
+
  # Deletes a model response with the given ID.
  #
  # @overload delete(response_id, request_options: {})
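Taken together, this section adds `prompt_cache_key` and `safety_identifier` to the Responses API and deprecates `user` in their favor. A sketch of supplying both on `create`, assuming Ruby's `Digest` is an acceptable way to produce the recommended hashed identifier:

```ruby
require "digest"

response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize the changelog for me.",
  prompt_cache_key: "changelog-summary-v1", # cache bucketing, replaces `user`
  safety_identifier: Digest::SHA256.hexdigest("user@example.com") # abuse detection
)
puts response.id
```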
data/lib/openai/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
- VERSION = "0.14.0"
+ VERSION = "0.16.0"
  end
data/lib/openai.rb CHANGED
@@ -425,8 +425,6 @@ require_relative "openai/models/responses/response_output_text"
  require_relative "openai/models/responses/response_output_text_annotation_added_event"
  require_relative "openai/models/responses/response_prompt"
  require_relative "openai/models/responses/response_queued_event"
- require_relative "openai/models/responses/response_reasoning_delta_event"
- require_relative "openai/models/responses/response_reasoning_done_event"
  require_relative "openai/models/responses/response_reasoning_item"
  require_relative "openai/models/responses/response_reasoning_summary_delta_event"
  require_relative "openai/models/responses/response_reasoning_summary_done_event"
data/rbi/openai/models/audio/speech_create_params.rbi CHANGED
@@ -216,15 +216,6 @@ module OpenAI
   )
  ECHO =
  T.let(:echo, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- FABLE =
- T.let(
- :fable,
- OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
- )
- ONYX =
- T.let(:onyx, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- NOVA =
- T.let(:nova, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SAGE =
  T.let(:sage, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SHIMMER =
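Only the typed enum changes here: the `fable`, `onyx`, and `nova` voice constants are dropped from the signature. A sketch using one of the voices that remains, with an assumed TTS model name:

```ruby
# Returns the synthesized audio; :sage is still part of the typed enum.
audio = client.audio.speech.create(
  model: :"gpt-4o-mini-tts", # assumed model name, not part of this diff
  voice: :sage,
  input: "The 0.16.0 release notes, read aloud."
)
```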
data/rbi/openai/models/chat/chat_completion.rbi CHANGED
@@ -37,7 +37,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -103,7 +103,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -368,7 +368,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
data/rbi/openai/models/chat/chat_completion_audio_param.rbi CHANGED
@@ -176,21 +176,6 @@ module OpenAI
  :echo,
  OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
  )
- FABLE =
- T.let(
- :fable,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- ONYX =
- T.let(
- :onyx,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- NOVA =
- T.let(
- :nova,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
  SAGE =
  T.let(
  :sage,
data/rbi/openai/models/chat/chat_completion_chunk.rbi CHANGED
@@ -39,7 +39,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -118,7 +118,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -788,7 +788,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
data/rbi/openai/models/chat/chat_completion_store_message.rbi CHANGED
@@ -18,17 +18,82 @@ module OpenAI
  sig { returns(String) }
  attr_accessor :id

+ # If a content parts array was provided, this is an array of `text` and
+ # `image_url` parts. Otherwise, null.
+ sig do
+ returns(
+ T.nilable(
+ T::Array[
+ OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants
+ ]
+ )
+ )
+ end
+ attr_accessor :content_parts
+
  # A chat completion message generated by the model.
- sig { params(id: String).returns(T.attached_class) }
+ sig do
+ params(
+ id: String,
+ content_parts:
+ T.nilable(
+ T::Array[
+ T.any(
+ OpenAI::Chat::ChatCompletionContentPartText::OrHash,
+ OpenAI::Chat::ChatCompletionContentPartImage::OrHash
+ )
+ ]
+ )
+ ).returns(T.attached_class)
+ end
  def self.new(
  # The identifier of the chat message.
- id:
+ id:,
+ # If a content parts array was provided, this is an array of `text` and
+ # `image_url` parts. Otherwise, null.
+ content_parts: nil
  )
  end

- sig { override.returns({ id: String }) }
+ sig do
+ override.returns(
+ {
+ id: String,
+ content_parts:
+ T.nilable(
+ T::Array[
+ OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants
+ ]
+ )
+ }
+ )
+ end
  def to_hash
  end
+
+ # Learn about
+ # [text inputs](https://platform.openai.com/docs/guides/text-generation).
+ module ContentPart
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::Chat::ChatCompletionContentPartText,
+ OpenAI::Chat::ChatCompletionContentPartImage
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Chat::ChatCompletionStoreMessage::ContentPart::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
  end
  end
  end
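A hedged sketch of reading the new `content_parts` field from stored messages, assuming the messages sub-resource's `list` call takes the stored completion ID positionally; the ID is a placeholder:

```ruby
client.chat.completions.messages.list("chatcmpl_abc123").each do |message|
  # content_parts is null unless the original request used a parts array.
  next if message.content_parts.nil?

  message.content_parts.each { |part| puts part.class }
end
```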
data/rbi/openai/models/chat/completion_create_params.rbi CHANGED
@@ -216,6 +216,15 @@ module OpenAI
  sig { returns(T.nilable(Float)) }
  attr_accessor :presence_penalty

+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ sig { returns(T.nilable(String)) }
+ attr_reader :prompt_cache_key
+
+ sig { params(prompt_cache_key: String).void }
+ attr_writer :prompt_cache_key
+
  # **o-series models only**
  #
  # Constrains effort on reasoning for
@@ -262,6 +271,17 @@ module OpenAI
  end
  attr_writer :response_format

+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ sig { returns(T.nilable(String)) }
+ attr_reader :safety_identifier
+
+ sig { params(safety_identifier: String).void }
+ attr_writer :safety_identifier
+
  # This feature is in Beta. If specified, our system will make a best effort to
  # sample deterministically, such that repeated requests with the same `seed` and
  # parameters should return the same result. Determinism is not guaranteed, and you
@@ -275,7 +295,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -399,9 +419,11 @@ module OpenAI
  sig { returns(T.nilable(Float)) }
  attr_accessor :top_p

- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  sig { returns(T.nilable(String)) }
  attr_reader :user

@@ -465,6 +487,7 @@ module OpenAI
  prediction:
  T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
  presence_penalty: T.nilable(Float),
+ prompt_cache_key: String,
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
  T.any(
@@ -473,6 +496,7 @@ module OpenAI
  OpenAI::StructuredOutput::JsonSchemaConverter,
  OpenAI::ResponseFormatJSONObject::OrHash
  ),
+ safety_identifier: String,
  seed: T.nilable(Integer),
  service_tier:
  T.nilable(
@@ -603,6 +627,10 @@ module OpenAI
  # whether they appear in the text so far, increasing the model's likelihood to
  # talk about new topics.
  presence_penalty: nil,
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ prompt_cache_key: nil,
  # **o-series models only**
  #
  # Constrains effort on reasoning for
@@ -621,6 +649,12 @@ module OpenAI
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  response_format: nil,
+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ safety_identifier: nil,
  # This feature is in Beta. If specified, our system will make a best effort to
  # sample deterministically, such that repeated requests with the same `seed` and
  # parameters should return the same result. Determinism is not guaranteed, and you
@@ -632,7 +666,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -687,9 +721,11 @@ module OpenAI
  #
  # We generally recommend altering this or `temperature` but not both.
  top_p: nil,
- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  user: nil,
  # This tool searches the web for relevant results to use in a response. Learn more
  # about the
@@ -739,6 +775,7 @@ module OpenAI
  prediction:
  T.nilable(OpenAI::Chat::ChatCompletionPredictionContent),
  presence_penalty: T.nilable(Float),
+ prompt_cache_key: String,
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
  T.any(
@@ -746,6 +783,7 @@ module OpenAI
  OpenAI::ResponseFormatJSONSchema,
  OpenAI::ResponseFormatJSONObject
  ),
+ safety_identifier: String,
  seed: T.nilable(Integer),
  service_tier:
  T.nilable(
@@ -1017,7 +1055,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
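The same field pair lands on Chat Completions. A minimal sketch mirroring the typed signatures above; the identifier value is assumed to be pre-hashed by the caller:

```ruby
completion = client.chat.completions.create(
  model: "gpt-4.1",
  messages: [{role: "user", content: "Say this is a test"}],
  prompt_cache_key: "greeting-flow",
  safety_identifier: "b2c9..." # placeholder for a hashed user identifier
)
puts completion.id
```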
data/rbi/openai/models/function_definition.rbi CHANGED
@@ -38,7 +38,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  sig { returns(T.nilable(T::Boolean)) }
  attr_accessor :strict

@@ -69,7 +69,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  strict: nil
  )
  end
data/rbi/openai/models/image_edit_params.rbi CHANGED
@@ -81,6 +81,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :partial_images

@@ -182,6 +185,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
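The note added above means a stream may deliver fewer partial frames than requested. A sketch of a streaming edit that tolerates that, with placeholder file paths:

```ruby
require "pathname"

stream = client.images.edit_stream_raw(
  image: Pathname("input.png"), # placeholder source image
  prompt: "add a red scarf",
  model: :"gpt-image-1",
  partial_images: 3 # an upper bound, not a guarantee
)
stream.each { |event| puts event.type }
```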
data/rbi/openai/models/image_generate_params.rbi CHANGED
@@ -63,6 +63,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  sig { returns(T.nilable(Integer)) }
  attr_accessor :partial_images

@@ -165,6 +168,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
data/rbi/openai/models/images_response.rbi CHANGED
@@ -224,7 +224,7 @@ module OpenAI
  end
  attr_writer :input_tokens_details

- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  sig { returns(Integer) }
  attr_accessor :output_tokens

@@ -247,7 +247,7 @@ module OpenAI
  input_tokens:,
  # The input tokens detailed information for the image generation.
  input_tokens_details:,
- # The number of image tokens in the output image.
+ # The number of output tokens generated by the model.
  output_tokens:,
  # The total number of tokens (images and text) used for the image generation.
  total_tokens:
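The usage wording is corrected rather than changed in behavior. A sketch of reading the counter, guarding for models that return no usage block:

```ruby
image = client.images.generate(prompt: "a tiny origami crane", model: :"gpt-image-1")

usage = image.usage
puts usage.output_tokens if usage # output tokens generated by the model
```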