openai 0.13.1 → 0.15.0

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (102)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +26 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  11. data/lib/openai/models/image_edit_params.rb +39 -1
  12. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  13. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  14. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  15. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  16. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  17. data/lib/openai/models/image_generate_params.rb +16 -1
  18. data/lib/openai/models/images_response.rb +2 -2
  19. data/lib/openai/models/responses/response.rb +2 -2
  20. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  21. data/lib/openai/models/responses/response_create_params.rb +2 -2
  22. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  23. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  24. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  26. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  27. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  28. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  29. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  30. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  31. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  32. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  33. data/lib/openai/models/responses/tool.rb +30 -1
  34. data/lib/openai/models.rb +12 -0
  35. data/lib/openai/resources/images.rb +140 -2
  36. data/lib/openai/resources/responses.rb +2 -2
  37. data/lib/openai/version.rb +1 -1
  38. data/lib/openai.rb +6 -2
  39. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  40. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  41. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  42. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  43. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  44. data/rbi/openai/models/function_definition.rbi +2 -2
  45. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  46. data/rbi/openai/models/image_edit_params.rbi +57 -0
  47. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  48. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  49. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  50. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  51. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  52. data/rbi/openai/models/image_generate_params.rbi +18 -0
  53. data/rbi/openai/models/images_response.rbi +2 -2
  54. data/rbi/openai/models/responses/response.rbi +3 -3
  55. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  56. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  57. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  58. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  59. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  60. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  61. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  62. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  63. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  64. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  65. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  66. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  67. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  68. data/rbi/openai/models/responses/tool.rbi +61 -0
  69. data/rbi/openai/models.rbi +12 -0
  70. data/rbi/openai/resources/chat/completions.rbi +2 -2
  71. data/rbi/openai/resources/images.rbi +237 -0
  72. data/rbi/openai/resources/responses.rbi +2 -2
  73. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  74. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  75. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  76. data/sig/openai/models/image_edit_params.rbs +21 -0
  77. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  78. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  79. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  80. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  81. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  82. data/sig/openai/models/image_generate_params.rbs +5 -0
  83. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  86. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  87. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  88. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  89. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  90. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  91. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  92. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  93. data/sig/openai/models/responses/tool.rbs +16 -0
  94. data/sig/openai/models.rbs +12 -0
  95. data/sig/openai/resources/images.rbs +38 -0
  96. metadata +20 -8
  97. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  98. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  99. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  100. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  101. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  102. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -42,6 +42,8 @@ module OpenAI
  )
  end

+ # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
+ #
  # Creates an edited or extended image given one or more source images and a
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
  sig do
@@ -49,17 +51,21 @@ module OpenAI
  image: OpenAI::ImageEditParams::Image::Variants,
  prompt: String,
  background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
  mask: OpenAI::Internal::FileInput,
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
  n: T.nilable(Integer),
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
  size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
  user: String,
+ stream: T.noreturn,
  request_options: OpenAI::RequestOptions::OrHash
  ).returns(OpenAI::ImagesResponse)
  end
@@ -83,6 +89,10 @@ module OpenAI
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
  background: nil,
+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ input_fidelity: nil,
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
  # indicate where `image` should be edited. If there are multiple images provided,
  # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -102,6 +112,13 @@ module OpenAI
  # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
  # Defaults to `auto`.
@@ -119,10 +136,118 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
+ stream: false,
  request_options: {}
  )
  end

+ # See {OpenAI::Resources::Images#edit} for non-streaming counterpart.
+ #
+ # Creates an edited or extended image given one or more source images and a
+ # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
+ sig do
+ params(
+ image: OpenAI::ImageEditParams::Image::Variants,
+ prompt: String,
+ background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
+ mask: OpenAI::Internal::FileInput,
+ model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+ n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
+ quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
+ response_format:
+ T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
+ size: T.nilable(OpenAI::ImageEditParams::Size::OrSymbol),
+ user: String,
+ stream: T.noreturn,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::Stream[OpenAI::ImageEditStreamEvent::Variants]
+ )
+ end
+ def edit_stream_raw(
+ # The image(s) to edit. Must be a supported image file or an array of images.
+ #
+ # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+ # 50MB. You can provide up to 16 images.
+ #
+ # For `dall-e-2`, you can only provide one image, and it should be a square `png`
+ # file less than 4MB.
+ image:,
+ # A text description of the desired image(s). The maximum length is 1000
+ # characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
+ prompt:,
+ # Allows to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ background: nil,
+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ input_fidelity: nil,
+ # An additional image whose fully transparent areas (e.g. where alpha is zero)
+ # indicate where `image` should be edited. If there are multiple images provided,
+ # the mask will be applied on the first image. Must be a valid PNG file, less than
+ # 4MB, and have the same dimensions as `image`.
+ mask: nil,
+ # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+ # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+ # is used.
+ model: nil,
+ # The number of images to generate. Must be between 1 and 10.
+ n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
+ # default value is `png`.
+ output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
+ # The quality of the image that will be generated. `high`, `medium` and `low` are
+ # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+ # Defaults to `auto`.
+ quality: nil,
+ # The format in which the generated images are returned. Must be one of `url` or
+ # `b64_json`. URLs are only valid for 60 minutes after the image has been
+ # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+ # will always return base64-encoded images.
+ response_format: nil,
+ # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
+ size: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ user: nil,
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
+ stream: true,
+ request_options: {}
+ )
+ end
+
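To make the new surface concrete, here is a minimal, hypothetical usage sketch (not taken from the gem's docs). It assumes a client built with `OpenAI::Client.new` reading `OPENAI_API_KEY` from the environment, that file inputs accept a `Pathname`, and that the partial-image event exposes `b64_json` and `partial_image_index`, in line with the completed-event shape shown later in this diff.

```ruby
require "openai"
require "base64"
require "pathname"

# Hypothetical sketch of the new streaming edit call (assumptions noted above).
client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

stream = client.images.edit_stream_raw(
  image: Pathname("photo.png"),
  prompt: "Add a red scarf to the person in the photo",
  model: "gpt-image-1",
  input_fidelity: :high, # new in this release: match input (facial) features
  partial_images: 2      # new in this release: 0-3 partial frames
)

stream.each do |event|
  case event
  when OpenAI::ImageEditPartialImageEvent
    # partial_image_index is assumed from the OpenAI streaming event shape
    File.binwrite("partial_#{event.partial_image_index}.png",
                  Base64.decode64(event.b64_json))
  when OpenAI::ImageEditCompletedEvent
    File.binwrite("edited.png", Base64.decode64(event.b64_json))
  end
end
```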
+ # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart.
+ #
  # Creates an image given a prompt.
  # [Learn more](https://platform.openai.com/docs/guides/images).
  sig do
@@ -137,12 +262,14 @@ module OpenAI
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
  size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
  style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
  user: String,
+ stream: T.noreturn,
  request_options: OpenAI::RequestOptions::OrHash
  ).returns(OpenAI::ImagesResponse)
  end
@@ -176,6 +303,113 @@ module OpenAI
  # The format in which the generated images are returned. This parameter is only
  # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
  output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
+ # The quality of the image that will be generated.
+ #
+ # - `auto` (default value) will automatically select the best quality for the
+ # given model.
+ # - `high`, `medium` and `low` are supported for `gpt-image-1`.
+ # - `hd` and `standard` are supported for `dall-e-3`.
+ # - `standard` is the only option for `dall-e-2`.
+ quality: nil,
+ # The format in which generated images with `dall-e-2` and `dall-e-3` are
+ # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+ # after the image has been generated. This parameter isn't supported for
+ # `gpt-image-1` which will always return base64-encoded images.
+ response_format: nil,
+ # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+ # (landscape), `1024x1536` (portrait), or `auto` (default value) for
+ # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+ # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
+ size: nil,
+ # The style of the generated images. This parameter is only supported for
+ # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+ # towards generating hyper-real and dramatic images. Natural causes the model to
+ # produce more natural, less hyper-real looking images.
+ style: nil,
+ # A unique identifier representing your end-user, which can help OpenAI to monitor
+ # and detect abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ user: nil,
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
+ stream: false,
+ request_options: {}
+ )
+ end
+
+ # See {OpenAI::Resources::Images#generate} for non-streaming counterpart.
+ #
+ # Creates an image given a prompt.
+ # [Learn more](https://platform.openai.com/docs/guides/images).
+ sig do
+ params(
+ prompt: String,
+ background:
+ T.nilable(OpenAI::ImageGenerateParams::Background::OrSymbol),
+ model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
+ moderation:
+ T.nilable(OpenAI::ImageGenerateParams::Moderation::OrSymbol),
+ n: T.nilable(Integer),
+ output_compression: T.nilable(Integer),
+ output_format:
+ T.nilable(OpenAI::ImageGenerateParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
+ quality: T.nilable(OpenAI::ImageGenerateParams::Quality::OrSymbol),
+ response_format:
+ T.nilable(OpenAI::ImageGenerateParams::ResponseFormat::OrSymbol),
+ size: T.nilable(OpenAI::ImageGenerateParams::Size::OrSymbol),
+ style: T.nilable(OpenAI::ImageGenerateParams::Style::OrSymbol),
+ user: String,
+ stream: T.noreturn,
+ request_options: OpenAI::RequestOptions::OrHash
+ ).returns(
+ OpenAI::Internal::Stream[OpenAI::ImageGenStreamEvent::Variants]
+ )
+ end
+ def generate_stream_raw(
+ # A text description of the desired image(s). The maximum length is 32000
+ # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+ # for `dall-e-3`.
+ prompt:,
+ # Allows to set transparency for the background of the generated image(s). This
+ # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+ # `opaque` or `auto` (default value). When `auto` is used, the model will
+ # automatically determine the best background for the image.
+ #
+ # If `transparent`, the output format needs to support transparency, so it should
+ # be set to either `png` (default value) or `webp`.
+ background: nil,
+ # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+ # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+ # `gpt-image-1` is used.
+ model: nil,
+ # Control the content-moderation level for images generated by `gpt-image-1`. Must
+ # be either `low` for less restrictive filtering or `auto` (default value).
+ moderation: nil,
+ # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+ # `n=1` is supported.
+ n: nil,
+ # The compression level (0-100%) for the generated images. This parameter is only
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+ # defaults to 100.
+ output_compression: nil,
+ # The format in which the generated images are returned. This parameter is only
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
+ output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
  # The quality of the image that will be generated.
  #
  # - `auto` (default value) will automatically select the best quality for the
@@ -203,6 +437,9 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
+ stream: true,
  request_options: {}
  )
  end
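The generation side mirrors the edit side. A comparable hypothetical sketch, under the same client and `base64` assumptions as the edit example above:

```ruby
# Hypothetical sketch of the new streaming generate call (same assumptions
# as the edit example earlier in this diff).
stream = client.images.generate_stream_raw(
  prompt: "A watercolor lighthouse at dusk",
  model: "gpt-image-1",
  size: :"1024x1536",
  partial_images: 3 # up to 3 partial frames before the final image
)

stream.each do |event|
  case event
  when OpenAI::ImageGenPartialImageEvent
    # partial_image_index is assumed from the OpenAI streaming event shape
    puts "received partial image #{event.partial_image_index}"
  when OpenAI::ImageGenCompletedEvent
    File.binwrite("lighthouse.png", Base64.decode64(event.b64_json))
  end
end
```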
@@ -161,7 +161,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -395,7 +395,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -80,9 +80,6 @@ module OpenAI
  | :ballad
  | :coral
  | :echo
- | :fable
- | :onyx
- | :nova
  | :sage
  | :shimmer
  | :verse
@@ -97,9 +94,6 @@ module OpenAI
  BALLAD: :ballad
  CORAL: :coral
  ECHO: :echo
- FABLE: :fable
- ONYX: :onyx
- NOVA: :nova
  SAGE: :sage
  SHIMMER: :shimmer
  VERSE: :verse
@@ -46,9 +46,6 @@ module OpenAI
  | :ballad
  | :coral
  | :echo
- | :fable
- | :onyx
- | :nova
  | :sage
  | :shimmer
  | :verse
@@ -63,9 +60,6 @@ module OpenAI
  BALLAD: :ballad
  CORAL: :coral
  ECHO: :echo
- FABLE: :fable
- ONYX: :onyx
- NOVA: :nova
  SAGE: :sage
  SHIMMER: :shimmer
  VERSE: :verse
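With `:fable`, `:onyx`, and `:nova` dropped from the typed voice unions above, Sorbet/RBS-checked code must pick one of the remaining voices. A hypothetical sketch under the same client assumption as earlier (the `gpt-4o-mini-tts` model name is an assumption, not something this diff shows):

```ruby
# Hypothetical sketch: a speech request that stays inside the narrowed
# voice union after this change (any remaining voice works; :coral here).
client.audio.speech.create(
  model: "gpt-4o-mini-tts", # assumed model name, not taken from this diff
  voice: :coral,
  input: "The quick brown fox jumped over the lazy dog."
)
```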
@@ -0,0 +1,150 @@
+ module OpenAI
+ module Models
+ type image_edit_completed_event =
+ {
+ :b64_json => String,
+ background: OpenAI::Models::ImageEditCompletedEvent::background,
+ created_at: Integer,
+ output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+ quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+ size: OpenAI::Models::ImageEditCompletedEvent::size,
+ type: :"image_edit.completed",
+ usage: OpenAI::ImageEditCompletedEvent::Usage
+ }
+
+ class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor b64_json: String
+
+ attr_accessor background: OpenAI::Models::ImageEditCompletedEvent::background
+
+ attr_accessor created_at: Integer
+
+ attr_accessor output_format: OpenAI::Models::ImageEditCompletedEvent::output_format
+
+ attr_accessor quality: OpenAI::Models::ImageEditCompletedEvent::quality
+
+ attr_accessor size: OpenAI::Models::ImageEditCompletedEvent::size
+
+ attr_accessor type: :"image_edit.completed"
+
+ attr_accessor usage: OpenAI::ImageEditCompletedEvent::Usage
+
+ def initialize: (
+ b64_json: String,
+ background: OpenAI::Models::ImageEditCompletedEvent::background,
+ created_at: Integer,
+ output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+ quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+ size: OpenAI::Models::ImageEditCompletedEvent::size,
+ usage: OpenAI::ImageEditCompletedEvent::Usage,
+ ?type: :"image_edit.completed"
+ ) -> void
+
+ def to_hash: -> {
+ :b64_json => String,
+ background: OpenAI::Models::ImageEditCompletedEvent::background,
+ created_at: Integer,
+ output_format: OpenAI::Models::ImageEditCompletedEvent::output_format,
+ quality: OpenAI::Models::ImageEditCompletedEvent::quality,
+ size: OpenAI::Models::ImageEditCompletedEvent::size,
+ type: :"image_edit.completed",
+ usage: OpenAI::ImageEditCompletedEvent::Usage
+ }
+
+ type background = :transparent | :opaque | :auto
+
+ module Background
+ extend OpenAI::Internal::Type::Enum
+
+ TRANSPARENT: :transparent
+ OPAQUE: :opaque
+ AUTO: :auto
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::background]
+ end
+
+ type output_format = :png | :webp | :jpeg
+
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ PNG: :png
+ WEBP: :webp
+ JPEG: :jpeg
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::output_format]
+ end
+
+ type quality = :low | :medium | :high | :auto
+
+ module Quality
+ extend OpenAI::Internal::Type::Enum
+
+ LOW: :low
+ MEDIUM: :medium
+ HIGH: :high
+ AUTO: :auto
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::quality]
+ end
+
+ type size = :"1024x1024" | :"1024x1536" | :"1536x1024" | :auto
+
+ module Size
+ extend OpenAI::Internal::Type::Enum
+
+ SIZE_1024X1024: :"1024x1024"
+ SIZE_1024X1536: :"1024x1536"
+ SIZE_1536X1024: :"1536x1024"
+ AUTO: :auto
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditCompletedEvent::size]
+ end
+
+ type usage =
+ {
+ input_tokens: Integer,
+ input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+ output_tokens: Integer,
+ total_tokens: Integer
+ }
+
+ class Usage < OpenAI::Internal::Type::BaseModel
+ attr_accessor input_tokens: Integer
+
+ attr_accessor input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails
+
+ attr_accessor output_tokens: Integer
+
+ attr_accessor total_tokens: Integer
+
+ def initialize: (
+ input_tokens: Integer,
+ input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+ output_tokens: Integer,
+ total_tokens: Integer
+ ) -> void
+
+ def to_hash: -> {
+ input_tokens: Integer,
+ input_tokens_details: OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+ output_tokens: Integer,
+ total_tokens: Integer
+ }
+
+ type input_tokens_details =
+ { image_tokens: Integer, text_tokens: Integer }
+
+ class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+ attr_accessor image_tokens: Integer
+
+ attr_accessor text_tokens: Integer
+
+ def initialize: (image_tokens: Integer, text_tokens: Integer) -> void
+
+ def to_hash: -> { image_tokens: Integer, text_tokens: Integer }
+ end
+ end
+ end
+ end
+ end
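The signature above pins down the full shape of an `image_edit.completed` event: every accessor is required, and token usage is nested two levels deep. A small hypothetical helper that reads it, using only accessors declared in the RBS above (assumes `base64` is required):

```ruby
# Hypothetical sketch: consuming an image_edit.completed event.
def log_edit_completed(event)
  bytes = Base64.decode64(event.b64_json)
  puts "#{event.size} #{event.output_format} at #{event.quality} quality " \
       "(#{bytes.bytesize} bytes)"

  u = event.usage
  puts "tokens: #{u.input_tokens} in " \
       "(#{u.input_tokens_details.image_tokens} image / " \
       "#{u.input_tokens_details.text_tokens} text), " \
       "#{u.output_tokens} out, #{u.total_tokens} total"
end
```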
@@ -5,11 +5,13 @@ module OpenAI
  image: OpenAI::Models::ImageEditParams::image,
  prompt: String,
  background: OpenAI::Models::ImageEditParams::background?,
+ input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
  mask: OpenAI::Internal::file_input,
  model: OpenAI::Models::ImageEditParams::model?,
  n: Integer?,
  output_compression: Integer?,
  output_format: OpenAI::Models::ImageEditParams::output_format?,
+ partial_images: Integer?,
  quality: OpenAI::Models::ImageEditParams::quality?,
  response_format: OpenAI::Models::ImageEditParams::response_format?,
  size: OpenAI::Models::ImageEditParams::size?,
@@ -27,6 +29,8 @@ module OpenAI

  attr_accessor background: OpenAI::Models::ImageEditParams::background?

+ attr_accessor input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?
+
  attr_reader mask: OpenAI::Internal::file_input?

  def mask=: (OpenAI::Internal::file_input) -> OpenAI::Internal::file_input
@@ -39,6 +43,8 @@ module OpenAI

  attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format?

+ attr_accessor partial_images: Integer?
+
  attr_accessor quality: OpenAI::Models::ImageEditParams::quality?

  attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format?
@@ -53,11 +59,13 @@ module OpenAI
  image: OpenAI::Models::ImageEditParams::image,
  prompt: String,
  ?background: OpenAI::Models::ImageEditParams::background?,
+ ?input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
  ?mask: OpenAI::Internal::file_input,
  ?model: OpenAI::Models::ImageEditParams::model?,
  ?n: Integer?,
  ?output_compression: Integer?,
  ?output_format: OpenAI::Models::ImageEditParams::output_format?,
+ ?partial_images: Integer?,
  ?quality: OpenAI::Models::ImageEditParams::quality?,
  ?response_format: OpenAI::Models::ImageEditParams::response_format?,
  ?size: OpenAI::Models::ImageEditParams::size?,
@@ -69,11 +77,13 @@ module OpenAI
  image: OpenAI::Models::ImageEditParams::image,
  prompt: String,
  background: OpenAI::Models::ImageEditParams::background?,
+ input_fidelity: OpenAI::Models::ImageEditParams::input_fidelity?,
  mask: OpenAI::Internal::file_input,
  model: OpenAI::Models::ImageEditParams::model?,
  n: Integer?,
  output_compression: Integer?,
  output_format: OpenAI::Models::ImageEditParams::output_format?,
+ partial_images: Integer?,
  quality: OpenAI::Models::ImageEditParams::quality?,
  response_format: OpenAI::Models::ImageEditParams::response_format?,
  size: OpenAI::Models::ImageEditParams::size?,
@@ -104,6 +114,17 @@ module OpenAI
  def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::background]
  end

+ type input_fidelity = :high | :low
+
+ module InputFidelity
+ extend OpenAI::Internal::Type::Enum
+
+ HIGH: :high
+ LOW: :low
+
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::input_fidelity]
+ end
+
  type model = String | OpenAI::Models::image_model

  module Model
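The `input_fidelity` union above also reaches the ordinary, non-streaming edit call. A final hypothetical sketch under the same client, `base64`, and `pathname` assumptions as the earlier examples:

```ruby
# Hypothetical sketch: input_fidelity on the non-streaming edit call.
# :high asks the model to match input features (notably faces) more closely;
# the docs in this diff give :low as the default.
response = client.images.edit(
  image: Pathname("portrait.png"),
  prompt: "Repaint the background in an impressionist style",
  model: "gpt-image-1",
  input_fidelity: :high
)

# gpt-image-1 always returns base64-encoded images per the docs above.
File.binwrite("portrait_edited.png",
              Base64.decode64(response.data.first.b64_json))
```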