openai 0.13.1 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +26 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  11. data/lib/openai/models/image_edit_params.rb +39 -1
  12. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  13. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  14. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  15. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  16. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  17. data/lib/openai/models/image_generate_params.rb +16 -1
  18. data/lib/openai/models/images_response.rb +2 -2
  19. data/lib/openai/models/responses/response.rb +2 -2
  20. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  21. data/lib/openai/models/responses/response_create_params.rb +2 -2
  22. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  23. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  24. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  26. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  27. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  28. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  29. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  30. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  31. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  32. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  33. data/lib/openai/models/responses/tool.rb +30 -1
  34. data/lib/openai/models.rb +12 -0
  35. data/lib/openai/resources/images.rb +140 -2
  36. data/lib/openai/resources/responses.rb +2 -2
  37. data/lib/openai/version.rb +1 -1
  38. data/lib/openai.rb +6 -2
  39. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  40. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  41. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  42. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  43. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  44. data/rbi/openai/models/function_definition.rbi +2 -2
  45. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  46. data/rbi/openai/models/image_edit_params.rbi +57 -0
  47. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  48. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  49. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  50. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  51. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  52. data/rbi/openai/models/image_generate_params.rbi +18 -0
  53. data/rbi/openai/models/images_response.rbi +2 -2
  54. data/rbi/openai/models/responses/response.rbi +3 -3
  55. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  56. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  57. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  58. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  59. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  60. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  61. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  62. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  63. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  64. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  65. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  66. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  67. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  68. data/rbi/openai/models/responses/tool.rbi +61 -0
  69. data/rbi/openai/models.rbi +12 -0
  70. data/rbi/openai/resources/chat/completions.rbi +2 -2
  71. data/rbi/openai/resources/images.rbi +237 -0
  72. data/rbi/openai/resources/responses.rbi +2 -2
  73. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  74. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  75. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  76. data/sig/openai/models/image_edit_params.rbs +21 -0
  77. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  78. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  79. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  80. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  81. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  82. data/sig/openai/models/image_generate_params.rbs +5 -0
  83. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  86. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  87. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  88. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  89. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  90. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  91. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  92. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  93. data/sig/openai/models/responses/tool.rbs +16 -0
  94. data/sig/openai/models.rbs +12 -0
  95. data/sig/openai/resources/images.rbs +38 -0
  96. metadata +20 -8
  97. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  98. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  99. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  100. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  101. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  102. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
data/lib/openai.rb CHANGED
@@ -320,8 +320,14 @@ require_relative "openai/models/graders/multi_grader"
  require_relative "openai/models/graders/string_check_grader"
  require_relative "openai/models/image"
  require_relative "openai/models/image_create_variation_params"
+ require_relative "openai/models/image_edit_completed_event"
  require_relative "openai/models/image_edit_params"
+ require_relative "openai/models/image_edit_partial_image_event"
+ require_relative "openai/models/image_edit_stream_event"
+ require_relative "openai/models/image_gen_completed_event"
  require_relative "openai/models/image_generate_params"
+ require_relative "openai/models/image_gen_partial_image_event"
+ require_relative "openai/models/image_gen_stream_event"
  require_relative "openai/models/image_model"
  require_relative "openai/models/images_response"
  require_relative "openai/models/metadata"
@@ -419,8 +425,6 @@ require_relative "openai/models/responses/response_output_text"
  require_relative "openai/models/responses/response_output_text_annotation_added_event"
  require_relative "openai/models/responses/response_prompt"
  require_relative "openai/models/responses/response_queued_event"
- require_relative "openai/models/responses/response_reasoning_delta_event"
- require_relative "openai/models/responses/response_reasoning_done_event"
  require_relative "openai/models/responses/response_reasoning_item"
  require_relative "openai/models/responses/response_reasoning_summary_delta_event"
  require_relative "openai/models/responses/response_reasoning_summary_done_event"
data/rbi/openai/models/audio/speech_create_params.rbi CHANGED
@@ -216,15 +216,6 @@ module OpenAI
  )
  ECHO =
  T.let(:echo, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- FABLE =
- T.let(
- :fable,
- OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol
- )
- ONYX =
- T.let(:onyx, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
- NOVA =
- T.let(:nova, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SAGE =
  T.let(:sage, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol)
  SHIMMER =
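
The hunk above drops the `fable`, `onyx`, and `nova` constants from the typed `Voice` enum. A minimal sketch of selecting one of the remaining voices after this change; the client setup, model name, and input text are illustrative assumptions, not part of this diff:

require "openai"

# Assumes OPENAI_API_KEY is set in the environment.
client = OpenAI::Client.new

# :fable, :onyx, and :nova no longer have typed constants in these
# signatures; a remaining voice such as :sage still does.
speech = client.audio.speech.create(
  model: "gpt-4o-mini-tts", # illustrative model name
  voice: :sage,
  input: "Hello from the updated gem."
)
# `speech` holds the rendered audio response.
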
data/rbi/openai/models/chat/chat_completion.rbi CHANGED
@@ -37,7 +37,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -103,7 +103,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -368,7 +368,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
data/rbi/openai/models/chat/chat_completion_audio_param.rbi CHANGED
@@ -176,21 +176,6 @@ module OpenAI
  :echo,
  OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
  )
- FABLE =
- T.let(
- :fable,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- ONYX =
- T.let(
- :onyx,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
- NOVA =
- T.let(
- :nova,
- OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol
- )
  SAGE =
  T.let(
  :sage,
data/rbi/openai/models/chat/chat_completion_chunk.rbi CHANGED
@@ -39,7 +39,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -118,7 +118,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -788,7 +788,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
data/rbi/openai/models/chat/completion_create_params.rbi CHANGED
@@ -275,7 +275,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -632,7 +632,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -1017,7 +1017,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
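
The hunks above only fix the "requset" typo in the `service_tier` documentation; the behavior they describe is unchanged. A hedged sketch of passing the documented values through the chat API; the model name and prompt are invented for illustration:

require "openai"

client = OpenAI::Client.new # assumes OPENAI_API_KEY in the environment

completion = client.chat.completions.create(
  model: "gpt-4.1", # illustrative model name
  messages: [{role: :user, content: "Say hello."}],
  # One of the values documented above: :auto, :default, :flex, or :priority.
  service_tier: :auto
)

# The response reports the tier that actually processed the request.
puts completion.service_tier
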
data/rbi/openai/models/function_definition.rbi CHANGED
@@ -38,7 +38,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  sig { returns(T.nilable(T::Boolean)) }
  attr_accessor :strict

@@ -69,7 +69,7 @@ module OpenAI
  # set to true, the model will follow the exact schema defined in the `parameters`
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
  # more about Structured Outputs in the
- # [function calling guide](docs/guides/function-calling).
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
  strict: nil
  )
  end
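
The corrected link points at the Structured Outputs documentation for the `strict` flag. A minimal sketch of a strict function tool as these comments describe it; the tool name, schema, model, and prompt are invented for illustration:

require "openai"

client = OpenAI::Client.new # assumes OPENAI_API_KEY in the environment

weather_tool = {
  type: :function,
  function: {
    name: "get_weather",
    description: "Look up the current weather for a city.",
    parameters: {
      type: "object",
      properties: {city: {type: "string"}},
      required: ["city"],
      additionalProperties: false
    },
    # With strict set to true, the model follows the exact schema above,
    # per the function calling guide linked in the corrected comment.
    strict: true
  }
}

completion = client.chat.completions.create(
  model: "gpt-4.1", # illustrative model name
  messages: [{role: :user, content: "What's the weather in Paris?"}],
  tools: [weather_tool]
)
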
data/rbi/openai/models/image_edit_completed_event.rbi ADDED
@@ -0,0 +1,346 @@
+ # typed: strong
+
+ module OpenAI
+ module Models
+ class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(OpenAI::ImageEditCompletedEvent, OpenAI::Internal::AnyHash)
+ end
+
+ # Base64-encoded final edited image data, suitable for rendering as an image.
+ sig { returns(String) }
+ attr_accessor :b64_json
+
+ # The background setting for the edited image.
+ sig { returns(OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol) }
+ attr_accessor :background
+
+ # The Unix timestamp when the event was created.
+ sig { returns(Integer) }
+ attr_accessor :created_at
+
+ # The output format for the edited image.
+ sig do
+ returns(OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol)
+ end
+ attr_accessor :output_format
+
+ # The quality setting for the edited image.
+ sig { returns(OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol) }
+ attr_accessor :quality
+
+ # The size of the edited image.
+ sig { returns(OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol) }
+ attr_accessor :size
+
+ # The type of the event. Always `image_edit.completed`.
+ sig { returns(Symbol) }
+ attr_accessor :type
+
+ # For `gpt-image-1` only, the token usage information for the image generation.
+ sig { returns(OpenAI::ImageEditCompletedEvent::Usage) }
+ attr_reader :usage
+
+ sig { params(usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash).void }
+ attr_writer :usage
+
+ # Emitted when image editing has completed and the final image is available.
+ sig do
+ params(
+ b64_json: String,
+ background: OpenAI::ImageEditCompletedEvent::Background::OrSymbol,
+ created_at: Integer,
+ output_format:
+ OpenAI::ImageEditCompletedEvent::OutputFormat::OrSymbol,
+ quality: OpenAI::ImageEditCompletedEvent::Quality::OrSymbol,
+ size: OpenAI::ImageEditCompletedEvent::Size::OrSymbol,
+ usage: OpenAI::ImageEditCompletedEvent::Usage::OrHash,
+ type: Symbol
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # Base64-encoded final edited image data, suitable for rendering as an image.
+ b64_json:,
+ # The background setting for the edited image.
+ background:,
+ # The Unix timestamp when the event was created.
+ created_at:,
+ # The output format for the edited image.
+ output_format:,
+ # The quality setting for the edited image.
+ quality:,
+ # The size of the edited image.
+ size:,
+ # For `gpt-image-1` only, the token usage information for the image generation.
+ usage:,
+ # The type of the event. Always `image_edit.completed`.
+ type: :"image_edit.completed"
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ b64_json: String,
+ background:
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol,
+ created_at: Integer,
+ output_format:
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol,
+ quality: OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol,
+ size: OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol,
+ type: Symbol,
+ usage: OpenAI::ImageEditCompletedEvent::Usage
+ }
+ )
+ end
+ def to_hash
+ end
+
+ # The background setting for the edited image.
+ module Background
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::Background)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ TRANSPARENT =
+ T.let(
+ :transparent,
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+ )
+ OPAQUE =
+ T.let(
+ :opaque,
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+ )
+ AUTO =
+ T.let(
+ :auto,
+ OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageEditCompletedEvent::Background::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The output format for the edited image.
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::OutputFormat)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ PNG =
+ T.let(
+ :png,
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+ )
+ WEBP =
+ T.let(
+ :webp,
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+ )
+ JPEG =
+ T.let(
+ :jpeg,
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+ )
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::ImageEditCompletedEvent::OutputFormat::TaggedSymbol
+ ]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The quality setting for the edited image.
+ module Quality
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias do
+ T.all(Symbol, OpenAI::ImageEditCompletedEvent::Quality)
+ end
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ LOW =
+ T.let(:low, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+ MEDIUM =
+ T.let(:medium, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+ HIGH =
+ T.let(:high, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+ AUTO =
+ T.let(:auto, OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageEditCompletedEvent::Quality::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
+ # The size of the edited image.
+ module Size
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditCompletedEvent::Size) }
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ SIZE_1024X1024 =
+ T.let(
+ :"1024x1024",
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+ )
+ SIZE_1024X1536 =
+ T.let(
+ :"1024x1536",
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+ )
+ SIZE_1536X1024 =
+ T.let(
+ :"1536x1024",
+ OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol
+ )
+ AUTO = T.let(:auto, OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageEditCompletedEvent::Size::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
+ class Usage < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::ImageEditCompletedEvent::Usage,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The number of tokens (images and text) in the input prompt.
+ sig { returns(Integer) }
+ attr_accessor :input_tokens
+
+ # The input tokens detailed information for the image generation.
+ sig do
+ returns(OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails)
+ end
+ attr_reader :input_tokens_details
+
+ sig do
+ params(
+ input_tokens_details:
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash
+ ).void
+ end
+ attr_writer :input_tokens_details
+
+ # The number of image tokens in the output image.
+ sig { returns(Integer) }
+ attr_accessor :output_tokens
+
+ # The total number of tokens (images and text) used for the image generation.
+ sig { returns(Integer) }
+ attr_accessor :total_tokens
+
+ # For `gpt-image-1` only, the token usage information for the image generation.
+ sig do
+ params(
+ input_tokens: Integer,
+ input_tokens_details:
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails::OrHash,
+ output_tokens: Integer,
+ total_tokens: Integer
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # The number of tokens (images and text) in the input prompt.
+ input_tokens:,
+ # The input tokens detailed information for the image generation.
+ input_tokens_details:,
+ # The number of image tokens in the output image.
+ output_tokens:,
+ # The total number of tokens (images and text) used for the image generation.
+ total_tokens:
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ input_tokens: Integer,
+ input_tokens_details:
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+ output_tokens: Integer,
+ total_tokens: Integer
+ }
+ )
+ end
+ def to_hash
+ end
+
+ class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # The number of image tokens in the input prompt.
+ sig { returns(Integer) }
+ attr_accessor :image_tokens
+
+ # The number of text tokens in the input prompt.
+ sig { returns(Integer) }
+ attr_accessor :text_tokens
+
+ # The input tokens detailed information for the image generation.
+ sig do
+ params(image_tokens: Integer, text_tokens: Integer).returns(
+ T.attached_class
+ )
+ end
+ def self.new(
+ # The number of image tokens in the input prompt.
+ image_tokens:,
+ # The number of text tokens in the input prompt.
+ text_tokens:
+ )
+ end
+
+ sig do
+ override.returns({ image_tokens: Integer, text_tokens: Integer })
+ end
+ def to_hash
+ end
+ end
+ end
+ end
+ end
+ end
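
Together with `ImageEditPartialImageEvent`, this model is what a streaming image edit yields. A hedged sketch of consuming such a stream; the `edit_stream_raw` entry point is an assumption modeled on the SDK's `*_stream_raw` naming (the streaming plumbing lands in `data/lib/openai/resources/images.rb` per the file list), `partial_image_index` is assumed from the partial-image event model added in this release, and only the completed-event fields are confirmed by the signatures above:

require "openai"
require "base64"
require "pathname"

client = OpenAI::Client.new # assumes OPENAI_API_KEY in the environment

# Assumed streaming entry point; input file and prompt are illustrative.
stream = client.images.edit_stream_raw(
  image: Pathname("input.png"),
  prompt: "Add a red hat",
  model: :"gpt-image-1",
  partial_images: 2
)

stream.each do |event|
  case event
  when OpenAI::Models::ImageEditPartialImageEvent
    # An intermediate render; the index field is assumed from the
    # corresponding partial-image event model in this release.
    puts "partial image ##{event.partial_image_index}"
  when OpenAI::Models::ImageEditCompletedEvent
    File.binwrite("edited.png", Base64.decode64(event.b64_json))
    puts "done, total tokens: #{event.usage.total_tokens}"
  end
end
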
data/rbi/openai/models/image_edit_params.rbi CHANGED
@@ -36,6 +36,14 @@ module OpenAI
  sig { returns(T.nilable(OpenAI::ImageEditParams::Background::OrSymbol)) }
  attr_accessor :background

+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ sig do
+ returns(T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol))
+ end
+ attr_accessor :input_fidelity
+
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
  # indicate where `image` should be edited. If there are multiple images provided,
  # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -70,6 +78,15 @@ module OpenAI
  end
  attr_accessor :output_format

+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ sig { returns(T.nilable(Integer)) }
+ attr_accessor :partial_images
+
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
  # Defaults to `auto`.
@@ -105,12 +122,15 @@ module OpenAI
  image: OpenAI::ImageEditParams::Image::Variants,
  prompt: String,
  background: T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
  mask: OpenAI::Internal::FileInput,
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
  n: T.nilable(Integer),
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -139,6 +159,10 @@ module OpenAI
  # If `transparent`, the output format needs to support transparency, so it should
  # be set to either `png` (default value) or `webp`.
  background: nil,
+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ input_fidelity: nil,
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
  # indicate where `image` should be edited. If there are multiple images provided,
  # the mask will be applied on the first image. Must be a valid PNG file, less than
@@ -158,6 +182,13 @@ module OpenAI
  # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
  # default value is `png`.
  output_format: nil,
+ # The number of partial images to generate. This parameter is used for streaming
+ # responses that return partial images. Value must be between 0 and 3. When set to
+ # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
+ partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
  # Defaults to `auto`.
@@ -186,12 +217,15 @@ module OpenAI
  prompt: String,
  background:
  T.nilable(OpenAI::ImageEditParams::Background::OrSymbol),
+ input_fidelity:
+ T.nilable(OpenAI::ImageEditParams::InputFidelity::OrSymbol),
  mask: OpenAI::Internal::FileInput,
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
  n: T.nilable(Integer),
  output_compression: T.nilable(Integer),
  output_format:
  T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
+ partial_images: T.nilable(Integer),
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
  response_format:
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -258,6 +292,29 @@ module OpenAI
  end
  end

+ # Control how much effort the model will exert to match the style and features,
+ # especially facial features, of input images. This parameter is only supported
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+ module InputFidelity
+ extend OpenAI::Internal::Type::Enum
+
+ TaggedSymbol =
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::InputFidelity) }
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+ HIGH =
+ T.let(:high, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
+ LOW = T.let(:low, OpenAI::ImageEditParams::InputFidelity::TaggedSymbol)
+
+ sig do
+ override.returns(
+ T::Array[OpenAI::ImageEditParams::InputFidelity::TaggedSymbol]
+ )
+ end
+ def self.values
+ end
+ end
+
  # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
  # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
  # is used.
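
A short sketch of the two new parameters in a plain (non-streaming) edit call; the file names and prompt are illustrative, and per the comments above `partial_images` only matters for streaming responses, so only `input_fidelity` is exercised here:

require "openai"
require "base64"
require "pathname"

client = OpenAI::Client.new # assumes OPENAI_API_KEY in the environment

response = client.images.edit(
  model: :"gpt-image-1",
  image: Pathname("portrait.png"),
  prompt: "Replace the background with a beach at sunset",
  # New in this release: high input fidelity preserves input detail,
  # especially faces; only gpt-image-1 supports it.
  input_fidelity: :high
)

File.binwrite("edited.png", Base64.decode64(response.data.first.b64_json))
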