openai 0.14.0 → 0.16.0

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +41 -0
  3. data/README.md +3 -3
  4. data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
  5. data/lib/openai/helpers/structured_output/union_of.rb +11 -1
  6. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion.rb +2 -2
  8. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  9. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  10. data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
  11. data/lib/openai/models/chat/completion_create_params.rb +33 -7
  12. data/lib/openai/models/function_definition.rb +1 -1
  13. data/lib/openai/models/image_edit_params.rb +4 -1
  14. data/lib/openai/models/image_generate_params.rb +4 -1
  15. data/lib/openai/models/images_response.rb +2 -5
  16. data/lib/openai/models/responses/response.rb +52 -6
  17. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  18. data/lib/openai/models/responses/response_create_params.rb +33 -7
  19. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  20. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  21. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  22. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  23. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  24. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  26. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  27. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  28. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  29. data/lib/openai/resources/chat/completions.rb +12 -4
  30. data/lib/openai/resources/images.rb +6 -6
  31. data/lib/openai/resources/responses.rb +42 -17
  32. data/lib/openai/version.rb +1 -1
  33. data/lib/openai.rb +0 -2
  34. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  35. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  36. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  37. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  38. data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
  39. data/rbi/openai/models/chat/completion_create_params.rbi +47 -9
  40. data/rbi/openai/models/function_definition.rbi +2 -2
  41. data/rbi/openai/models/image_edit_params.rbi +6 -0
  42. data/rbi/openai/models/image_generate_params.rbi +6 -0
  43. data/rbi/openai/models/images_response.rbi +2 -2
  44. data/rbi/openai/models/responses/response.rbi +47 -9
  45. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  46. data/rbi/openai/models/responses/response_create_params.rbi +47 -9
  47. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  48. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  49. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  50. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  51. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  52. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  53. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  54. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  55. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  56. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  57. data/rbi/openai/resources/chat/completions.rbi +36 -8
  58. data/rbi/openai/resources/images.rbi +22 -10
  59. data/rbi/openai/resources/responses.rbi +36 -8
  60. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  61. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  62. data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
  63. data/sig/openai/models/chat/completion_create_params.rbs +14 -0
  64. data/sig/openai/models/responses/response.rbs +14 -0
  65. data/sig/openai/models/responses/response_create_params.rbs +14 -0
  66. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  67. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  68. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  69. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  70. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  71. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  72. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  73. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  74. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  75. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  76. data/sig/openai/resources/chat/completions.rbs +4 -0
  77. data/sig/openai/resources/responses.rbs +4 -0
  78. metadata +2 -8
  79. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  80. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  81. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  82. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  83. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  84. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
data/rbi/openai/models/responses/response_text_delta_event.rbi

@@ -24,6 +24,12 @@ module OpenAI
  sig { returns(String) }
  attr_accessor :item_id

+ # The log probabilities of the tokens in the delta.
+ sig do
+   returns(T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob])
+ end
+ attr_accessor :logprobs
+
  # The index of the output item that the text delta was added to.
  sig { returns(Integer) }
  attr_accessor :output_index
@@ -42,6 +48,10 @@ module OpenAI
  content_index: Integer,
  delta: String,
  item_id: String,
+ logprobs:
+   T::Array[
+     OpenAI::Responses::ResponseTextDeltaEvent::Logprob::OrHash
+   ],
  output_index: Integer,
  sequence_number: Integer,
  type: Symbol
@@ -54,6 +64,8 @@ module OpenAI
  delta:,
  # The ID of the output item that the text delta was added to.
  item_id:,
+ # The log probabilities of the tokens in the delta.
+ logprobs:,
  # The index of the output item that the text delta was added to.
  output_index:,
  # The sequence number for this event.
@@ -69,6 +81,8 @@ module OpenAI
  content_index: Integer,
  delta: String,
  item_id: String,
+ logprobs:
+   T::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  type: Symbol
@@ -77,6 +91,123 @@ module OpenAI
  end
  def to_hash
  end
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+   OrHash =
+     T.type_alias do
+       T.any(
+         OpenAI::Responses::ResponseTextDeltaEvent::Logprob,
+         OpenAI::Internal::AnyHash
+       )
+     end
+
+   # A possible text token.
+   sig { returns(String) }
+   attr_accessor :token
+
+   # The log probability of this token.
+   sig { returns(Float) }
+   attr_accessor :logprob
+
+   # The log probability of the top 20 most likely tokens.
+   sig do
+     returns(
+       T.nilable(
+         T::Array[
+           OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob
+         ]
+       )
+     )
+   end
+   attr_reader :top_logprobs
+
+   sig do
+     params(
+       top_logprobs:
+         T::Array[
+           OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash
+         ]
+     ).void
+   end
+   attr_writer :top_logprobs
+
+   # A logprob is the logarithmic probability that the model assigns to producing a
+   # particular token at a given position in the sequence. Less-negative (higher)
+   # logprob values indicate greater model confidence in that token choice.
+   sig do
+     params(
+       token: String,
+       logprob: Float,
+       top_logprobs:
+         T::Array[
+           OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob::OrHash
+         ]
+     ).returns(T.attached_class)
+   end
+   def self.new(
+     # A possible text token.
+     token:,
+     # The log probability of this token.
+     logprob:,
+     # The log probability of the top 20 most likely tokens.
+     top_logprobs: nil
+   )
+   end
+
+   sig do
+     override.returns(
+       {
+         token: String,
+         logprob: Float,
+         top_logprobs:
+           T::Array[
+             OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob
+           ]
+       }
+     )
+   end
+   def to_hash
+   end
+
+   class TopLogprob < OpenAI::Internal::Type::BaseModel
+     OrHash =
+       T.type_alias do
+         T.any(
+           OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob,
+           OpenAI::Internal::AnyHash
+         )
+       end
+
+     # A possible text token.
+     sig { returns(T.nilable(String)) }
+     attr_reader :token
+
+     sig { params(token: String).void }
+     attr_writer :token
+
+     # The log probability of this token.
+     sig { returns(T.nilable(Float)) }
+     attr_reader :logprob
+
+     sig { params(logprob: Float).void }
+     attr_writer :logprob
+
+     sig do
+       params(token: String, logprob: Float).returns(T.attached_class)
+     end
+     def self.new(
+       # A possible text token.
+       token: nil,
+       # The log probability of this token.
+       logprob: nil
+     )
+     end
+
+     sig { override.returns({ token: String, logprob: Float }) }
+     def to_hash
+     end
+   end
+ end
  end
  end
  end
data/rbi/openai/models/responses/response_text_done_event.rbi

@@ -20,6 +20,12 @@ module OpenAI
  sig { returns(String) }
  attr_accessor :item_id

+ # The log probabilities of the tokens in the delta.
+ sig do
+   returns(T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob])
+ end
+ attr_accessor :logprobs
+
  # The index of the output item that the text content is finalized.
  sig { returns(Integer) }
  attr_accessor :output_index
@@ -41,6 +47,10 @@ module OpenAI
  params(
  content_index: Integer,
  item_id: String,
+ logprobs:
+   T::Array[
+     OpenAI::Responses::ResponseTextDoneEvent::Logprob::OrHash
+   ],
  output_index: Integer,
  sequence_number: Integer,
  text: String,
@@ -52,6 +62,8 @@ module OpenAI
  content_index:,
  # The ID of the output item that the text content is finalized.
  item_id:,
+ # The log probabilities of the tokens in the delta.
+ logprobs:,
  # The index of the output item that the text content is finalized.
  output_index:,
  # The sequence number for this event.
@@ -68,6 +80,8 @@ module OpenAI
  {
  content_index: Integer,
  item_id: String,
+ logprobs:
+   T::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  text: String,
@@ -77,6 +91,123 @@ module OpenAI
  end
  def to_hash
  end
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+   OrHash =
+     T.type_alias do
+       T.any(
+         OpenAI::Responses::ResponseTextDoneEvent::Logprob,
+         OpenAI::Internal::AnyHash
+       )
+     end
+
+   # A possible text token.
+   sig { returns(String) }
+   attr_accessor :token
+
+   # The log probability of this token.
+   sig { returns(Float) }
+   attr_accessor :logprob
+
+   # The log probability of the top 20 most likely tokens.
+   sig do
+     returns(
+       T.nilable(
+         T::Array[
+           OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob
+         ]
+       )
+     )
+   end
+   attr_reader :top_logprobs
+
+   sig do
+     params(
+       top_logprobs:
+         T::Array[
+           OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash
+         ]
+     ).void
+   end
+   attr_writer :top_logprobs
+
+   # A logprob is the logarithmic probability that the model assigns to producing a
+   # particular token at a given position in the sequence. Less-negative (higher)
+   # logprob values indicate greater model confidence in that token choice.
+   sig do
+     params(
+       token: String,
+       logprob: Float,
+       top_logprobs:
+         T::Array[
+           OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob::OrHash
+         ]
+     ).returns(T.attached_class)
+   end
+   def self.new(
+     # A possible text token.
+     token:,
+     # The log probability of this token.
+     logprob:,
+     # The log probability of the top 20 most likely tokens.
+     top_logprobs: nil
+   )
+   end
+
+   sig do
+     override.returns(
+       {
+         token: String,
+         logprob: Float,
+         top_logprobs:
+           T::Array[
+             OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob
+           ]
+       }
+     )
+   end
+   def to_hash
+   end
+
+   class TopLogprob < OpenAI::Internal::Type::BaseModel
+     OrHash =
+       T.type_alias do
+         T.any(
+           OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob,
+           OpenAI::Internal::AnyHash
+         )
+       end
+
+     # A possible text token.
+     sig { returns(T.nilable(String)) }
+     attr_reader :token
+
+     sig { params(token: String).void }
+     attr_writer :token
+
+     # The log probability of this token.
+     sig { returns(T.nilable(Float)) }
+     attr_reader :logprob
+
+     sig { params(logprob: Float).void }
+     attr_writer :logprob
+
+     sig do
+       params(token: String, logprob: Float).returns(T.attached_class)
+     end
+     def self.new(
+       # A possible text token.
+       token: nil,
+       # The log probability of this token.
+       logprob: nil
+     )
+     end
+
+     sig { override.returns({ token: String, logprob: Float }) }
+     def to_hash
+     end
+   end
+ end
  end
  end
  end
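Taken together, these two files give streaming consumers typed access to per-token log probabilities on `response.output_text.delta` and `response.output_text.done` events. Below is a minimal, illustrative Ruby sketch of reading them; the model name and prompt are placeholders, `#stream_raw` is the streaming counterpart of `#create` named elsewhere in this diff, and `logprobs` may be an empty array unless log probabilities were requested for the response.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# Hypothetical streaming request; parameters are placeholders.
stream = client.responses.stream_raw(
  model: "gpt-4.1", # placeholder model name
  input: "Say hello in one short sentence."
)

stream.each do |event|
  case event
  when OpenAI::Responses::ResponseTextDeltaEvent
    # Each Logprob carries the token, its logprob, and optional top_logprobs.
    event.logprobs.each do |lp|
      printf("%-14s %8.4f\n", lp.token.inspect, lp.logprob)
    end
  when OpenAI::Responses::ResponseTextDoneEvent
    puts "final: #{event.text}"
  end
end
```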
data/rbi/openai/resources/chat/completions.rbi

@@ -65,6 +65,7 @@ module OpenAI
  prediction:
    T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
  presence_penalty: T.nilable(Float),
+ prompt_cache_key: String,
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
    T.any(
@@ -73,6 +74,7 @@ module OpenAI
      OpenAI::StructuredOutput::JsonSchemaConverter,
      OpenAI::ResponseFormatJSONObject::OrHash
    ),
+ safety_identifier: String,
  seed: T.nilable(Integer),
  service_tier:
    T.nilable(
@@ -204,6 +206,10 @@ module OpenAI
  # whether they appear in the text so far, increasing the model's likelihood to
  # talk about new topics.
  presence_penalty: nil,
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ prompt_cache_key: nil,
  # **o-series models only**
  #
  # Constrains effort on reasoning for
@@ -222,6 +228,12 @@ module OpenAI
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  response_format: nil,
+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ safety_identifier: nil,
  # This feature is in Beta. If specified, our system will make a best effort to
  # sample deterministically, such that repeated requests with the same `seed` and
  # parameters should return the same result. Determinism is not guaranteed, and you
@@ -233,7 +245,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -288,9 +300,11 @@ module OpenAI
  #
  # We generally recommend altering this or `temperature` but not both.
  top_p: nil,
- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  user: nil,
  # This tool searches the web for relevant results to use in a response. Learn more
  # about the
@@ -361,6 +375,7 @@ module OpenAI
  prediction:
    T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
  presence_penalty: T.nilable(Float),
+ prompt_cache_key: String,
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
    T.any(
@@ -368,6 +383,7 @@ module OpenAI
      OpenAI::ResponseFormatJSONSchema::OrHash,
      OpenAI::ResponseFormatJSONObject::OrHash
    ),
+ safety_identifier: String,
  seed: T.nilable(Integer),
  service_tier:
    T.nilable(
@@ -493,6 +509,10 @@ module OpenAI
  # whether they appear in the text so far, increasing the model's likelihood to
  # talk about new topics.
  presence_penalty: nil,
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ prompt_cache_key: nil,
  # **o-series models only**
  #
  # Constrains effort on reasoning for
@@ -511,6 +531,12 @@ module OpenAI
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  response_format: nil,
+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ safety_identifier: nil,
  # This feature is in Beta. If specified, our system will make a best effort to
  # sample deterministically, such that repeated requests with the same `seed` and
  # parameters should return the same result. Determinism is not guaranteed, and you
@@ -522,7 +548,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -577,9 +603,11 @@ module OpenAI
  #
  # We generally recommend altering this or `temperature` but not both.
  top_p: nil,
- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  user: nil,
  # This tool searches the web for relevant results to use in a response. Learn more
  # about the
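In practice the two new parameters split the old `user` field's duties: `prompt_cache_key` buckets similar requests for caching, and `safety_identifier` gives OpenAI a stable, non-identifying user handle. A hedged sketch of passing both to `chat.completions.create`; the model name and identifier values are illustrative:

```ruby
require "digest"
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

chat_completion = client.chat.completions.create(
  model: "gpt-4.1", # placeholder model name
  messages: [{role: :user, content: "Summarize prompt caching in one sentence."}],
  # Caching half of the old `user` field: group similar requests together.
  prompt_cache_key: "support-bot-summaries",
  # Abuse-detection half: a stable per-user ID; hashing avoids sending raw PII,
  # as the updated doc comment recommends.
  safety_identifier: Digest::SHA256.hexdigest("user@example.com")
)

puts chat_completion.choices.first.message.content
```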
data/rbi/openai/resources/images.rbi

@@ -42,7 +42,7 @@ module OpenAI
  )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
  #
  # Creates an edited or extended image given one or more source images and a
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
@@ -115,6 +115,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -133,8 +136,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
- # streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
  stream: false,
  request_options: {}
  )
@@ -215,6 +218,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -233,14 +239,14 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
- # streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
  stream: true,
  request_options: {}
  )
  end

- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart.
  #
  # Creates an image given a prompt.
  # [Learn more](https://platform.openai.com/docs/guides/images).
@@ -300,6 +306,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
@@ -328,8 +337,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
- # for streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
  stream: false,
  request_options: {}
  )
@@ -397,6 +406,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
@@ -425,8 +437,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
- # for streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
  stream: true,
  request_options: {}
  )
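The corrected references now point at per-method streaming counterparts (`#edit_stream_raw`, `#generate_stream_raw`) rather than a single `#stream_raw`. A sketch of streaming generation with partial images follows; the prompt is a placeholder, the event's `type` field is an assumption, and, as the new note warns, fewer partials than requested may arrive if the final image finishes first.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

stream = client.images.generate_stream_raw(
  model: :"gpt-image-1",
  prompt: "A watercolor lighthouse at dusk", # placeholder prompt
  partial_images: 2 # up to 2 previews before the final image
)

stream.each do |event|
  # Event types distinguish partial previews from the completed image; fewer
  # partials than requested may arrive if generation finishes quickly.
  puts "image event: #{event.type}"
end
```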
data/rbi/openai/resources/responses.rbi

@@ -40,7 +40,9 @@ module OpenAI
  parallel_tool_calls: T.nilable(T::Boolean),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+ prompt_cache_key: String,
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+ safety_identifier: String,
  service_tier:
    T.nilable(
      OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -151,17 +153,27 @@ module OpenAI
  # Reference to a prompt template and its variables.
  # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
  prompt: nil,
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ prompt_cache_key: nil,
  # **o-series models only**
  #
  # Configuration options for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
  reasoning: nil,
+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ safety_identifier: nil,
  # Specifies the processing type used for serving the request.
  #
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -223,9 +235,11 @@ module OpenAI
  # - `disabled` (default): If a model response will exceed the context window size
  # for a model, the request will fail with a 400 error.
  truncation: nil,
- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  user: nil,
  # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
  # for streaming and non-streaming use cases, respectively.
@@ -268,7 +282,9 @@ module OpenAI
  parallel_tool_calls: T.nilable(T::Boolean),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+ prompt_cache_key: String,
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+ safety_identifier: String,
  service_tier:
    T.nilable(
      OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -385,17 +401,27 @@ module OpenAI
  # Reference to a prompt template and its variables.
  # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
  prompt: nil,
+ # Used by OpenAI to cache responses for similar requests to optimize your cache
+ # hit rates. Replaces the `user` field.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ prompt_cache_key: nil,
  # **o-series models only**
  #
  # Configuration options for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
  reasoning: nil,
+ # A stable identifier used to help detect users of your application that may be
+ # violating OpenAI's usage policies. The IDs should be a string that uniquely
+ # identifies each user. We recommend hashing their username or email address, in
+ # order to avoid sending us any identifying information.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+ safety_identifier: nil,
  # Specifies the processing type used for serving the request.
  #
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -457,9 +483,11 @@ module OpenAI
  # - `disabled` (default): If a model response will exceed the context window size
  # for a model, the request will fail with a 400 error.
  truncation: nil,
- # A stable identifier for your end-users. Used to boost cache hit rates by better
- # bucketing similar requests and to help OpenAI detect and prevent abuse.
- # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+ # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+ # `prompt_cache_key` instead to maintain caching optimizations. A stable
+ # identifier for your end-users. Used to boost cache hit rates by better bucketing
+ # similar requests and to help OpenAI detect and prevent abuse.
+ # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
  user: nil,
  # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
  # for streaming and non-streaming use cases, respectively.
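The Responses resource gains the same parameter pair as Chat Completions, so an existing `user:` argument can be migrated the same way. A minimal sketch under those assumptions; the model name, input, and identifier values are placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# Previously a single `user: "customer-1234"` served both purposes.
response = client.responses.create(
  model: "gpt-4.1", # placeholder model name
  input: "Write a haiku about caching.",
  prompt_cache_key: "haiku-feature",  # keeps cache bucketing behavior
  safety_identifier: "customer-1234"  # keeps abuse-detection behavior
)

puts response.id
```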
data/sig/openai/models/audio/speech_create_params.rbs

@@ -80,9 +80,6 @@ module OpenAI
  | :ballad
  | :coral
  | :echo
- | :fable
- | :onyx
- | :nova
  | :sage
  | :shimmer
  | :verse
@@ -97,9 +94,6 @@ module OpenAI
  BALLAD: :ballad
  CORAL: :coral
  ECHO: :echo
- FABLE: :fable
- ONYX: :onyx
- NOVA: :nova
  SAGE: :sage
  SHIMMER: :shimmer
  VERSE: :verse
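With `:fable`, `:onyx`, and `:nova` removed from the typed voice union, RBS/Sorbet-checked code should select one of the remaining voices. An illustrative sketch; the model name and the IO-like return value are assumptions:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

# :fable, :onyx, and :nova are no longer in the Voice type; pick a current one.
speech = client.audio.speech.create(
  model: :"gpt-4o-mini-tts", # placeholder model name
  voice: :coral,
  input: "Hello from the updated voice list."
)

# Assumes the call returns an IO-like object containing the audio bytes.
File.binwrite("hello.mp3", speech.read)
```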