openai 0.15.0 → 0.16.0

This diff shows the changes between publicly released versions of the package as they appear in its public registry. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: e15e098317bf9151fffc0d83be9fd3ead36872a58ea53628f2d0dd3028735c78
-  data.tar.gz: 04a779ac9f0b4418138bf4a7216e01109e48d608d76c33fc83f76e9de82d574e
+  metadata.gz: 78e829792412e459c6e623de5db5f553d7d1325fa28300ae94c62103d511c946
+  data.tar.gz: e7711af3f619a26ac688445353ee83b8ca250cad3feb84358f0a556650662e0b
 SHA512:
-  metadata.gz: d3673d18e0d3cfcd0db2ddc4c9c45bc6da8dd86371a38d85f7ad181f45780e6a17b48b9478e621c47a17c1708a2ec775ae8b61c0f6eb39c6b9d6686e09edfb65
-  data.tar.gz: a7fc556be0b4ba6ea16e6a2ba37bb0e9f62c2f7e39344a372b02b4c54b752e5718e15bfea4ffd8c277dd8db4f6d19530dc29e1ee5aee3fb2fc9a253269944b8f
+  metadata.gz: 3a6d15ee5239db7f9f8fe2494169ce279c26435bc7b83a5b8ecb2dfde47da5f6636f6db49a29f429b7fca35b58d90136d06f7de4e49677ee37fb94b899f518da
+  data.tar.gz: 1f6659074cc6e2317b47e0677a928765d9d8b73e898f7c2ee0eb435dc1fa0728df18f64859a6a732ec3beaaa1db16a5e96176eb1bdda174ac9368cccd0a243a2
data/CHANGELOG.md CHANGED
@@ -1,5 +1,28 @@
 # Changelog

+## 0.16.0 (2025-07-30)
+
+Full Changelog: [v0.15.0...v0.16.0](https://github.com/openai/openai-ruby/compare/v0.15.0...v0.16.0)
+
+### Features
+
+* add output_text method for non-streaming responses ([#757](https://github.com/openai/openai-ruby/issues/757)) ([50cf119](https://github.com/openai/openai-ruby/commit/50cf119106f9e16d9ac6a9898028b6d563a6f809))
+* **api:** manual updates ([e9fa8a0](https://github.com/openai/openai-ruby/commit/e9fa8a08d6ecebdd06212eaf6b9103082b7d67aa))
+
+
+### Bug Fixes
+
+* **internal:** ensure sorbet test always runs serially ([0601061](https://github.com/openai/openai-ruby/commit/0601061047525d16cc2afac64e5a4de0dd9de2e5))
+* provide parsed outputs for resumed streams ([#756](https://github.com/openai/openai-ruby/issues/756)) ([82254f9](https://github.com/openai/openai-ruby/commit/82254f980ccc0affa2555a81b0d8ed5aa0290835))
+* union definition re-using ([#760](https://github.com/openai/openai-ruby/issues/760)) ([3046c28](https://github.com/openai/openai-ruby/commit/3046c28935ca925c2f399f0350937d04eab54c0a))
+
+
+### Chores
+
+* extract reused JSON schema references even in unions ([#761](https://github.com/openai/openai-ruby/issues/761)) ([e17d3bf](https://github.com/openai/openai-ruby/commit/e17d3bf1fdf241f7a78ed72a39ddecabeb5877c8))
+* **internal:** refactor variable name ([#762](https://github.com/openai/openai-ruby/issues/762)) ([7e15b07](https://github.com/openai/openai-ruby/commit/7e15b0745dcbd3bf7fc4c1899d9d76e0a9ab1e48))
+* update contribute.md ([b4a0297](https://github.com/openai/openai-ruby/commit/b4a029775bb52d5db2f3fac235595f37b6746a61))
+
 ## 0.15.0 (2025-07-21)

 Full Changelog: [v0.14.0...v0.15.0](https://github.com/openai/openai-ruby/compare/v0.14.0...v0.15.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->

 ```ruby
-gem "openai", "~> 0.15.0"
+gem "openai", "~> 0.16.0"
 ```

 <!-- x-release-please-end -->
@@ -6,15 +6,9 @@ module OpenAI
       # To customize the JSON schema conversion for a type, implement the `JsonSchemaConverter` interface.
       module JsonSchemaConverter
         # @api private
-        POINTER = Object.new.tap do
+        POINTERS = Object.new.tap do
           _1.define_singleton_method(:inspect) do
-            "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTER>"
-          end
-        end.freeze
-        # @api private
-        COUNTER = Object.new.tap do
-          _1.define_singleton_method(:inspect) do
-            "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::COUNTER>"
+            "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTERS>"
           end
         end.freeze
         # @api private
@@ -81,14 +75,15 @@
         def cache_def!(state, type:, &blk)
           defs, path = state.fetch_values(:defs, :path)
           if (stored = defs[type])
-            stored[OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER] += 1
-            stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER)
+            pointers = stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+            pointers.first.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF).tap do
+              pointers << _1
+            end
           else
             ref_path = String.new
             ref = {"$ref": ref_path}
             stored = {
-              OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER => ref,
-              OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER => 1
+              OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS => [ref]
             }
             defs.store(type, stored)
             schema = blk.call
@@ -112,17 +107,21 @@
           )
           reused_defs = {}
           defs.each_value do |acc|
-            ref = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER)
-            if (no_ref = ref.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF))
-              acc[OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER] -= 1
+            sch = acc.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+            pointers = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+
+            no_refs, refs = pointers.partition do
+              _1.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF)
             end
-            cnt = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER)

-            sch = acc.except(
-              OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTER,
-              OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER
-            )
-            cnt > 1 && !no_ref ? reused_defs.store(ref.fetch(:$ref), sch) : ref.replace(sch)
+            case refs
+            in [ref]
+              ref.replace(sch)
+            in [_, ref, *]
+              reused_defs.store(ref.fetch(:$ref), sch)
+            else
+            end
+            no_refs.each { _1.replace(sch) }
           end

           xformed = reused_defs.transform_keys { _1.delete_prefix("#/$defs/") }
@@ -36,7 +36,17 @@
             mergeable_keys.each_key { mergeable_keys[_1] += 1 if schema.keys == _1 }
           end
           mergeable = mergeable_keys.any? { _1.last == schemas.length }
-          mergeable ? OpenAI::Internal::Util.deep_merge(*schemas, concat: true) : {anyOf: schemas}
+          if mergeable
+            OpenAI::Internal::Util.deep_merge(*schemas, concat: true)
+          else
+            {
+              anyOf: schemas.each do
+                if _1.key?(:$ref)
+                  _1.update(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF => true)
+                end
+              end
+            }
+          end
         end
       end

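Taken together, the JsonSchemaConverter hunks above replace the old POINTER/COUNTER bookkeeping: every site that references a cached definition appends its own `$ref` hash to a shared POINTERS array, union members get tagged with the NO_REF sentinel, and a final pass partitions each definition's pointers to decide between inlining and extraction into `$defs`. The toy sketch below illustrates that partition-and-replace idea on plain hashes; the names are stand-ins for this example only, not the gem's API.

```ruby
# Toy sketch of the POINTERS partition logic; NO_REF here is a stand-in
# sentinel, not OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF.
NO_REF = :no_ref

schema = {type: "object", properties: {x: {type: "number"}}}

# Three sites reference the same cached definition; ref_c sits inside a
# union, so it was tagged with the sentinel and must be inlined.
ref_a = {:$ref => "#/$defs/point"}
ref_b = {:$ref => "#/$defs/point"}
ref_c = {:$ref => "#/$defs/point", NO_REF => true}
pointers = [ref_a, ref_b, ref_c]

# Deleting the sentinel both strips it and partitions the pointers.
no_refs, refs = pointers.partition { |ptr| ptr.delete(NO_REF) }

reused_defs = {}
case refs
in [ref]       then ref.replace(schema)                         # single use: inline it
in [_, ref, *] then reused_defs.store(ref.fetch(:$ref), schema) # reused: extract to $defs
else
  # no plain references remain; nothing to extract
end
no_refs.each { |ptr| ptr.replace(schema) } # union members get the full schema

p refs.map(&:keys)   # => [[:$ref], [:$ref]]  ref_a and ref_b stay as references
p ref_c[:type]       # => "object"            ref_c was inlined
p reused_defs.keys   # => ["#/$defs/point"]
```

Because the `$ref` hashes are shared by identity, `Hash#replace` updates every schema that embedded them, which is how union members receive the inlined definition while other references keep pointing at `$defs`.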
@@ -10,10 +10,41 @@ module OpenAI
       # @return [String]
       required :id, String

-      # @!method initialize(id:)
+      # @!attribute content_parts
+      #   If a content parts array was provided, this is an array of `text` and
+      #   `image_url` parts. Otherwise, null.
+      #
+      #   @return [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil]
+      optional :content_parts,
+               -> {
+                 OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionStoreMessage::ContentPart]
+               },
+               nil?: true
+
+      # @!method initialize(id:, content_parts: nil)
+      #   Some parameter documentations has been truncated, see
+      #   {OpenAI::Models::Chat::ChatCompletionStoreMessage} for more details.
+      #
       #   A chat completion message generated by the model.
       #
       #   @param id [String] The identifier of the chat message.
+      #
+      #   @param content_parts [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil] If a content parts array was provided, this is an array of `text` and `image_url
+
+      # Learn about
+      # [text inputs](https://platform.openai.com/docs/guides/text-generation).
+      module ContentPart
+        extend OpenAI::Internal::Type::Union
+
+        # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
+        variant -> { OpenAI::Chat::ChatCompletionContentPartText }
+
+        # Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+        variant -> { OpenAI::Chat::ChatCompletionContentPartImage }
+
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage)]
+      end
     end
   end

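For context, `content_parts` surfaces on messages read back from stored chat completions. A hedged sketch, assuming the gem's stored-completions messages API ("chatcmpl_123" is a placeholder ID):

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

# "chatcmpl_123" is a placeholder ID for a stored chat completion.
client.chat.completions.messages.list("chatcmpl_123").auto_paging_each do |message|
  next if message.content_parts.nil? # null unless a parts array was originally provided

  message.content_parts.each do |part|
    case part
    when OpenAI::Chat::ChatCompletionContentPartText
      puts part.text
    when OpenAI::Chat::ChatCompletionContentPartImage
      puts part.image_url.url
    end
  end
end
```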
@@ -182,6 +182,14 @@ module OpenAI
       # @return [Float, nil]
       optional :presence_penalty, Float, nil?: true

+      # @!attribute prompt_cache_key
+      #   Used by OpenAI to cache responses for similar requests to optimize your cache
+      #   hit rates. Replaces the `user` field.
+      #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+      #
+      #   @return [String, nil]
+      optional :prompt_cache_key, String
+
       # @!attribute reasoning_effort
       #   **o-series models only**
       #
@@ -208,6 +216,16 @@ module OpenAI
       # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject, nil]
       optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat }

+      # @!attribute safety_identifier
+      #   A stable identifier used to help detect users of your application that may be
+      #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+      #   identifies each user. We recommend hashing their username or email address, in
+      #   order to avoid sending us any identifying information.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+      #
+      #   @return [String, nil]
+      optional :safety_identifier, String
+
       # @!attribute seed
       #   This feature is in Beta. If specified, our system will make a best effort to
       #   sample deterministically, such that repeated requests with the same `seed` and
@@ -320,9 +338,13 @@ module OpenAI
       optional :top_p, Float, nil?: true

       # @!attribute user
-      #   A stable identifier for your end-users. Used to boost cache hit rates by better
-      #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
       #
       #   @return [String, nil]
       optional :user, String
@@ -335,7 +357,7 @@ module OpenAI
       # @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
       optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }

-      # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
+      # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
       #   Some parameter documentations has been truncated, see
       #   {OpenAI::Models::Chat::CompletionCreateParams} for more details.
       #
@@ -371,10 +393,14 @@ module OpenAI
       #
       #   @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
       #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
       #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
       #
       #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
       #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
       #   @param seed [Integer, nil] This feature is in Beta.
       #
       #   @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -395,7 +421,7 @@ module OpenAI
       #
       #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
       #
-      #   @param user [String] A stable identifier for your end-users.
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
       #
       #   @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
       #
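As a usage sketch for the two new request parameters (the model name and identifier values below are placeholders; hashing is one way to follow the "no raw PII" recommendation):

```ruby
require "digest"
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

chat_completion = client.chat.completions.create(
  messages: [{role: "user", content: "Say hello!"}],
  model: "gpt-4.1", # placeholder model name
  # Groups similar requests for prompt caching; any stable string works.
  prompt_cache_key: "greeting-prompt-v1",
  # Stable per-user ID; hashed so no raw email address is sent.
  safety_identifier: Digest::SHA256.hexdigest("user@example.com")
)

puts chat_completion.choices.first.message.content
```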
@@ -171,6 +171,14 @@ module OpenAI
       # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
       optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true

+      # @!attribute prompt_cache_key
+      #   Used by OpenAI to cache responses for similar requests to optimize your cache
+      #   hit rates. Replaces the `user` field.
+      #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+      #
+      #   @return [String, nil]
+      optional :prompt_cache_key, String
+
       # @!attribute reasoning
       #   **o-series models only**
       #
@@ -180,6 +188,16 @@ module OpenAI
       # @return [OpenAI::Models::Reasoning, nil]
       optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

+      # @!attribute safety_identifier
+      #   A stable identifier used to help detect users of your application that may be
+      #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+      #   identifies each user. We recommend hashing their username or email address, in
+      #   order to avoid sending us any identifying information.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+      #
+      #   @return [String, nil]
+      optional :safety_identifier, String
+
       # @!attribute service_tier
       #   Specifies the processing type used for serving the request.
       #
@@ -246,13 +264,37 @@ module OpenAI
       optional :usage, -> { OpenAI::Responses::ResponseUsage }

       # @!attribute user
-      #   A stable identifier for your end-users. Used to boost cache hit rates by better
-      #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
       #
       #   @return [String, nil]
       optional :user, String

+      # Convenience property that aggregates all `output_text` items from the `output` list.
+      #
+      # If no `output_text` content blocks exist, then an empty string is returned.
+      #
+      # @return [String]
+      def output_text
+        texts = []
+
+        output.each do |item|
+          next unless item.type == :message
+          item.content.each do |content|
+            if content.type == :output_text
+              texts << content.text
+            end
+          end
+        end
+
+        texts.join
+      end
+
       # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
       #   Some parameter documentations has been truncated, see
       #   {OpenAI::Models::Responses::Response} for more details.
@@ -293,8 +335,12 @@ module OpenAI
       #
       #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
       #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
       #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
       #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
       #   @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
       #
       #   @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
@@ -307,7 +353,7 @@ module OpenAI
       #
       #   @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
       #
-      #   @param user [String] A stable identifier for your end-users.
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
       #
       #   @param object [Symbol, :response] The object type of this resource - always set to `response`.

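The new `output_text` helper shown above makes the common "just give me the text" case a one-liner. A minimal usage sketch (the model name is a placeholder):

```ruby
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-4.1", # placeholder model name
  input: "Write a haiku about Ruby."
)

# Concatenates the text of every `output_text` content block across all
# `message` output items; returns "" if the response produced no text.
puts response.output_text
```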
@@ -123,6 +123,14 @@ module OpenAI
       # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
       optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true

+      # @!attribute prompt_cache_key
+      #   Used by OpenAI to cache responses for similar requests to optimize your cache
+      #   hit rates. Replaces the `user` field.
+      #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+      #
+      #   @return [String, nil]
+      optional :prompt_cache_key, String
+
       # @!attribute reasoning
       #   **o-series models only**
       #
@@ -132,6 +140,16 @@ module OpenAI
       # @return [OpenAI::Models::Reasoning, nil]
       optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

+      # @!attribute safety_identifier
+      #   A stable identifier used to help detect users of your application that may be
+      #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+      #   identifies each user. We recommend hashing their username or email address, in
+      #   order to avoid sending us any identifying information.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+      #
+      #   @return [String, nil]
+      optional :safety_identifier, String
+
       # @!attribute service_tier
       #   Specifies the processing type used for serving the request.
       #
@@ -242,14 +260,18 @@ module OpenAI
       optional :truncation, enum: -> { OpenAI::Responses::ResponseCreateParams::Truncation }, nil?: true

       # @!attribute user
-      #   A stable identifier for your end-users. Used to boost cache hit rates by better
-      #   bucketing similar requests and to help OpenAI detect and prevent abuse.
-      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
       #
       #   @return [String, nil]
       optional :user, String

-      # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+      # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
       #   Some parameter documentations has been truncated, see
       #   {OpenAI::Models::Responses::ResponseCreateParams} for more details.
       #
@@ -275,8 +297,12 @@ module OpenAI
       #
       #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
       #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
       #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
       #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
       #   @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
       #
       #   @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -295,7 +321,7 @@ module OpenAI
       #
       #   @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
       #
-      #   @param user [String] A stable identifier for your end-users.
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
       #
       #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

@@ -30,7 +30,7 @@ module OpenAI
      # unsupported parameters in reasoning models,
      # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
      #
-     # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
+     # @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
      #
      # @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
      #
@@ -64,10 +64,14 @@ module OpenAI
      #
      # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
      #
+     # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+     #
      # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
      #
      # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
      #
+     # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+     #
      # @param seed [Integer, nil] This feature is in Beta.
      #
      # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -88,7 +92,7 @@ module OpenAI
      #
      # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
      #
-     # @param user [String] A stable identifier for your end-users.
+     # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
      #
@@ -226,7 +230,7 @@ module OpenAI
      # unsupported parameters in reasoning models,
      # [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
      #
-     # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
+     # @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
      #
      # @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
      #
@@ -260,10 +264,14 @@ module OpenAI
      #
      # @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
      #
+     # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+     #
      # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
      #
      # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
      #
+     # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+     #
      # @param seed [Integer, nil] This feature is in Beta.
      #
      # @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -284,7 +292,7 @@ module OpenAI
      #
      # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
      #
-     # @param user [String] A stable identifier for your end-users.
+     # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      # @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
      #
@@ -23,7 +23,7 @@ module OpenAI
      # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
      # your own data as input for the model's response.
      #
-     # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+     # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
      #
      # @param background [Boolean, nil] Whether to run the model response in the background.
      #
@@ -47,8 +47,12 @@ module OpenAI
      #
      # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
      #
+     # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+     #
      # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
      #
+     # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+     #
      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
      #
      # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -67,7 +71,7 @@ module OpenAI
      #
      # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
      #
-     # @param user [String] A stable identifier for your end-users.
+     # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
@@ -170,20 +174,22 @@ module OpenAI
        end
        model, tool_models = get_structured_output_models(parsed)

+       unwrap = ->(raw) do
+         if raw[:type] == "response.completed" && raw[:response]
+           parse_structured_outputs!(raw[:response], model, tool_models)
+         end
+         raw
+       end
+
        if previous_response_id
-         retrieve_params = {}
-         retrieve_params[:include] = params[:include] if params[:include]
-         retrieve_params[:request_options] = params[:request_options] if params[:request_options]
+         retrieve_params = params.slice(:include, :request_options)

-         raw_stream = retrieve_streaming(previous_response_id, retrieve_params)
+         raw_stream = retrieve_streaming_internal(
+           previous_response_id,
+           params: retrieve_params,
+           unwrap: unwrap
+         )
        else
-         unwrap = ->(raw) do
-           if raw[:type] == "response.completed" && raw[:response]
-             parse_structured_outputs!(raw[:response], model, tool_models)
-           end
-           raw
-         end
-
          parsed[:stream] = true

          raw_stream = @client.request(
@@ -222,7 +228,7 @@ module OpenAI
      # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
      # your own data as input for the model's response.
      #
-     # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+     # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
      #
      # @param background [Boolean, nil] Whether to run the model response in the background.
      #
@@ -246,8 +252,12 @@ module OpenAI
      #
      # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
      #
+     # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+     #
      # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
      #
+     # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+     #
      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
      #
      # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
@@ -266,7 +276,7 @@ module OpenAI
      #
      # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
      #
-     # @param user [String] A stable identifier for your end-users.
+     # @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
@@ -365,6 +375,21 @@ module OpenAI
        )
      end

+     private def retrieve_streaming_internal(response_id, params:, unwrap:)
+       parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
+       parsed.store(:stream, true)
+       @client.request(
+         method: :get,
+         path: ["responses/%1$s", response_id],
+         query: parsed,
+         headers: {"accept" => "text/event-stream"},
+         stream: OpenAI::Internal::Stream,
+         model: OpenAI::Responses::ResponseStreamEvent,
+         options: options,
+         unwrap: unwrap
+       )
+     end
+
      # Deletes a model response with the given ID.
      #
      # @overload delete(response_id, request_options: {})
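Together with the `unwrap` hook threaded through the `stream` helper above, this internal retrieval path is what lets a resumed stream parse structured outputs on its terminal `response.completed` event (the fix for [#756](https://github.com/openai/openai-ruby/issues/756)). A rough sketch of resuming, assuming the `previous_response_id` handling shown in the `stream` hunk (the response ID is a placeholder):

```ruby
require "openai"

client = OpenAI::Client.new

# Resume the event stream of an in-flight or stored response;
# "resp_123" is a placeholder ID.
stream = client.responses.stream(previous_response_id: "resp_123")

stream.each do |event|
  # As of 0.16.0, structured outputs are parsed for resumed streams too,
  # not only for freshly created ones.
  puts event.type
end
```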
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module OpenAI
-  VERSION = "0.15.0"
+  VERSION = "0.16.0"
 end