openai 0.4.1 → 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +27 -0
  3. data/README.md +1 -1
  4. data/lib/openai/internal/util.rb +5 -1
  5. data/lib/openai/models/audio/transcription_text_delta_event.rb +3 -3
  6. data/lib/openai/models/audio/transcription_text_done_event.rb +3 -3
  7. data/lib/openai/models/chat/chat_completion.rb +4 -4
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +4 -4
  9. data/lib/openai/models/chat/completion_create_params.rb +4 -4
  10. data/lib/openai/models/fine_tuning/alpha/grader_run_params.rb +17 -30
  11. data/lib/openai/models/fine_tuning/fine_tuning_job.rb +3 -5
  12. data/lib/openai/models/graders/multi_grader.rb +11 -4
  13. data/lib/openai/models/image_edit_params.rb +2 -2
  14. data/lib/openai/models/responses/response.rb +4 -4
  15. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +5 -5
  16. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +5 -5
  17. data/lib/openai/models/responses/response_create_params.rb +6 -4
  18. data/lib/openai/models/responses/response_includable.rb +3 -0
  19. data/lib/openai/models/responses/response_output_text.rb +120 -4
  20. data/lib/openai/models/responses/response_retrieve_params.rb +11 -1
  21. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  22. data/lib/openai/resources/beta/threads/messages.rb +11 -0
  23. data/lib/openai/resources/beta/threads/runs/steps.rb +5 -0
  24. data/lib/openai/resources/beta/threads/runs.rb +17 -0
  25. data/lib/openai/resources/beta/threads.rb +15 -2
  26. data/lib/openai/resources/containers/files/content.rb +3 -2
  27. data/lib/openai/resources/fine_tuning/alpha/graders.rb +6 -3
  28. data/lib/openai/resources/responses.rb +49 -3
  29. data/lib/openai/version.rb +1 -1
  30. data/lib/openai.rb +3 -1
  31. data/rbi/openai/models/audio/transcription_text_delta_event.rbi +4 -4
  32. data/rbi/openai/models/audio/transcription_text_done_event.rbi +4 -4
  33. data/rbi/openai/models/chat/chat_completion.rbi +6 -6
  34. data/rbi/openai/models/chat/chat_completion_chunk.rbi +6 -6
  35. data/rbi/openai/models/chat/completion_create_params.rbi +6 -6
  36. data/rbi/openai/models/fine_tuning/alpha/grader_run_params.rbi +24 -43
  37. data/rbi/openai/models/fine_tuning/fine_tuning_job.rbi +2 -3
  38. data/rbi/openai/models/graders/multi_grader.rbi +27 -32
  39. data/rbi/openai/models/image_edit_params.rbi +3 -3
  40. data/rbi/openai/models/responses/response.rbi +6 -6
  41. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +3 -3
  42. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +3 -3
  43. data/rbi/openai/models/responses/response_create_params.rbi +10 -6
  44. data/rbi/openai/models/responses/response_includable.rbi +7 -0
  45. data/rbi/openai/models/responses/response_output_text.rbi +189 -1
  46. data/rbi/openai/models/responses/response_retrieve_params.rbi +11 -0
  47. data/rbi/openai/resources/chat/completions.rbi +4 -4
  48. data/rbi/openai/resources/containers/files/content.rbi +1 -1
  49. data/rbi/openai/resources/fine_tuning/alpha/graders.rbi +10 -5
  50. data/rbi/openai/resources/images.rbi +1 -1
  51. data/rbi/openai/resources/responses.rbi +49 -5
  52. data/sig/openai/models/audio/transcription_text_delta_event.rbs +10 -5
  53. data/sig/openai/models/audio/transcription_text_done_event.rbs +10 -5
  54. data/sig/openai/models/fine_tuning/alpha/grader_run_params.rbs +6 -14
  55. data/sig/openai/models/fine_tuning/fine_tuning_job.rbs +1 -1
  56. data/sig/openai/models/graders/multi_grader.rbs +7 -7
  57. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +4 -4
  58. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +4 -4
  59. data/sig/openai/models/responses/response_includable.rbs +2 -0
  60. data/sig/openai/models/responses/response_output_text.rbs +104 -2
  61. data/sig/openai/models/responses/response_retrieve_params.rbs +10 -1
  62. data/sig/openai/resources/containers/files/content.rbs +1 -1
  63. data/sig/openai/resources/fine_tuning/alpha/graders.rbs +1 -1
  64. data/sig/openai/resources/responses.rbs +9 -1
  65. metadata +2 -2
@@ -4,6 +4,8 @@ module OpenAI
   module Models
     module Responses
       # @see OpenAI::Resources::Responses#retrieve
+      #
+      # @see OpenAI::Resources::Responses#retrieve_streaming
       class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel
         extend OpenAI::Internal::Type::RequestParameters::Converter
         include OpenAI::Internal::Type::RequestParameters
@@ -15,12 +17,20 @@ module OpenAI
         # @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
         optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] }

-        # @!method initialize(include: nil, request_options: {})
+        # @!attribute starting_after
+        #   The sequence number of the event after which to start streaming.
+        #
+        #   @return [Integer, nil]
+        optional :starting_after, Integer
+
+        # @!method initialize(include: nil, starting_after: nil, request_options: {})
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
         #
         #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
         #
+        #   @param starting_after [Integer] The sequence number of the event after which to start streaming.
+        #
         #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
       end
     end
@@ -25,11 +25,11 @@ module OpenAI
         variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }

         # Emitted when a partial code snippet is added by the code interpreter.
-        variant :"response.code_interpreter_call.code.delta",
+        variant :"response.code_interpreter_call_code.delta",
                 -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent }

         # Emitted when code snippet output is finalized by the code interpreter.
-        variant :"response.code_interpreter_call.code.done",
+        variant :"response.code_interpreter_call_code.done",
                 -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent }

         # Emitted when the code interpreter call is completed.
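
Downstream consumers that match on the raw event `type` have to track this rename, since the server now emits the underscored form. A minimal handling sketch, assuming an `OpenAI::Client` in `client`, a placeholder response ID, and the `delta`/`code` fields documented on the two event models:

require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

client.responses.retrieve_streaming("resp_123").each do |event|
  case event.type
  when :"response.code_interpreter_call_code.delta" # was :"response.code_interpreter_call.code.delta"
    print event.delta # partial code snippet
  when :"response.code_interpreter_call_code.done" # was :"response.code_interpreter_call.code.done"
    puts "\nfinal code snippet: #{event.code}"
  end
end
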
@@ -4,7 +4,10 @@ module OpenAI
   module Resources
     class Beta
       class Threads
+        # @deprecated The Assistants API is deprecated in favor of the Responses API
         class Messages
+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
           # Some parameter documentations has been truncated, see
           # {OpenAI::Models::Beta::Threads::MessageCreateParams} for more details.
           #
@@ -38,6 +41,8 @@ module OpenAI
             )
           end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
           # Some parameter documentations has been truncated, see
           # {OpenAI::Models::Beta::Threads::MessageRetrieveParams} for more details.
           #
@@ -68,6 +73,8 @@ module OpenAI
             )
           end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
           # Some parameter documentations has been truncated, see
           # {OpenAI::Models::Beta::Threads::MessageUpdateParams} for more details.
           #
@@ -101,6 +108,8 @@ module OpenAI
             )
           end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
           # Some parameter documentations has been truncated, see
           # {OpenAI::Models::Beta::Threads::MessageListParams} for more details.
           #
@@ -137,6 +146,8 @@ module OpenAI
             )
           end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
           # Deletes a message.
           #
           # @overload delete(message_id, thread_id:, request_options: {})
@@ -5,7 +5,10 @@ module OpenAI
     class Beta
       class Threads
         class Runs
+          # @deprecated The Assistants API is deprecated in favor of the Responses API
           class Steps
+            # @deprecated The Assistants API is deprecated in favor of the Responses API
+            #
             # Some parameter documentations has been truncated, see
             # {OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams} for more details.
             #
@@ -45,6 +48,8 @@ module OpenAI
               )
             end

+            # @deprecated The Assistants API is deprecated in favor of the Responses API
+            #
             # Some parameter documentations has been truncated, see
             # {OpenAI::Models::Beta::Threads::Runs::StepListParams} for more details.
             #
@@ -4,10 +4,13 @@ module OpenAI
   module Resources
     class Beta
      class Threads
+        # @deprecated The Assistants API is deprecated in favor of the Responses API
        class Runs
          # @return [OpenAI::Resources::Beta::Threads::Runs::Steps]
          attr_reader :steps

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for streaming
          # counterpart.
          #
@@ -76,6 +79,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # See {OpenAI::Resources::Beta::Threads::Runs#create} for non-streaming
          # counterpart.
          #
@@ -147,6 +152,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # Some parameter documentations has been truncated, see
          # {OpenAI::Models::Beta::Threads::RunRetrieveParams} for more details.
          #
@@ -177,6 +184,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # Some parameter documentations has been truncated, see
          # {OpenAI::Models::Beta::Threads::RunUpdateParams} for more details.
          #
@@ -210,6 +219,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # Some parameter documentations has been truncated, see
          # {OpenAI::Models::Beta::Threads::RunListParams} for more details.
          #
@@ -244,6 +255,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # Cancels a run that is `in_progress`.
          #
          # @overload cancel(run_id, thread_id:, request_options: {})
@@ -271,6 +284,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw} for
          # streaming counterpart.
          #
@@ -314,6 +329,8 @@ module OpenAI
            )
          end

+          # @deprecated The Assistants API is deprecated in favor of the Responses API
+          #
          # See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for
          # non-streaming counterpart.
          #
@@ -3,6 +3,7 @@
 module OpenAI
   module Resources
     class Beta
+      # @deprecated The Assistants API is deprecated in favor of the Responses API
      class Threads
        # @return [OpenAI::Resources::Beta::Threads::Runs]
        attr_reader :runs
@@ -10,6 +11,8 @@ module OpenAI
        # @return [OpenAI::Resources::Beta::Threads::Messages]
        attr_reader :messages

+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # Some parameter documentations has been truncated, see
        # {OpenAI::Models::Beta::ThreadCreateParams} for more details.
        #
@@ -39,6 +42,8 @@ module OpenAI
          )
        end

+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # Retrieves a thread.
        #
        # @overload retrieve(thread_id, request_options: {})
@@ -59,6 +64,8 @@ module OpenAI
          )
        end

+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # Some parameter documentations has been truncated, see
        # {OpenAI::Models::Beta::ThreadUpdateParams} for more details.
        #
@@ -88,6 +95,8 @@ module OpenAI
          )
        end

+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # Delete a thread.
        #
        # @overload delete(thread_id, request_options: {})
@@ -108,6 +117,8 @@ module OpenAI
          )
        end

+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart.
        #
        # Some parameter documentations has been truncated, see
@@ -166,11 +177,13 @@ module OpenAI
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end
-
+
        def stream
          raise NotImplementedError.new("higher level helpers are coming soon!")
        end
-
+
+        # @deprecated The Assistants API is deprecated in favor of the Responses API
+        #
        # See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming
        # counterpart.
        #
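
Every Assistants surface in the gem (threads, messages, runs, run steps) now carries the same `@deprecated` tag pointing at the Responses API. A hedged migration sketch; the model name and prompt are placeholders, and the commented-out lines show the deprecated flow:

# Before (deprecated): drive a conversation through a thread and a run.
# thread = client.beta.threads.create
# run = client.beta.threads.runs.create(thread.id, assistant_id: "asst_123")

# After: a single Responses API call.
response = client.responses.create(model: "gpt-4.1", input: "Say hello")
puts response.id
puts response.status
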
@@ -13,7 +13,7 @@ module OpenAI
          # @param container_id [String]
          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
          #
-          # @return [nil]
+          # @return [StringIO]
          #
          # @see OpenAI::Models::Containers::Files::ContentRetrieveParams
          def retrieve(file_id, params)
@@ -25,7 +25,8 @@ module OpenAI
            @client.request(
              method: :get,
              path: ["containers/%1$s/files/%2$s/content", container_id, file_id],
-              model: NilClass,
+              headers: {"accept" => "application/binary"},
+              model: StringIO,
              options: options
            )
          end
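
Container file content is now fetched with an `application/binary` accept header and parsed into a `StringIO` instead of being discarded. A usage sketch; both IDs are placeholders:

content = client.containers.files.content.retrieve(
  "file_abc123",
  container_id: "cntr_xyz789"
)
File.binwrite("container_file.bin", content.read) # content is a StringIO, not nil
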
@@ -5,15 +5,18 @@ module OpenAI
     class FineTuning
      class Alpha
        class Graders
+          # Some parameter documentations has been truncated, see
+          # {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details.
+          #
          # Run a grader.
          #
-          # @overload run(grader:, model_sample:, reference_answer:, request_options: {})
+          # @overload run(grader:, model_sample:, item: nil, request_options: {})
          #
          # @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
          #
-          # @param model_sample [String] The model sample to be evaluated.
+          # @param model_sample [String] The model sample to be evaluated. This value will be used to populate
          #
-          # @param reference_answer [String, Object, Array<Object>, Float] The reference answer for the evaluation.
+          # @param item [Object] The dataset item provided to the grader. This will be used to populate
          #
          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
          #
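
The breaking change here is that `reference_answer:` is gone; an optional `item:` now carries the dataset row that grader templates such as `{{item.answer}}` read from, while `model_sample:` populates the `{{sample...}}` namespace. A sketch with an illustrative string-check grader (the grader fields shown are an assumption based on the StringCheckGrader model, not copied from this diff):

result = client.fine_tuning.alpha.graders.run(
  grader: {
    type: :string_check,
    name: "exact_match",
    input: "{{sample.output_text}}",
    operation: :eq,
    reference: "{{item.answer}}"
  },
  model_sample: "Paris",
  item: {answer: "Paris"} # replaces the removed reference_answer: keyword
)
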
@@ -242,17 +242,21 @@ module OpenAI
         )
       end

+      # See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart.
+      #
       # Some parameter documentations has been truncated, see
       # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
       #
       # Retrieves a model response with the given ID.
       #
-      # @overload retrieve(response_id, include: nil, request_options: {})
+      # @overload retrieve(response_id, include: nil, starting_after: nil, request_options: {})
       #
       # @param response_id [String] The ID of the response to retrieve.
       #
       # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
       #
+      # @param starting_after [Integer] The sequence number of the event after which to start streaming.
+      #
       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
       #
       # @return [OpenAI::Models::Responses::Response]
@@ -260,6 +264,10 @@ module OpenAI
       # @see OpenAI::Models::Responses::ResponseRetrieveParams
       def retrieve(response_id, params = {})
         parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
+        if parsed[:stream]
+          message = "Please use `#retrieve_streaming` for the streaming use case."
+          raise ArgumentError.new(message)
+        end
         @client.request(
           method: :get,
           path: ["responses/%1$s", response_id],
@@ -269,6 +277,44 @@ module OpenAI
         )
       end

+      # See {OpenAI::Resources::Responses#retrieve} for non-streaming counterpart.
+      #
+      # Some parameter documentations has been truncated, see
+      # {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
+      #
+      # Retrieves a model response with the given ID.
+      #
+      # @overload retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {})
+      #
+      # @param response_id [String] The ID of the response to retrieve.
+      #
+      # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
+      #
+      # @param starting_after [Integer] The sequence number of the event after which to start streaming.
+      #
+      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+      #
+      # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
+      #
+      # @see OpenAI::Models::Responses::ResponseRetrieveParams
+      def retrieve_streaming(response_id, params = {})
+        parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
+        unless parsed.fetch(:stream, true)
+          message = "Please use `#retrieve` for the non-streaming use case."
+          raise ArgumentError.new(message)
+        end
+        parsed.store(:stream, true)
+        @client.request(
+          method: :get,
+          path: ["responses/%1$s", response_id],
+          query: parsed,
+          headers: {"accept" => "text/event-stream"},
+          stream: OpenAI::Internal::Stream,
+          model: OpenAI::Responses::ResponseStreamEvent,
+          options: options
+        )
+      end
+
       # Deletes a model response with the given ID.
       #
       # @overload delete(response_id, request_options: {})
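
`#retrieve` now raises an `ArgumentError` when given `stream: true`, and the new `#retrieve_streaming` pairs with `starting_after` to resume an in-progress stream from a known sequence number. A minimal sketch (placeholder ID and sequence number):

stream = client.responses.retrieve_streaming("resp_123", starting_after: 42)
stream.each do |event|
  puts event.type # replays events emitted after sequence number 42
end
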
@@ -299,14 +345,14 @@ module OpenAI
       #
       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
       #
-      # @return [nil]
+      # @return [OpenAI::Models::Responses::Response]
       #
       # @see OpenAI::Models::Responses::ResponseCancelParams
       def cancel(response_id, params = {})
         @client.request(
           method: :post,
           path: ["responses/%1$s/cancel", response_id],
-          model: NilClass,
+          model: OpenAI::Responses::Response,
           options: params[:request_options]
         )
       end
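
`#cancel` previously returned `nil`; it now parses the body into the cancelled `Response`, so callers can inspect the final state directly. A sketch with a placeholder ID:

response = client.responses.cancel("resp_123")
puts response.status # e.g. :cancelled for a cancelled background response
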
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module OpenAI
-  VERSION = "0.4.1"
+  VERSION = "0.5.1"
 end
data/lib/openai.rb CHANGED
@@ -16,7 +16,9 @@ require "time"
16
16
  require "uri"
17
17
  # We already ship the preferred sorbet manifests in the package itself.
18
18
  # `tapioca` currently does not offer us a way to opt out of unnecessary compilation.
19
- if Object.const_defined?(:Tapioca) && caller.chain([$PROGRAM_NAME]).chain(ARGV).grep(/tapioca/)
19
+ if Object.const_defined?(:Tapioca) &&
20
+ caller.chain([$PROGRAM_NAME]).chain(ARGV).any?(/tapioca/) &&
21
+ ARGV.none?(/dsl/)
20
22
  return
21
23
  end
22
24
 
@@ -98,10 +98,10 @@ module OpenAI
           attr_writer :token

           # The bytes that were used to generate the log probability.
-          sig { returns(T.nilable(T::Array[T.anything])) }
+          sig { returns(T.nilable(T::Array[Integer])) }
           attr_reader :bytes

-          sig { params(bytes: T::Array[T.anything]).void }
+          sig { params(bytes: T::Array[Integer]).void }
           attr_writer :bytes

           # The log probability of the token.
@@ -114,7 +114,7 @@ module OpenAI
           sig do
             params(
               token: String,
-              bytes: T::Array[T.anything],
+              bytes: T::Array[Integer],
               logprob: Float
             ).returns(T.attached_class)
           end
@@ -130,7 +130,7 @@ module OpenAI

           sig do
             override.returns(
-              { token: String, bytes: T::Array[T.anything], logprob: Float }
+              { token: String, bytes: T::Array[Integer], logprob: Float }
             )
           end
           def to_hash
@@ -100,10 +100,10 @@ module OpenAI
           attr_writer :token

           # The bytes that were used to generate the log probability.
-          sig { returns(T.nilable(T::Array[T.anything])) }
+          sig { returns(T.nilable(T::Array[Integer])) }
           attr_reader :bytes

-          sig { params(bytes: T::Array[T.anything]).void }
+          sig { params(bytes: T::Array[Integer]).void }
           attr_writer :bytes

           # The log probability of the token.
@@ -116,7 +116,7 @@ module OpenAI
           sig do
             params(
               token: String,
-              bytes: T::Array[T.anything],
+              bytes: T::Array[Integer],
               logprob: Float
             ).returns(T.attached_class)
           end
@@ -132,7 +132,7 @@ module OpenAI

           sig do
             override.returns(
-              { token: String, bytes: T::Array[T.anything], logprob: Float }
+              { token: String, bytes: T::Array[Integer], logprob: Float }
             )
           end
           def to_hash
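
Narrowing `bytes` from `T::Array[T.anything]` to `T::Array[Integer]` matches what the API returns: the byte values that make up the token's UTF-8 text. A decoding sketch, assuming `logprob` is one entry of a transcription event's `logprobs` array:

token_text = logprob.bytes.pack("C*").force_encoding(Encoding::UTF_8)
puts "#{token_text}: #{logprob.logprob}"
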
@@ -39,9 +39,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -105,9 +105,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -370,9 +370,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -41,9 +41,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -120,9 +120,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -790,9 +790,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -277,9 +277,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -632,9 +632,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -1015,9 +1015,9 @@ module OpenAI
       #   utilize scale tier credits until they are exhausted.
       # - If set to 'auto', and the Project is not Scale tier enabled, the request will
       #   be processed using the default service tier with a lower uptime SLA and no
-      #   latency guarentee.
+      #   latency guarantee.
       # - If set to 'default', the request will be processed using the default service
-      #   tier with a lower uptime SLA and no latency guarentee.
+      #   tier with a lower uptime SLA and no latency guarantee.
       # - If set to 'flex', the request will be processed with the Flex Processing
       #   service tier.
       #   [Learn more](https://platform.openai.com/docs/guides/flex-processing).