openai 0.14.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +18 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_params.rb +4 -1
  11. data/lib/openai/models/image_generate_params.rb +4 -1
  12. data/lib/openai/models/images_response.rb +2 -5
  13. data/lib/openai/models/responses/response.rb +2 -2
  14. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  15. data/lib/openai/models/responses/response_create_params.rb +2 -2
  16. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  17. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  18. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  19. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  20. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  21. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  22. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  23. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  24. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  25. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  26. data/lib/openai/resources/images.rb +6 -6
  27. data/lib/openai/resources/responses.rb +2 -2
  28. data/lib/openai/version.rb +1 -1
  29. data/lib/openai.rb +0 -2
  30. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  31. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  32. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  33. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  34. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  35. data/rbi/openai/models/function_definition.rbi +2 -2
  36. data/rbi/openai/models/image_edit_params.rbi +6 -0
  37. data/rbi/openai/models/image_generate_params.rbi +6 -0
  38. data/rbi/openai/models/images_response.rbi +2 -2
  39. data/rbi/openai/models/responses/response.rbi +3 -3
  40. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  41. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  42. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  43. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  44. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  45. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  46. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  47. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  48. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  49. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  50. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  51. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  52. data/rbi/openai/resources/chat/completions.rbi +2 -2
  53. data/rbi/openai/resources/images.rbi +22 -10
  54. data/rbi/openai/resources/responses.rbi +2 -2
  55. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  56. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  57. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  58. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  59. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  60. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  61. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  62. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  63. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  64. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  65. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  66. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  67. metadata +2 -8
  68. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  69. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  70. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  71. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  72. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  73. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
@@ -42,7 +42,7 @@ module OpenAI
  )
  end
 
- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#edit_stream_raw} for streaming counterpart.
  #
  # Creates an edited or extended image given one or more source images and a
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
@@ -115,6 +115,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -133,8 +136,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
- # streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
  stream: false,
  request_options: {}
  )
@@ -215,6 +218,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated. `high`, `medium` and `low` are
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -233,14 +239,14 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#edit` for
- # streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#edit_stream_raw` or
+ # `#edit` for streaming and non-streaming use cases, respectively.
  stream: true,
  request_options: {}
  )
  end
 
- # See {OpenAI::Resources::Images#stream_raw} for streaming counterpart.
+ # See {OpenAI::Resources::Images#generate_stream_raw} for streaming counterpart.
  #
  # Creates an image given a prompt.
  # [Learn more](https://platform.openai.com/docs/guides/images).
@@ -300,6 +306,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
@@ -328,8 +337,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
- # for streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
  stream: false,
  request_options: {}
  )
@@ -397,6 +406,9 @@ module OpenAI
  # The number of partial images to generate. This parameter is used for streaming
  # responses that return partial images. Value must be between 0 and 3. When set to
  # 0, the response will be a single image sent in one streaming event.
+ #
+ # Note that the final image may be sent before the full number of partial images
+ # are generated if the full image is generated more quickly.
  partial_images: nil,
  # The quality of the image that will be generated.
  #
@@ -425,8 +437,8 @@ module OpenAI
  # and detect abuse.
  # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
  user: nil,
- # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#generate`
- # for streaming and non-streaming use cases, respectively.
+ # There is no need to provide `stream:`. Instead, use `#generate_stream_raw` or
+ # `#generate` for streaming and non-streaming use cases, respectively.
  stream: true,
  request_options: {}
  )
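
A minimal usage sketch of the renamed streaming entry points (not part of the diff; assumes a configured client, `gpt-image-1`, and illustrative prompts):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Non-streaming generation is unchanged.
    client.images.generate(prompt: "a watercolor fox", model: "gpt-image-1")

    # Streaming now goes through #generate_stream_raw / #edit_stream_raw
    # (previously #stream_raw).
    stream = client.images.generate_stream_raw(
      prompt: "a watercolor fox",
      model: "gpt-image-1",
      partial_images: 2 # the final image may still arrive before both partials
    )
    stream.each { |event| puts event.type }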
@@ -161,7 +161,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -395,7 +395,7 @@ module OpenAI
  # - If set to 'auto', then the request will be processed with the service tier
  # configured in the Project settings. Unless otherwise configured, the Project
  # will use 'default'.
- # - If set to 'default', then the requset will be processed with the standard
+ # - If set to 'default', then the request will be processed with the standard
  # pricing and performance for the selected model.
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
  # 'priority', then the request will be processed with the corresponding service
@@ -80,9 +80,6 @@ module OpenAI
  | :ballad
  | :coral
  | :echo
- | :fable
- | :onyx
- | :nova
  | :sage
  | :shimmer
  | :verse
@@ -97,9 +94,6 @@ module OpenAI
  BALLAD: :ballad
  CORAL: :coral
  ECHO: :echo
- FABLE: :fable
- ONYX: :onyx
- NOVA: :nova
  SAGE: :sage
  SHIMMER: :shimmer
  VERSE: :verse
@@ -46,9 +46,6 @@ module OpenAI
  | :ballad
  | :coral
  | :echo
- | :fable
- | :onyx
- | :nova
  | :sage
  | :shimmer
  | :verse
@@ -63,9 +60,6 @@ module OpenAI
  BALLAD: :ballad
  CORAL: :coral
  ECHO: :echo
- FABLE: :fable
- ONYX: :onyx
- NOVA: :nova
  SAGE: :sage
  SHIMMER: :shimmer
  VERSE: :verse
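
The removed `:fable`, `:onyx` and `:nova` symbols only disappear from the typed signatures above. A hedged sketch of text-to-speech with a voice that remains in the union (the model name is illustrative):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    speech = client.audio.speech.create(
      model: "gpt-4o-mini-tts",  # illustrative model name
      voice: :coral,             # :fable, :onyx and :nova are gone from the signatures
      input: "Hello from openai 0.15.0"
    )
    # `speech` wraps the binary audio; persist it however your app expects,
    # e.g. File.binwrite("speech.mp3", speech.read)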
@@ -3,7 +3,7 @@ module OpenAI
  module Responses
  type response_mcp_call_arguments_delta_event =
  {
- delta: top,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -11,7 +11,7 @@ module OpenAI
  }
 
  class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
- attr_accessor delta: top
+ attr_accessor delta: String
 
  attr_accessor item_id: String
 
@@ -22,7 +22,7 @@ module OpenAI
  attr_accessor type: :"response.mcp_call_arguments.delta"
 
  def initialize: (
- delta: top,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -30,7 +30,7 @@ module OpenAI
  ) -> void
 
  def to_hash: -> {
- delta: top,
+ delta: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -3,7 +3,7 @@ module OpenAI
  module Responses
  type response_mcp_call_arguments_done_event =
  {
- arguments: top,
+ arguments: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -11,7 +11,7 @@ module OpenAI
  }
 
  class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
- attr_accessor arguments: top
+ attr_accessor arguments: String
 
  attr_accessor item_id: String
 
@@ -22,7 +22,7 @@ module OpenAI
  attr_accessor type: :"response.mcp_call_arguments.done"
 
  def initialize: (
- arguments: top,
+ arguments: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
@@ -30,7 +30,7 @@ module OpenAI
  ) -> void
 
  def to_hash: -> {
- arguments: top,
+ arguments: String,
  item_id: String,
  output_index: Integer,
  sequence_number: Integer,
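
Since `delta` and `arguments` on the MCP argument events are now plain JSON strings rather than untyped values, a consumer can parse them directly. A sketch; the model and MCP tool configuration are illustrative:

    require "openai"
    require "json"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    stream = client.responses.stream_raw(
      model: "gpt-4.1",
      input: "List the tools on the example server and call one of them.",
      tools: [{type: "mcp", server_label: "example", server_url: "https://example.com/mcp"}]
    )

    stream.each do |event|
      case event
      when OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
        print event.delta                  # JSON fragment, now typed as String
      when OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
        args = JSON.parse(event.arguments) # complete arguments as a JSON string
        puts "\n#{event.item_id}: #{args.inspect}"
      end
    end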
@@ -2,19 +2,32 @@ module OpenAI
  module Models
  module Responses
  type response_mcp_call_completed_event =
- { sequence_number: Integer, type: :"response.mcp_call.completed" }
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: :"response.mcp_call.completed"
+ }
 
  class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor item_id: String
+
+ attr_accessor output_index: Integer
+
  attr_accessor sequence_number: Integer
 
  attr_accessor type: :"response.mcp_call.completed"
 
  def initialize: (
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.mcp_call.completed"
  ) -> void
 
  def to_hash: -> {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_call.completed"
  }
@@ -2,19 +2,32 @@ module OpenAI
  module Models
  module Responses
  type response_mcp_call_failed_event =
- { sequence_number: Integer, type: :"response.mcp_call.failed" }
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: :"response.mcp_call.failed"
+ }
 
  class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor item_id: String
+
+ attr_accessor output_index: Integer
+
  attr_accessor sequence_number: Integer
 
  attr_accessor type: :"response.mcp_call.failed"
 
  def initialize: (
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.mcp_call.failed"
  ) -> void
 
  def to_hash: -> {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_call.failed"
  }
@@ -2,19 +2,32 @@ module OpenAI
  module Models
  module Responses
  type response_mcp_list_tools_completed_event =
- { sequence_number: Integer, type: :"response.mcp_list_tools.completed" }
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: :"response.mcp_list_tools.completed"
+ }
 
  class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor item_id: String
+
+ attr_accessor output_index: Integer
+
  attr_accessor sequence_number: Integer
 
  attr_accessor type: :"response.mcp_list_tools.completed"
 
  def initialize: (
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.mcp_list_tools.completed"
  ) -> void
 
  def to_hash: -> {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_list_tools.completed"
  }
@@ -2,19 +2,32 @@ module OpenAI
  module Models
  module Responses
  type response_mcp_list_tools_failed_event =
- { sequence_number: Integer, type: :"response.mcp_list_tools.failed" }
+ {
+ item_id: String,
+ output_index: Integer,
+ sequence_number: Integer,
+ type: :"response.mcp_list_tools.failed"
+ }
 
  class ResponseMcpListToolsFailedEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor item_id: String
+
+ attr_accessor output_index: Integer
+
  attr_accessor sequence_number: Integer
 
  attr_accessor type: :"response.mcp_list_tools.failed"
 
  def initialize: (
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.mcp_list_tools.failed"
  ) -> void
 
  def to_hash: -> {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_list_tools.failed"
  }
@@ -3,21 +3,31 @@ module OpenAI
  module Responses
  type response_mcp_list_tools_in_progress_event =
  {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_list_tools.in_progress"
  }
 
  class ResponseMcpListToolsInProgressEvent < OpenAI::Internal::Type::BaseModel
+ attr_accessor item_id: String
+
+ attr_accessor output_index: Integer
+
  attr_accessor sequence_number: Integer
 
  attr_accessor type: :"response.mcp_list_tools.in_progress"
 
  def initialize: (
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.mcp_list_tools.in_progress"
  ) -> void
 
  def to_hash: -> {
+ item_id: String,
+ output_index: Integer,
  sequence_number: Integer,
  type: :"response.mcp_list_tools.in_progress"
  }
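
With `item_id` and `output_index` now present on all five MCP lifecycle events, they can be correlated with the output items they refer to. A sketch, reusing the `stream` from the earlier MCP example:

    statuses = Hash.new { |hash, key| hash[key] = [] }

    stream.each do |event|
      case event
      when OpenAI::Responses::ResponseMcpCallCompletedEvent,
           OpenAI::Responses::ResponseMcpCallFailedEvent,
           OpenAI::Responses::ResponseMcpListToolsCompletedEvent,
           OpenAI::Responses::ResponseMcpListToolsFailedEvent,
           OpenAI::Responses::ResponseMcpListToolsInProgressEvent
        # item_id / output_index are new on these events in 0.15.0.
        statuses[event.item_id] << [event.output_index, event.type]
      end
    end

    statuses.each { |item_id, events| puts "#{item_id}: #{events.inspect}" }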
@@ -51,8 +51,6 @@ module OpenAI
  | OpenAI::Responses::ResponseMcpListToolsInProgressEvent
  | OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent
  | OpenAI::Responses::ResponseQueuedEvent
- | OpenAI::Responses::ResponseReasoningDeltaEvent
- | OpenAI::Responses::ResponseReasoningDoneEvent
  | OpenAI::Responses::ResponseReasoningSummaryDeltaEvent
  | OpenAI::Responses::ResponseReasoningSummaryDoneEvent
 
@@ -6,6 +6,7 @@ module OpenAI
  content_index: Integer,
  delta: String,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.output_text.delta"
@@ -18,6 +19,8 @@ module OpenAI
 
  attr_accessor item_id: String
 
+ attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob]
+
  attr_accessor output_index: Integer
 
  attr_accessor sequence_number: Integer
@@ -28,6 +31,7 @@ module OpenAI
  content_index: Integer,
  delta: String,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.output_text.delta"
@@ -37,10 +41,58 @@ module OpenAI
  content_index: Integer,
  delta: String,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.output_text.delta"
  }
+
+ type logprob =
+ {
+ token: String,
+ logprob: Float,
+ top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+ }
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+ attr_accessor token: String
+
+ attr_accessor logprob: Float
+
+ attr_reader top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]?
+
+ def top_logprobs=: (
+ ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+ ) -> ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+
+ def initialize: (
+ token: String,
+ logprob: Float,
+ ?top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+ ) -> void
+
+ def to_hash: -> {
+ token: String,
+ logprob: Float,
+ top_logprobs: ::Array[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
+ }
+
+ type top_logprob = { token: String, logprob: Float }
+
+ class TopLogprob < OpenAI::Internal::Type::BaseModel
+ attr_reader token: String?
+
+ def token=: (String) -> String
+
+ attr_reader logprob: Float?
+
+ def logprob=: (Float) -> Float
+
+ def initialize: (?token: String, ?logprob: Float) -> void
+
+ def to_hash: -> { token: String, logprob: Float }
+ end
+ end
  end
  end
  end
@@ -5,6 +5,7 @@ module OpenAI
  {
  content_index: Integer,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  text: String,
@@ -16,6 +17,8 @@ module OpenAI
 
  attr_accessor item_id: String
 
+ attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob]
+
  attr_accessor output_index: Integer
 
  attr_accessor sequence_number: Integer
@@ -27,6 +30,7 @@ module OpenAI
  def initialize: (
  content_index: Integer,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  text: String,
@@ -36,11 +40,59 @@ module OpenAI
  def to_hash: -> {
  content_index: Integer,
  item_id: String,
+ logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob],
  output_index: Integer,
  sequence_number: Integer,
  text: String,
  type: :"response.output_text.done"
  }
+
+ type logprob =
+ {
+ token: String,
+ logprob: Float,
+ top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+ }
+
+ class Logprob < OpenAI::Internal::Type::BaseModel
+ attr_accessor token: String
+
+ attr_accessor logprob: Float
+
+ attr_reader top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]?
+
+ def top_logprobs=: (
+ ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+ ) -> ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+
+ def initialize: (
+ token: String,
+ logprob: Float,
+ ?top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+ ) -> void
+
+ def to_hash: -> {
+ token: String,
+ logprob: Float,
+ top_logprobs: ::Array[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
+ }
+
+ type top_logprob = { token: String, logprob: Float }
+
+ class TopLogprob < OpenAI::Internal::Type::BaseModel
+ attr_reader token: String?
+
+ def token=: (String) -> String
+
+ attr_reader logprob: Float?
+
+ def logprob=: (Float) -> Float
+
+ def initialize: (?token: String, ?logprob: Float) -> void
+
+ def to_hash: -> { token: String, logprob: Float }
+ end
+ end
  end
  end
  end
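
A sketch of reading the new `logprobs` arrays from the output-text events. Whether the arrays are populated depends on how the request asks for logprobs, which is not shown here; `stream` is assumed as in the earlier sketches:

    stream.each do |event|
      case event
      when OpenAI::Responses::ResponseTextDeltaEvent
        event.logprobs.each { |lp| printf("%-15s %8.4f\n", lp.token, lp.logprob) }
      when OpenAI::Responses::ResponseTextDoneEvent
        total = event.logprobs.sum(&:logprob)
        puts "final text: #{event.text.inspect} (summed logprob #{total.round(4)})"
      end
    end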
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: openai
  version: !ruby/object:Gem::Version
- version: 0.14.0
+ version: 0.15.0
  platform: ruby
  authors:
  - OpenAI
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-07-16 00:00:00.000000000 Z
+ date: 2025-07-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: connection_pool
@@ -435,8 +435,6 @@ files:
  - lib/openai/models/responses/response_output_text_annotation_added_event.rb
  - lib/openai/models/responses/response_prompt.rb
  - lib/openai/models/responses/response_queued_event.rb
- - lib/openai/models/responses/response_reasoning_delta_event.rb
- - lib/openai/models/responses/response_reasoning_done_event.rb
  - lib/openai/models/responses/response_reasoning_item.rb
  - lib/openai/models/responses/response_reasoning_summary_delta_event.rb
  - lib/openai/models/responses/response_reasoning_summary_done_event.rb
@@ -958,8 +956,6 @@ files:
  - rbi/openai/models/responses/response_output_text_annotation_added_event.rbi
  - rbi/openai/models/responses/response_prompt.rbi
  - rbi/openai/models/responses/response_queued_event.rbi
- - rbi/openai/models/responses/response_reasoning_delta_event.rbi
- - rbi/openai/models/responses/response_reasoning_done_event.rbi
  - rbi/openai/models/responses/response_reasoning_item.rbi
  - rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi
  - rbi/openai/models/responses/response_reasoning_summary_done_event.rbi
@@ -1471,8 +1467,6 @@ files:
  - sig/openai/models/responses/response_output_text_annotation_added_event.rbs
  - sig/openai/models/responses/response_prompt.rbs
  - sig/openai/models/responses/response_queued_event.rbs
- - sig/openai/models/responses/response_reasoning_delta_event.rbs
- - sig/openai/models/responses/response_reasoning_done_event.rbs
  - sig/openai/models/responses/response_reasoning_item.rbs
  - sig/openai/models/responses/response_reasoning_summary_delta_event.rbs
  - sig/openai/models/responses/response_reasoning_summary_done_event.rbs
@@ -1,60 +0,0 @@
- # frozen_string_literal: true
-
- module OpenAI
- module Models
- module Responses
- class ResponseReasoningDeltaEvent < OpenAI::Internal::Type::BaseModel
- # @!attribute content_index
- # The index of the reasoning content part within the output item.
- #
- # @return [Integer]
- required :content_index, Integer
-
- # @!attribute delta
- # The partial update to the reasoning content.
- #
- # @return [Object]
- required :delta, OpenAI::Internal::Type::Unknown
-
- # @!attribute item_id
- # The unique identifier of the item for which reasoning is being updated.
- #
- # @return [String]
- required :item_id, String
-
- # @!attribute output_index
- # The index of the output item in the response's output array.
- #
- # @return [Integer]
- required :output_index, Integer
-
- # @!attribute sequence_number
- # The sequence number of this event.
- #
- # @return [Integer]
- required :sequence_number, Integer
-
- # @!attribute type
- # The type of the event. Always 'response.reasoning.delta'.
- #
- # @return [Symbol, :"response.reasoning.delta"]
- required :type, const: :"response.reasoning.delta"
-
- # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.reasoning.delta")
- # Emitted when there is a delta (partial update) to the reasoning content.
- #
- # @param content_index [Integer] The index of the reasoning content part within the output item.
- #
- # @param delta [Object] The partial update to the reasoning content.
- #
- # @param item_id [String] The unique identifier of the item for which reasoning is being updated.
- #
- # @param output_index [Integer] The index of the output item in the response's output array.
- #
- # @param sequence_number [Integer] The sequence number of this event.
- #
- # @param type [Symbol, :"response.reasoning.delta"] The type of the event. Always 'response.reasoning.delta'.
- end
- end
- end
- end
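
Code that matched the removed `ResponseReasoningDeltaEvent` / `ResponseReasoningDoneEvent` classes can switch to the summary events, which remain in the stream union. A hedged sketch, again reusing `stream`; consult the SDK's model docs for the exact field shapes of the summary events:

    stream.each do |event|
      case event
      when OpenAI::Responses::ResponseReasoningSummaryDeltaEvent,
           OpenAI::Responses::ResponseReasoningSummaryDoneEvent
        puts event.type # e.g. inspect event.to_hash for the summary payload
      end
    end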