openai 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +25 -0
  6. data/lib/openai/internal/type/array_of.rb +6 -1
  7. data/lib/openai/internal/type/base_model.rb +76 -24
  8. data/lib/openai/internal/type/boolean.rb +7 -1
  9. data/lib/openai/internal/type/converter.rb +42 -34
  10. data/lib/openai/internal/type/enum.rb +10 -2
  11. data/lib/openai/internal/type/file_input.rb +6 -1
  12. data/lib/openai/internal/type/hash_of.rb +6 -1
  13. data/lib/openai/internal/type/union.rb +12 -7
  14. data/lib/openai/internal/type/unknown.rb +7 -1
  15. data/lib/openai/models/all_models.rb +4 -0
  16. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  17. data/lib/openai/models/audio/transcription.rb +118 -1
  18. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  19. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  20. data/lib/openai/models/chat/chat_completion.rb +32 -31
  21. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  22. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  23. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  24. data/lib/openai/models/images_response.rb +92 -1
  25. data/lib/openai/models/responses/response.rb +59 -35
  26. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  27. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  30. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  31. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  32. data/lib/openai/models/responses/response_create_params.rb +92 -67
  33. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  34. data/lib/openai/models/responses/response_includable.rb +8 -6
  35. data/lib/openai/models/responses/response_output_text.rb +18 -2
  36. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  37. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  38. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  39. data/lib/openai/models/responses_model.rb +4 -0
  40. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  42. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  43. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  44. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  45. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  46. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  47. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  48. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  49. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  50. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  51. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  52. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  53. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  54. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  55. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  56. data/lib/openai/models.rb +2 -0
  57. data/lib/openai/resources/audio/speech.rb +3 -1
  58. data/lib/openai/resources/chat/completions.rb +10 -2
  59. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  60. data/lib/openai/resources/responses.rb +24 -16
  61. data/lib/openai/resources/webhooks.rb +124 -0
  62. data/lib/openai/version.rb +1 -1
  63. data/lib/openai.rb +18 -0
  64. data/rbi/openai/client.rbi +3 -0
  65. data/rbi/openai/errors.rbi +16 -0
  66. data/rbi/openai/internal/type/boolean.rbi +2 -0
  67. data/rbi/openai/internal/type/converter.rbi +15 -15
  68. data/rbi/openai/internal/type/union.rbi +5 -0
  69. data/rbi/openai/internal/type/unknown.rbi +2 -0
  70. data/rbi/openai/models/all_models.rbi +20 -0
  71. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  72. data/rbi/openai/models/audio/transcription.rbi +213 -3
  73. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  74. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  75. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  76. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  77. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  78. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  79. data/rbi/openai/models/images_response.rbi +146 -0
  80. data/rbi/openai/models/responses/response.rbi +75 -44
  81. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  82. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  83. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  84. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  85. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  86. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  87. data/rbi/openai/models/responses/response_create_params.rbi +174 -115
  88. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  89. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  90. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  91. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  92. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  93. data/rbi/openai/models/responses_model.rbi +20 -0
  94. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  95. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  96. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  97. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  98. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  99. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  100. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  101. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  102. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  103. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  104. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  105. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  106. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  107. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  108. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  109. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  110. data/rbi/openai/models.rbi +2 -0
  111. data/rbi/openai/resources/audio/speech.rbi +6 -1
  112. data/rbi/openai/resources/chat/completions.rbi +34 -30
  113. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  114. data/rbi/openai/resources/responses.rbi +108 -84
  115. data/rbi/openai/resources/webhooks.rbi +68 -0
  116. data/sig/openai/client.rbs +2 -0
  117. data/sig/openai/errors.rbs +9 -0
  118. data/sig/openai/internal/type/converter.rbs +7 -1
  119. data/sig/openai/models/all_models.rbs +8 -0
  120. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  121. data/sig/openai/models/audio/transcription.rbs +95 -3
  122. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  123. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  124. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  125. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  126. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  127. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  128. data/sig/openai/models/images_response.rbs +83 -0
  129. data/sig/openai/models/responses/response.rbs +13 -1
  130. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  131. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  132. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  133. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  134. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  135. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  136. data/sig/openai/models/responses/response_create_params.rbs +31 -11
  137. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  138. data/sig/openai/models/responses/response_includable.rbs +7 -5
  139. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  140. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  141. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  142. data/sig/openai/models/responses_model.rbs +8 -0
  143. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  144. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  145. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  146. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  147. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  148. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  149. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  150. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  151. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  152. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  153. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  154. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  155. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  156. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  157. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  158. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  159. data/sig/openai/models.rbs +2 -0
  160. data/sig/openai/resources/audio/speech.rbs +1 -0
  161. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  162. data/sig/openai/resources/responses.rbs +8 -4
  163. data/sig/openai/resources/webhooks.rbs +33 -0
  164. metadata +56 -2
@@ -0,0 +1,155 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Webhooks
6
+ class ResponseIncompleteWebhookEvent < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(
10
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent,
11
+ OpenAI::Internal::AnyHash
12
+ )
13
+ end
14
+
15
+ # The unique ID of the event.
16
+ sig { returns(String) }
17
+ attr_accessor :id
18
+
19
+ # The Unix timestamp (in seconds) of when the model response was interrupted.
20
+ sig { returns(Integer) }
21
+ attr_accessor :created_at
22
+
23
+ # Event data payload.
24
+ sig { returns(OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data) }
25
+ attr_reader :data
26
+
27
+ sig do
28
+ params(
29
+ data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data::OrHash
30
+ ).void
31
+ end
32
+ attr_writer :data
33
+
34
+ # The type of the event. Always `response.incomplete`.
35
+ sig { returns(Symbol) }
36
+ attr_accessor :type
37
+
38
+ # The object of the event. Always `event`.
39
+ sig do
40
+ returns(
41
+ T.nilable(
42
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
43
+ )
44
+ )
45
+ end
46
+ attr_reader :object
47
+
48
+ sig do
49
+ params(
50
+ object:
51
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::OrSymbol
52
+ ).void
53
+ end
54
+ attr_writer :object
55
+
56
+ # Sent when a background response has been interrupted.
57
+ sig do
58
+ params(
59
+ id: String,
60
+ created_at: Integer,
61
+ data:
62
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data::OrHash,
63
+ object:
64
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::OrSymbol,
65
+ type: Symbol
66
+ ).returns(T.attached_class)
67
+ end
68
+ def self.new(
69
+ # The unique ID of the event.
70
+ id:,
71
+ # The Unix timestamp (in seconds) of when the model response was interrupted.
72
+ created_at:,
73
+ # Event data payload.
74
+ data:,
75
+ # The object of the event. Always `event`.
76
+ object: nil,
77
+ # The type of the event. Always `response.incomplete`.
78
+ type: :"response.incomplete"
79
+ )
80
+ end
81
+
82
+ sig do
83
+ override.returns(
84
+ {
85
+ id: String,
86
+ created_at: Integer,
87
+ data: OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data,
88
+ type: Symbol,
89
+ object:
90
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
91
+ }
92
+ )
93
+ end
94
+ def to_hash
95
+ end
96
+
97
+ class Data < OpenAI::Internal::Type::BaseModel
98
+ OrHash =
99
+ T.type_alias do
100
+ T.any(
101
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data,
102
+ OpenAI::Internal::AnyHash
103
+ )
104
+ end
105
+
106
+ # The unique ID of the model response.
107
+ sig { returns(String) }
108
+ attr_accessor :id
109
+
110
+ # Event data payload.
111
+ sig { params(id: String).returns(T.attached_class) }
112
+ def self.new(
113
+ # The unique ID of the model response.
114
+ id:
115
+ )
116
+ end
117
+
118
+ sig { override.returns({ id: String }) }
119
+ def to_hash
120
+ end
121
+ end
122
+
123
+ # The object of the event. Always `event`.
124
+ module Object
125
+ extend OpenAI::Internal::Type::Enum
126
+
127
+ TaggedSymbol =
128
+ T.type_alias do
129
+ T.all(
130
+ Symbol,
131
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object
132
+ )
133
+ end
134
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
135
+
136
+ EVENT =
137
+ T.let(
138
+ :event,
139
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
140
+ )
141
+
142
+ sig do
143
+ override.returns(
144
+ T::Array[
145
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object::TaggedSymbol
146
+ ]
147
+ )
148
+ end
149
+ def self.values
150
+ end
151
+ end
152
+ end
153
+ end
154
+ end
155
+ end
@@ -0,0 +1,40 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Webhooks
6
+ # A webhook event delivered by the API; one of the supported webhook event types listed below.
7
+ module UnwrapWebhookEvent
8
+ extend OpenAI::Internal::Type::Union
9
+
10
+ Variants =
11
+ T.type_alias do
12
+ T.any(
13
+ OpenAI::Webhooks::BatchCancelledWebhookEvent,
14
+ OpenAI::Webhooks::BatchCompletedWebhookEvent,
15
+ OpenAI::Webhooks::BatchExpiredWebhookEvent,
16
+ OpenAI::Webhooks::BatchFailedWebhookEvent,
17
+ OpenAI::Webhooks::EvalRunCanceledWebhookEvent,
18
+ OpenAI::Webhooks::EvalRunFailedWebhookEvent,
19
+ OpenAI::Webhooks::EvalRunSucceededWebhookEvent,
20
+ OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent,
21
+ OpenAI::Webhooks::FineTuningJobFailedWebhookEvent,
22
+ OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent,
23
+ OpenAI::Webhooks::ResponseCancelledWebhookEvent,
24
+ OpenAI::Webhooks::ResponseCompletedWebhookEvent,
25
+ OpenAI::Webhooks::ResponseFailedWebhookEvent,
26
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent
27
+ )
28
+ end
29
+
30
+ sig do
31
+ override.returns(
32
+ T::Array[OpenAI::Webhooks::UnwrapWebhookEvent::Variants]
33
+ )
34
+ end
35
+ def self.variants
36
+ end
37
+ end
38
+ end
39
+ end
40
+ end
@@ -0,0 +1,32 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Webhooks
6
+ class WebhookUnwrapParams < OpenAI::Internal::Type::BaseModel
7
+ extend OpenAI::Internal::Type::RequestParameters::Converter
8
+ include OpenAI::Internal::Type::RequestParameters
9
+
10
+ OrHash =
11
+ T.type_alias do
12
+ T.any(
13
+ OpenAI::Webhooks::WebhookUnwrapParams,
14
+ OpenAI::Internal::AnyHash
15
+ )
16
+ end
17
+
18
+ sig do
19
+ params(request_options: OpenAI::RequestOptions::OrHash).returns(
20
+ T.attached_class
21
+ )
22
+ end
23
+ def self.new(request_options: {})
24
+ end
25
+
26
+ sig { override.returns({ request_options: OpenAI::RequestOptions }) }
27
+ def to_hash
28
+ end
29
+ end
30
+ end
31
+ end
32
+ end
@@ -199,4 +199,6 @@ module OpenAI
199
199
  VectorStoreSearchParams = OpenAI::Models::VectorStoreSearchParams
200
200
 
201
201
  VectorStoreUpdateParams = OpenAI::Models::VectorStoreUpdateParams
202
+
203
+ Webhooks = OpenAI::Models::Webhooks
202
204
  end
@@ -15,6 +15,8 @@ module OpenAI
15
15
  response_format:
16
16
  OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
17
17
  speed: Float,
18
+ stream_format:
19
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
18
20
  request_options: OpenAI::RequestOptions::OrHash
19
21
  ).returns(StringIO)
20
22
  end
@@ -36,8 +38,11 @@ module OpenAI
36
38
  # `wav`, and `pcm`.
37
39
  response_format: nil,
38
40
  # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
39
- # the default. Does not work with `gpt-4o-mini-tts`.
41
+ # the default.
40
42
  speed: nil,
43
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
44
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
45
+ stream_format: nil,
41
46
  request_options: {}
42
47
  )
43
48
  end
@@ -228,23 +228,23 @@ module OpenAI
228
228
  # should refer to the `system_fingerprint` response parameter to monitor changes
229
229
  # in the backend.
230
230
  seed: nil,
231
- # Specifies the latency tier to use for processing the request. This parameter is
232
- # relevant for customers subscribed to the scale tier service:
233
- #
234
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
235
- # utilize scale tier credits until they are exhausted.
236
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
237
- # be processed using the default service tier with a lower uptime SLA and no
238
- # latency guarantee.
239
- # - If set to 'default', the request will be processed using the default service
240
- # tier with a lower uptime SLA and no latency guarantee.
241
- # - If set to 'flex', the request will be processed with the Flex Processing
242
- # service tier.
243
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
231
+ # Specifies the processing type used for serving the request.
232
+ #
233
+ # - If set to 'auto', then the request will be processed with the service tier
234
+ # configured in the Project settings. Unless otherwise configured, the Project
235
+ # will use 'default'.
236
+ # - If set to 'default', then the request will be processed with the standard
237
+ # pricing and performance for the selected model.
238
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
239
+ # 'priority', then the request will be processed with the corresponding service
240
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
241
+ # Priority processing.
244
242
  # - When not set, the default behavior is 'auto'.
245
243
  #
246
- # When this parameter is set, the response body will include the `service_tier`
247
- # utilized.
244
+ # When the `service_tier` parameter is set, the response body will include the
245
+ # `service_tier` value based on the processing mode actually used to serve the
246
+ # request. This response value may be different from the value set in the
247
+ # parameter.
248
248
  service_tier: nil,
249
249
  # Not supported with latest reasoning models `o3` and `o4-mini`.
250
250
  #
@@ -254,6 +254,8 @@ module OpenAI
254
254
  # Whether or not to store the output of this chat completion request for use in
255
255
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
256
256
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
257
+ #
258
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
257
259
  store: nil,
258
260
  # Options for streaming response. Only set this when you set `stream: true`.
259
261
  stream_options: nil,
@@ -515,23 +517,23 @@ module OpenAI
515
517
  # should refer to the `system_fingerprint` response parameter to monitor changes
516
518
  # in the backend.
517
519
  seed: nil,
518
- # Specifies the latency tier to use for processing the request. This parameter is
519
- # relevant for customers subscribed to the scale tier service:
520
- #
521
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
522
- # utilize scale tier credits until they are exhausted.
523
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
524
- # be processed using the default service tier with a lower uptime SLA and no
525
- # latency guarantee.
526
- # - If set to 'default', the request will be processed using the default service
527
- # tier with a lower uptime SLA and no latency guarantee.
528
- # - If set to 'flex', the request will be processed with the Flex Processing
529
- # service tier.
530
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
520
+ # Specifies the processing type used for serving the request.
521
+ #
522
+ # - If set to 'auto', then the request will be processed with the service tier
523
+ # configured in the Project settings. Unless otherwise configured, the Project
524
+ # will use 'default'.
525
+ # - If set to 'default', then the request will be processed with the standard
526
+ # pricing and performance for the selected model.
527
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
528
+ # 'priority', then the request will be processed with the corresponding service
529
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
530
+ # Priority processing.
531
531
  # - When not set, the default behavior is 'auto'.
532
532
  #
533
- # When this parameter is set, the response body will include the `service_tier`
534
- # utilized.
533
+ # When the `service_tier` parameter is set, the response body will include the
534
+ # `service_tier` value based on the processing mode actually used to serve the
535
+ # request. This response value may be different from the value set in the
536
+ # parameter.
535
537
  service_tier: nil,
536
538
  # Not supported with latest reasoning models `o3` and `o4-mini`.
537
539
  #
@@ -541,6 +543,8 @@ module OpenAI
541
543
  # Whether or not to store the output of this chat completion request for use in
542
544
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
543
545
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
546
+ #
547
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
544
548
  store: nil,
545
549
  # Options for streaming response. Only set this when you set `stream: true`.
546
550
  stream_options: nil,
@@ -43,9 +43,7 @@ module OpenAI
43
43
  project_id: String,
44
44
  request_options: OpenAI::RequestOptions::OrHash
45
45
  ).returns(
46
- OpenAI::Internal::CursorPage[
47
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
48
- ]
46
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
49
47
  )
50
48
  end
51
49
  def retrieve(