openai 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +25 -0
  6. data/lib/openai/internal/type/array_of.rb +6 -1
  7. data/lib/openai/internal/type/base_model.rb +76 -24
  8. data/lib/openai/internal/type/boolean.rb +7 -1
  9. data/lib/openai/internal/type/converter.rb +42 -34
  10. data/lib/openai/internal/type/enum.rb +10 -2
  11. data/lib/openai/internal/type/file_input.rb +6 -1
  12. data/lib/openai/internal/type/hash_of.rb +6 -1
  13. data/lib/openai/internal/type/union.rb +12 -7
  14. data/lib/openai/internal/type/unknown.rb +7 -1
  15. data/lib/openai/models/all_models.rb +4 -0
  16. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  17. data/lib/openai/models/audio/transcription.rb +118 -1
  18. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  19. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  20. data/lib/openai/models/chat/chat_completion.rb +32 -31
  21. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  22. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  23. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  24. data/lib/openai/models/images_response.rb +92 -1
  25. data/lib/openai/models/responses/response.rb +59 -35
  26. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  27. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  30. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  31. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  32. data/lib/openai/models/responses/response_create_params.rb +92 -67
  33. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  34. data/lib/openai/models/responses/response_includable.rb +8 -6
  35. data/lib/openai/models/responses/response_output_text.rb +18 -2
  36. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  37. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  38. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  39. data/lib/openai/models/responses_model.rb +4 -0
  40. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  42. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  43. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  44. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  45. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  46. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  47. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  48. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  49. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  50. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  51. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  52. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  53. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  54. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  55. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  56. data/lib/openai/models.rb +2 -0
  57. data/lib/openai/resources/audio/speech.rb +3 -1
  58. data/lib/openai/resources/chat/completions.rb +10 -2
  59. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  60. data/lib/openai/resources/responses.rb +24 -16
  61. data/lib/openai/resources/webhooks.rb +124 -0
  62. data/lib/openai/version.rb +1 -1
  63. data/lib/openai.rb +18 -0
  64. data/rbi/openai/client.rbi +3 -0
  65. data/rbi/openai/errors.rbi +16 -0
  66. data/rbi/openai/internal/type/boolean.rbi +2 -0
  67. data/rbi/openai/internal/type/converter.rbi +15 -15
  68. data/rbi/openai/internal/type/union.rbi +5 -0
  69. data/rbi/openai/internal/type/unknown.rbi +2 -0
  70. data/rbi/openai/models/all_models.rbi +20 -0
  71. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  72. data/rbi/openai/models/audio/transcription.rbi +213 -3
  73. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  74. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  75. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  76. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  77. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  78. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  79. data/rbi/openai/models/images_response.rbi +146 -0
  80. data/rbi/openai/models/responses/response.rbi +75 -44
  81. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  82. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  83. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  84. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  85. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  86. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  87. data/rbi/openai/models/responses/response_create_params.rbi +174 -115
  88. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  89. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  90. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  91. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  92. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  93. data/rbi/openai/models/responses_model.rbi +20 -0
  94. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  95. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  96. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  97. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  98. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  99. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  100. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  101. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  102. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  103. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  104. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  105. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  106. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  107. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  108. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  109. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  110. data/rbi/openai/models.rbi +2 -0
  111. data/rbi/openai/resources/audio/speech.rbi +6 -1
  112. data/rbi/openai/resources/chat/completions.rbi +34 -30
  113. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  114. data/rbi/openai/resources/responses.rbi +108 -84
  115. data/rbi/openai/resources/webhooks.rbi +68 -0
  116. data/sig/openai/client.rbs +2 -0
  117. data/sig/openai/errors.rbs +9 -0
  118. data/sig/openai/internal/type/converter.rbs +7 -1
  119. data/sig/openai/models/all_models.rbs +8 -0
  120. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  121. data/sig/openai/models/audio/transcription.rbs +95 -3
  122. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  123. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  124. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  125. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  126. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  127. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  128. data/sig/openai/models/images_response.rbs +83 -0
  129. data/sig/openai/models/responses/response.rbs +13 -1
  130. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  131. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  132. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  133. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  134. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  135. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  136. data/sig/openai/models/responses/response_create_params.rbs +31 -11
  137. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  138. data/sig/openai/models/responses/response_includable.rbs +7 -5
  139. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  140. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  141. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  142. data/sig/openai/models/responses_model.rbs +8 -0
  143. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  144. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  145. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  146. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  147. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  148. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  149. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  150. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  151. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  152. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  153. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  154. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  155. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  156. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  157. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  158. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  159. data/sig/openai/models.rbs +2 -0
  160. data/sig/openai/resources/audio/speech.rbs +1 -0
  161. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  162. data/sig/openai/resources/responses.rbs +8 -4
  163. data/sig/openai/resources/webhooks.rbs +33 -0
  164. metadata +56 -2
@@ -34,23 +34,23 @@ module OpenAI
34
34
  sig { returns(Symbol) }
35
35
  attr_accessor :object
36
36
 
37
- # Specifies the latency tier to use for processing the request. This parameter is
38
- # relevant for customers subscribed to the scale tier service:
37
+ # Specifies the processing type used for serving the request.
39
38
  #
40
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
41
- # utilize scale tier credits until they are exhausted.
42
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
43
- # be processed using the default service tier with a lower uptime SLA and no
44
- # latency guarantee.
45
- # - If set to 'default', the request will be processed using the default service
46
- # tier with a lower uptime SLA and no latency guarantee.
47
- # - If set to 'flex', the request will be processed with the Flex Processing
48
- # service tier.
49
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
39
+ # - If set to 'auto', then the request will be processed with the service tier
40
+ # configured in the Project settings. Unless otherwise configured, the Project
41
+ # will use 'default'.
42
+ # - If set to 'default', then the request will be processed with the standard
43
+ # pricing and performance for the selected model.
44
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
45
+ # 'priority', then the request will be processed with the corresponding service
46
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
47
+ # Priority processing.
50
48
  # - When not set, the default behavior is 'auto'.
51
49
  #
52
- # When this parameter is set, the response body will include the `service_tier`
53
- # utilized.
50
+ # When the `service_tier` parameter is set, the response body will include the
51
+ # `service_tier` value based on the processing mode actually used to serve the
52
+ # request. This response value may be different from the value set in the
53
+ # parameter.
54
54
  sig do
55
55
  returns(
56
56
  T.nilable(
@@ -113,23 +113,23 @@ module OpenAI
113
113
  created:,
114
114
  # The model to generate the completion.
115
115
  model:,
116
- # Specifies the latency tier to use for processing the request. This parameter is
117
- # relevant for customers subscribed to the scale tier service:
116
+ # Specifies the processing type used for serving the request.
118
117
  #
119
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
120
- # utilize scale tier credits until they are exhausted.
121
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
122
- # be processed using the default service tier with a lower uptime SLA and no
123
- # latency guarantee.
124
- # - If set to 'default', the request will be processed using the default service
125
- # tier with a lower uptime SLA and no latency guarantee.
126
- # - If set to 'flex', the request will be processed with the Flex Processing
127
- # service tier.
128
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
118
+ # - If set to 'auto', then the request will be processed with the service tier
119
+ # configured in the Project settings. Unless otherwise configured, the Project
120
+ # will use 'default'.
121
+ # - If set to 'default', then the request will be processed with the standard
122
+ # pricing and performance for the selected model.
123
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
124
+ # 'priority', then the request will be processed with the corresponding service
125
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
126
+ # Priority processing.
129
127
  # - When not set, the default behavior is 'auto'.
130
128
  #
131
- # When this parameter is set, the response body will include the `service_tier`
132
- # utilized.
129
+ # When the `service_tier` parameter is set, the response body will include the
130
+ # `service_tier` value based on the processing mode actually used to serve the
131
+ # request. This response value may be different from the value set in the
132
+ # parameter.
133
133
  service_tier: nil,
134
134
  # This fingerprint represents the backend configuration that the model runs with.
135
135
  # Can be used in conjunction with the `seed` request parameter to understand when
@@ -783,23 +783,23 @@ module OpenAI
783
783
  end
784
784
  end
785
785
 
786
- # Specifies the latency tier to use for processing the request. This parameter is
787
- # relevant for customers subscribed to the scale tier service:
786
+ # Specifies the processing type used for serving the request.
788
787
  #
789
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
790
- # utilize scale tier credits until they are exhausted.
791
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
792
- # be processed using the default service tier with a lower uptime SLA and no
793
- # latency guarantee.
794
- # - If set to 'default', the request will be processed using the default service
795
- # tier with a lower uptime SLA and no latency guarantee.
796
- # - If set to 'flex', the request will be processed with the Flex Processing
797
- # service tier.
798
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
788
+ # - If set to 'auto', then the request will be processed with the service tier
789
+ # configured in the Project settings. Unless otherwise configured, the Project
790
+ # will use 'default'.
791
+ # - If set to 'default', then the request will be processed with the standard
792
+ # pricing and performance for the selected model.
793
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
794
+ # 'priority', then the request will be processed with the corresponding service
795
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
796
+ # Priority processing.
799
797
  # - When not set, the default behavior is 'auto'.
800
798
  #
801
- # When this parameter is set, the response body will include the `service_tier`
802
- # utilized.
799
+ # When the `service_tier` parameter is set, the response body will include the
800
+ # `service_tier` value based on the processing mode actually used to serve the
801
+ # request. This response value may be different from the value set in the
802
+ # parameter.
803
803
  module ServiceTier
804
804
  extend OpenAI::Internal::Type::Enum
805
805
 
@@ -829,6 +829,11 @@ module OpenAI
829
829
  :scale,
830
830
  OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
831
831
  )
832
+ PRIORITY =
833
+ T.let(
834
+ :priority,
835
+ OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
836
+ )
832
837
 
833
838
  sig do
834
839
  override.returns(
@@ -270,23 +270,23 @@ module OpenAI
270
270
  sig { returns(T.nilable(Integer)) }
271
271
  attr_accessor :seed
272
272
 
273
- # Specifies the latency tier to use for processing the request. This parameter is
274
- # relevant for customers subscribed to the scale tier service:
273
+ # Specifies the processing type used for serving the request.
275
274
  #
276
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
277
- # utilize scale tier credits until they are exhausted.
278
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
279
- # be processed using the default service tier with a lower uptime SLA and no
280
- # latency guarantee.
281
- # - If set to 'default', the request will be processed using the default service
282
- # tier with a lower uptime SLA and no latency guarantee.
283
- # - If set to 'flex', the request will be processed with the Flex Processing
284
- # service tier.
285
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
275
+ # - If set to 'auto', then the request will be processed with the service tier
276
+ # configured in the Project settings. Unless otherwise configured, the Project
277
+ # will use 'default'.
278
+ # - If set to 'default', then the request will be processed with the standard
279
+ # pricing and performance for the selected model.
280
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
281
+ # 'priority', then the request will be processed with the corresponding service
282
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
283
+ # Priority processing.
286
284
  # - When not set, the default behavior is 'auto'.
287
285
  #
288
- # When this parameter is set, the response body will include the `service_tier`
289
- # utilized.
286
+ # When the `service_tier` parameter is set, the response body will include the
287
+ # `service_tier` value based on the processing mode actually used to serve the
288
+ # request. This response value may be different from the value set in the
289
+ # parameter.
290
290
  sig do
291
291
  returns(
292
292
  T.nilable(
@@ -310,6 +310,8 @@ module OpenAI
310
310
  # Whether or not to store the output of this chat completion request for use in
311
311
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
312
312
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
313
+ #
314
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
313
315
  sig { returns(T.nilable(T::Boolean)) }
314
316
  attr_accessor :store
315
317
 
@@ -625,23 +627,23 @@ module OpenAI
625
627
  # should refer to the `system_fingerprint` response parameter to monitor changes
626
628
  # in the backend.
627
629
  seed: nil,
628
- # Specifies the latency tier to use for processing the request. This parameter is
629
- # relevant for customers subscribed to the scale tier service:
630
+ # Specifies the processing type used for serving the request.
630
631
  #
631
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
632
- # utilize scale tier credits until they are exhausted.
633
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
634
- # be processed using the default service tier with a lower uptime SLA and no
635
- # latency guarantee.
636
- # - If set to 'default', the request will be processed using the default service
637
- # tier with a lower uptime SLA and no latency guarantee.
638
- # - If set to 'flex', the request will be processed with the Flex Processing
639
- # service tier.
640
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
632
+ # - If set to 'auto', then the request will be processed with the service tier
633
+ # configured in the Project settings. Unless otherwise configured, the Project
634
+ # will use 'default'.
635
+ # - If set to 'default', then the request will be processed with the standard
636
+ # pricing and performance for the selected model.
637
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
638
+ # 'priority', then the request will be processed with the corresponding service
639
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
640
+ # Priority processing.
641
641
  # - When not set, the default behavior is 'auto'.
642
642
  #
643
- # When this parameter is set, the response body will include the `service_tier`
644
- # utilized.
643
+ # When the `service_tier` parameter is set, the response body will include the
644
+ # `service_tier` value based on the processing mode actually used to serve the
645
+ # request. This response value may be different from the value set in the
646
+ # parameter.
645
647
  service_tier: nil,
646
648
  # Not supported with latest reasoning models `o3` and `o4-mini`.
647
649
  #
@@ -651,6 +653,8 @@ module OpenAI
651
653
  # Whether or not to store the output of this chat completion request for use in
652
654
  # our [model distillation](https://platform.openai.com/docs/guides/distillation)
653
655
  # or [evals](https://platform.openai.com/docs/guides/evals) products.
656
+ #
657
+ # Supports text and image inputs. Note: image inputs over 10MB will be dropped.
654
658
  store: nil,
655
659
  # Options for streaming response. Only set this when you set `stream: true`.
656
660
  stream_options: nil,
@@ -1008,23 +1012,23 @@ module OpenAI
1008
1012
  end
1009
1013
  end
1010
1014
 
1011
- # Specifies the latency tier to use for processing the request. This parameter is
1012
- # relevant for customers subscribed to the scale tier service:
1015
+ # Specifies the processing type used for serving the request.
1013
1016
  #
1014
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
1015
- # utilize scale tier credits until they are exhausted.
1016
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
1017
- # be processed using the default service tier with a lower uptime SLA and no
1018
- # latency guarantee.
1019
- # - If set to 'default', the request will be processed using the default service
1020
- # tier with a lower uptime SLA and no latency guarantee.
1021
- # - If set to 'flex', the request will be processed with the Flex Processing
1022
- # service tier.
1023
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
1017
+ # - If set to 'auto', then the request will be processed with the service tier
1018
+ # configured in the Project settings. Unless otherwise configured, the Project
1019
+ # will use 'default'.
1020
+ # - If set to 'default', then the request will be processed with the standard
1021
+ # pricing and performance for the selected model.
1022
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1023
+ # 'priority', then the request will be processed with the corresponding service
1024
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
1025
+ # Priority processing.
1024
1026
  # - When not set, the default behavior is 'auto'.
1025
1027
  #
1026
- # When this parameter is set, the response body will include the `service_tier`
1027
- # utilized.
1028
+ # When the `service_tier` parameter is set, the response body will include the
1029
+ # `service_tier` value based on the processing mode actually used to serve the
1030
+ # request. This response value may be different from the value set in the
1031
+ # parameter.
1028
1032
  module ServiceTier
1029
1033
  extend OpenAI::Internal::Type::Enum
1030
1034
 
@@ -1054,6 +1058,11 @@ module OpenAI
1054
1058
  :scale,
1055
1059
  OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
1056
1060
  )
1061
+ PRIORITY =
1062
+ T.let(
1063
+ :priority,
1064
+ OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
1065
+ )
1057
1066
 
1058
1067
  sig do
1059
1068
  override.returns(
@@ -13,56 +13,125 @@ module OpenAI
13
13
  )
14
14
  end
15
15
 
16
- # The permission identifier, which can be referenced in the API endpoints.
17
- sig { returns(String) }
18
- attr_accessor :id
16
+ sig do
17
+ returns(
18
+ T::Array[
19
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
20
+ ]
21
+ )
22
+ end
23
+ attr_accessor :data
19
24
 
20
- # The Unix timestamp (in seconds) for when the permission was created.
21
- sig { returns(Integer) }
22
- attr_accessor :created_at
25
+ sig { returns(T::Boolean) }
26
+ attr_accessor :has_more
23
27
 
24
- # The object type, which is always "checkpoint.permission".
25
28
  sig { returns(Symbol) }
26
29
  attr_accessor :object
27
30
 
28
- # The project identifier that the permission is for.
29
- sig { returns(String) }
30
- attr_accessor :project_id
31
+ sig { returns(T.nilable(String)) }
32
+ attr_accessor :first_id
33
+
34
+ sig { returns(T.nilable(String)) }
35
+ attr_accessor :last_id
31
36
 
32
- # The `checkpoint.permission` object represents a permission for a fine-tuned
33
- # model checkpoint.
34
37
  sig do
35
38
  params(
36
- id: String,
37
- created_at: Integer,
38
- project_id: String,
39
+ data:
40
+ T::Array[
41
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data::OrHash
42
+ ],
43
+ has_more: T::Boolean,
44
+ first_id: T.nilable(String),
45
+ last_id: T.nilable(String),
39
46
  object: Symbol
40
47
  ).returns(T.attached_class)
41
48
  end
42
49
  def self.new(
43
- # The permission identifier, which can be referenced in the API endpoints.
44
- id:,
45
- # The Unix timestamp (in seconds) for when the permission was created.
46
- created_at:,
47
- # The project identifier that the permission is for.
48
- project_id:,
49
- # The object type, which is always "checkpoint.permission".
50
- object: :"checkpoint.permission"
50
+ data:,
51
+ has_more:,
52
+ first_id: nil,
53
+ last_id: nil,
54
+ object: :list
51
55
  )
52
56
  end
53
57
 
54
58
  sig do
55
59
  override.returns(
56
60
  {
57
- id: String,
58
- created_at: Integer,
61
+ data:
62
+ T::Array[
63
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
64
+ ],
65
+ has_more: T::Boolean,
59
66
  object: Symbol,
60
- project_id: String
67
+ first_id: T.nilable(String),
68
+ last_id: T.nilable(String)
61
69
  }
62
70
  )
63
71
  end
64
72
  def to_hash
65
73
  end
74
+
75
+ class Data < OpenAI::Internal::Type::BaseModel
76
+ OrHash =
77
+ T.type_alias do
78
+ T.any(
79
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data,
80
+ OpenAI::Internal::AnyHash
81
+ )
82
+ end
83
+
84
+ # The permission identifier, which can be referenced in the API endpoints.
85
+ sig { returns(String) }
86
+ attr_accessor :id
87
+
88
+ # The Unix timestamp (in seconds) for when the permission was created.
89
+ sig { returns(Integer) }
90
+ attr_accessor :created_at
91
+
92
+ # The object type, which is always "checkpoint.permission".
93
+ sig { returns(Symbol) }
94
+ attr_accessor :object
95
+
96
+ # The project identifier that the permission is for.
97
+ sig { returns(String) }
98
+ attr_accessor :project_id
99
+
100
+ # The `checkpoint.permission` object represents a permission for a fine-tuned
101
+ # model checkpoint.
102
+ sig do
103
+ params(
104
+ id: String,
105
+ created_at: Integer,
106
+ project_id: String,
107
+ object: Symbol
108
+ ).returns(T.attached_class)
109
+ end
110
+ def self.new(
111
+ # The permission identifier, which can be referenced in the API endpoints.
112
+ id:,
113
+ # The Unix timestamp (in seconds) for when the permission was created.
114
+ created_at:,
115
+ # The project identifier that the permission is for.
116
+ project_id:,
117
+ # The object type, which is always "checkpoint.permission".
118
+ object: :"checkpoint.permission"
119
+ )
120
+ end
121
+
122
+ sig do
123
+ override.returns(
124
+ {
125
+ id: String,
126
+ created_at: Integer,
127
+ object: Symbol,
128
+ project_id: String
129
+ }
130
+ )
131
+ end
132
+ def to_hash
133
+ end
134
+ end
66
135
  end
67
136
  end
68
137
  end
@@ -12,6 +12,18 @@ module OpenAI
12
12
  sig { returns(Integer) }
13
13
  attr_accessor :created
14
14
 
15
+ # The background parameter used for the image generation. Either `transparent` or
16
+ # `opaque`.
17
+ sig do
18
+ returns(T.nilable(OpenAI::ImagesResponse::Background::TaggedSymbol))
19
+ end
20
+ attr_reader :background
21
+
22
+ sig do
23
+ params(background: OpenAI::ImagesResponse::Background::OrSymbol).void
24
+ end
25
+ attr_writer :background
26
+
15
27
  # The list of generated images.
16
28
  sig { returns(T.nilable(T::Array[OpenAI::Image])) }
17
29
  attr_reader :data
@@ -19,6 +31,34 @@ module OpenAI
19
31
  sig { params(data: T::Array[OpenAI::Image::OrHash]).void }
20
32
  attr_writer :data
21
33
 
34
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
35
+ sig do
36
+ returns(T.nilable(OpenAI::ImagesResponse::OutputFormat::TaggedSymbol))
37
+ end
38
+ attr_reader :output_format
39
+
40
+ sig do
41
+ params(
42
+ output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol
43
+ ).void
44
+ end
45
+ attr_writer :output_format
46
+
47
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
48
+ sig { returns(T.nilable(OpenAI::ImagesResponse::Quality::TaggedSymbol)) }
49
+ attr_reader :quality
50
+
51
+ sig { params(quality: OpenAI::ImagesResponse::Quality::OrSymbol).void }
52
+ attr_writer :quality
53
+
54
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
55
+ # `1536x1024`.
56
+ sig { returns(T.nilable(OpenAI::ImagesResponse::Size::TaggedSymbol)) }
57
+ attr_reader :size
58
+
59
+ sig { params(size: OpenAI::ImagesResponse::Size::OrSymbol).void }
60
+ attr_writer :size
61
+
22
62
  # For `gpt-image-1` only, the token usage information for the image generation.
23
63
  sig { returns(T.nilable(OpenAI::ImagesResponse::Usage)) }
24
64
  attr_reader :usage
@@ -30,15 +70,29 @@ module OpenAI
30
70
  sig do
31
71
  params(
32
72
  created: Integer,
73
+ background: OpenAI::ImagesResponse::Background::OrSymbol,
33
74
  data: T::Array[OpenAI::Image::OrHash],
75
+ output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol,
76
+ quality: OpenAI::ImagesResponse::Quality::OrSymbol,
77
+ size: OpenAI::ImagesResponse::Size::OrSymbol,
34
78
  usage: OpenAI::ImagesResponse::Usage::OrHash
35
79
  ).returns(T.attached_class)
36
80
  end
37
81
  def self.new(
38
82
  # The Unix timestamp (in seconds) of when the image was created.
39
83
  created:,
84
+ # The background parameter used for the image generation. Either `transparent` or
85
+ # `opaque`.
86
+ background: nil,
40
87
  # The list of generated images.
41
88
  data: nil,
89
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
90
+ output_format: nil,
91
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
92
+ quality: nil,
93
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
94
+ # `1536x1024`.
95
+ size: nil,
42
96
  # For `gpt-image-1` only, the token usage information for the image generation.
43
97
  usage: nil
44
98
  )
@@ -48,7 +102,11 @@ module OpenAI
48
102
  override.returns(
49
103
  {
50
104
  created: Integer,
105
+ background: OpenAI::ImagesResponse::Background::TaggedSymbol,
51
106
  data: T::Array[OpenAI::Image],
107
+ output_format: OpenAI::ImagesResponse::OutputFormat::TaggedSymbol,
108
+ quality: OpenAI::ImagesResponse::Quality::TaggedSymbol,
109
+ size: OpenAI::ImagesResponse::Size::TaggedSymbol,
52
110
  usage: OpenAI::ImagesResponse::Usage
53
111
  }
54
112
  )
@@ -56,6 +114,94 @@ module OpenAI
56
114
  def to_hash
57
115
  end
58
116
 
117
+ # The background parameter used for the image generation. Either `transparent` or
118
+ # `opaque`.
119
+ module Background
120
+ extend OpenAI::Internal::Type::Enum
121
+
122
+ TaggedSymbol =
123
+ T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Background) }
124
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
125
+
126
+ TRANSPARENT =
127
+ T.let(:transparent, OpenAI::ImagesResponse::Background::TaggedSymbol)
128
+ OPAQUE =
129
+ T.let(:opaque, OpenAI::ImagesResponse::Background::TaggedSymbol)
130
+
131
+ sig do
132
+ override.returns(
133
+ T::Array[OpenAI::ImagesResponse::Background::TaggedSymbol]
134
+ )
135
+ end
136
+ def self.values
137
+ end
138
+ end
139
+
140
+ # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
141
+ module OutputFormat
142
+ extend OpenAI::Internal::Type::Enum
143
+
144
+ TaggedSymbol =
145
+ T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::OutputFormat) }
146
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
147
+
148
+ PNG = T.let(:png, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
149
+ WEBP = T.let(:webp, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
150
+ JPEG = T.let(:jpeg, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
151
+
152
+ sig do
153
+ override.returns(
154
+ T::Array[OpenAI::ImagesResponse::OutputFormat::TaggedSymbol]
155
+ )
156
+ end
157
+ def self.values
158
+ end
159
+ end
160
+
161
+ # The quality of the image generated. Either `low`, `medium`, or `high`.
162
+ module Quality
163
+ extend OpenAI::Internal::Type::Enum
164
+
165
+ TaggedSymbol =
166
+ T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Quality) }
167
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
168
+
169
+ LOW = T.let(:low, OpenAI::ImagesResponse::Quality::TaggedSymbol)
170
+ MEDIUM = T.let(:medium, OpenAI::ImagesResponse::Quality::TaggedSymbol)
171
+ HIGH = T.let(:high, OpenAI::ImagesResponse::Quality::TaggedSymbol)
172
+
173
+ sig do
174
+ override.returns(
175
+ T::Array[OpenAI::ImagesResponse::Quality::TaggedSymbol]
176
+ )
177
+ end
178
+ def self.values
179
+ end
180
+ end
181
+
182
+ # The size of the image generated. Either `1024x1024`, `1024x1536`, or
183
+ # `1536x1024`.
184
+ module Size
185
+ extend OpenAI::Internal::Type::Enum
186
+
187
+ TaggedSymbol =
188
+ T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Size) }
189
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
190
+
191
+ SIZE_1024X1024 =
192
+ T.let(:"1024x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
193
+ SIZE_1024X1536 =
194
+ T.let(:"1024x1536", OpenAI::ImagesResponse::Size::TaggedSymbol)
195
+ SIZE_1536X1024 =
196
+ T.let(:"1536x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
197
+
198
+ sig do
199
+ override.returns(T::Array[OpenAI::ImagesResponse::Size::TaggedSymbol])
200
+ end
201
+ def self.values
202
+ end
203
+ end
204
+
59
205
  class Usage < OpenAI::Internal::Type::BaseModel
60
206
  OrHash =
61
207
  T.type_alias do