openai 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
@@ -34,23 +34,23 @@ module OpenAI
|
|
34
34
|
sig { returns(Symbol) }
|
35
35
|
attr_accessor :object
|
36
36
|
|
37
|
-
# Specifies the
|
38
|
-
# relevant for customers subscribed to the scale tier service:
|
37
|
+
# Specifies the processing type used for serving the request.
|
39
38
|
#
|
40
|
-
# - If set to 'auto',
|
41
|
-
#
|
42
|
-
#
|
43
|
-
#
|
44
|
-
#
|
45
|
-
# - If set to '
|
46
|
-
#
|
47
|
-
# -
|
48
|
-
#
|
49
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
39
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
40
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
41
|
+
# will use 'default'.
|
42
|
+
# - If set to 'default', then the request will be processed with the standard
|
43
|
+
# pricing and performance for the selected model.
|
44
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
45
|
+
# 'priority', then the request will be processed with the corresponding service
|
46
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
47
|
+
# Priority processing.
|
50
48
|
# - When not set, the default behavior is 'auto'.
|
51
49
|
#
|
52
|
-
# When
|
53
|
-
#
|
50
|
+
# When the `service_tier` parameter is set, the response body will include the
|
51
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
52
|
+
# request. This response value may be different from the value set in the
|
53
|
+
# parameter.
|
54
54
|
sig do
|
55
55
|
returns(
|
56
56
|
T.nilable(
|
@@ -113,23 +113,23 @@ module OpenAI
|
|
113
113
|
created:,
|
114
114
|
# The model to generate the completion.
|
115
115
|
model:,
|
116
|
-
# Specifies the
|
117
|
-
# relevant for customers subscribed to the scale tier service:
|
116
|
+
# Specifies the processing type used for serving the request.
|
118
117
|
#
|
119
|
-
# - If set to 'auto',
|
120
|
-
#
|
121
|
-
#
|
122
|
-
#
|
123
|
-
#
|
124
|
-
# - If set to '
|
125
|
-
#
|
126
|
-
# -
|
127
|
-
#
|
128
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
118
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
119
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
120
|
+
# will use 'default'.
|
121
|
+
# - If set to 'default', then the request will be processed with the standard
|
122
|
+
# pricing and performance for the selected model.
|
123
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
124
|
+
# 'priority', then the request will be processed with the corresponding service
|
125
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
126
|
+
# Priority processing.
|
129
127
|
# - When not set, the default behavior is 'auto'.
|
130
128
|
#
|
131
|
-
# When
|
132
|
-
#
|
129
|
+
# When the `service_tier` parameter is set, the response body will include the
|
130
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
131
|
+
# request. This response value may be different from the value set in the
|
132
|
+
# parameter.
|
133
133
|
service_tier: nil,
|
134
134
|
# This fingerprint represents the backend configuration that the model runs with.
|
135
135
|
# Can be used in conjunction with the `seed` request parameter to understand when
|
@@ -783,23 +783,23 @@ module OpenAI
|
|
783
783
|
end
|
784
784
|
end
|
785
785
|
|
786
|
-
# Specifies the
|
787
|
-
# relevant for customers subscribed to the scale tier service:
|
786
|
+
# Specifies the processing type used for serving the request.
|
788
787
|
#
|
789
|
-
# - If set to 'auto',
|
790
|
-
#
|
791
|
-
#
|
792
|
-
#
|
793
|
-
#
|
794
|
-
# - If set to '
|
795
|
-
#
|
796
|
-
# -
|
797
|
-
#
|
798
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
788
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
789
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
790
|
+
# will use 'default'.
|
791
|
+
# - If set to 'default', then the request will be processed with the standard
|
792
|
+
# pricing and performance for the selected model.
|
793
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
794
|
+
# 'priority', then the request will be processed with the corresponding service
|
795
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
796
|
+
# Priority processing.
|
799
797
|
# - When not set, the default behavior is 'auto'.
|
800
798
|
#
|
801
|
-
# When
|
802
|
-
#
|
799
|
+
# When the `service_tier` parameter is set, the response body will include the
|
800
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
801
|
+
# request. This response value may be different from the value set in the
|
802
|
+
# parameter.
|
803
803
|
module ServiceTier
|
804
804
|
extend OpenAI::Internal::Type::Enum
|
805
805
|
|
@@ -829,6 +829,11 @@ module OpenAI
|
|
829
829
|
:scale,
|
830
830
|
OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
|
831
831
|
)
|
832
|
+
PRIORITY =
|
833
|
+
T.let(
|
834
|
+
:priority,
|
835
|
+
OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
|
836
|
+
)
|
832
837
|
|
833
838
|
sig do
|
834
839
|
override.returns(
|
@@ -270,23 +270,23 @@ module OpenAI
|
|
270
270
|
sig { returns(T.nilable(Integer)) }
|
271
271
|
attr_accessor :seed
|
272
272
|
|
273
|
-
# Specifies the
|
274
|
-
# relevant for customers subscribed to the scale tier service:
|
273
|
+
# Specifies the processing type used for serving the request.
|
275
274
|
#
|
276
|
-
# - If set to 'auto',
|
277
|
-
#
|
278
|
-
#
|
279
|
-
#
|
280
|
-
#
|
281
|
-
# - If set to '
|
282
|
-
#
|
283
|
-
# -
|
284
|
-
#
|
285
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
275
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
276
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
277
|
+
# will use 'default'.
|
278
|
+
# - If set to 'default', then the request will be processed with the standard
|
279
|
+
# pricing and performance for the selected model.
|
280
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
281
|
+
# 'priority', then the request will be processed with the corresponding service
|
282
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
283
|
+
# Priority processing.
|
286
284
|
# - When not set, the default behavior is 'auto'.
|
287
285
|
#
|
288
|
-
# When
|
289
|
-
#
|
286
|
+
# When the `service_tier` parameter is set, the response body will include the
|
287
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
288
|
+
# request. This response value may be different from the value set in the
|
289
|
+
# parameter.
|
290
290
|
sig do
|
291
291
|
returns(
|
292
292
|
T.nilable(
|
@@ -310,6 +310,8 @@ module OpenAI
|
|
310
310
|
# Whether or not to store the output of this chat completion request for use in
|
311
311
|
# our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
312
312
|
# or [evals](https://platform.openai.com/docs/guides/evals) products.
|
313
|
+
#
|
314
|
+
# Supports text and image inputs. Note: image inputs over 10MB will be dropped.
|
313
315
|
sig { returns(T.nilable(T::Boolean)) }
|
314
316
|
attr_accessor :store
|
315
317
|
|
@@ -625,23 +627,23 @@ module OpenAI
|
|
625
627
|
# should refer to the `system_fingerprint` response parameter to monitor changes
|
626
628
|
# in the backend.
|
627
629
|
seed: nil,
|
628
|
-
# Specifies the
|
629
|
-
# relevant for customers subscribed to the scale tier service:
|
630
|
+
# Specifies the processing type used for serving the request.
|
630
631
|
#
|
631
|
-
# - If set to 'auto',
|
632
|
-
#
|
633
|
-
#
|
634
|
-
#
|
635
|
-
#
|
636
|
-
# - If set to '
|
637
|
-
#
|
638
|
-
# -
|
639
|
-
#
|
640
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
632
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
633
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
634
|
+
# will use 'default'.
|
635
|
+
# - If set to 'default', then the request will be processed with the standard
|
636
|
+
# pricing and performance for the selected model.
|
637
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
638
|
+
# 'priority', then the request will be processed with the corresponding service
|
639
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
640
|
+
# Priority processing.
|
641
641
|
# - When not set, the default behavior is 'auto'.
|
642
642
|
#
|
643
|
-
# When
|
644
|
-
#
|
643
|
+
# When the `service_tier` parameter is set, the response body will include the
|
644
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
645
|
+
# request. This response value may be different from the value set in the
|
646
|
+
# parameter.
|
645
647
|
service_tier: nil,
|
646
648
|
# Not supported with latest reasoning models `o3` and `o4-mini`.
|
647
649
|
#
|
@@ -651,6 +653,8 @@ module OpenAI
|
|
651
653
|
# Whether or not to store the output of this chat completion request for use in
|
652
654
|
# our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
653
655
|
# or [evals](https://platform.openai.com/docs/guides/evals) products.
|
656
|
+
#
|
657
|
+
# Supports text and image inputs. Note: image inputs over 10MB will be dropped.
|
654
658
|
store: nil,
|
655
659
|
# Options for streaming response. Only set this when you set `stream: true`.
|
656
660
|
stream_options: nil,
|
@@ -1008,23 +1012,23 @@ module OpenAI
|
|
1008
1012
|
end
|
1009
1013
|
end
|
1010
1014
|
|
1011
|
-
# Specifies the
|
1012
|
-
# relevant for customers subscribed to the scale tier service:
|
1015
|
+
# Specifies the processing type used for serving the request.
|
1013
1016
|
#
|
1014
|
-
# - If set to 'auto',
|
1015
|
-
#
|
1016
|
-
#
|
1017
|
-
#
|
1018
|
-
#
|
1019
|
-
# - If set to '
|
1020
|
-
#
|
1021
|
-
# -
|
1022
|
-
#
|
1023
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
1017
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
1018
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
1019
|
+
# will use 'default'.
|
1020
|
+
# - If set to 'default', then the request will be processed with the standard
|
1021
|
+
# pricing and performance for the selected model.
|
1022
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
1023
|
+
# 'priority', then the request will be processed with the corresponding service
|
1024
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
1025
|
+
# Priority processing.
|
1024
1026
|
# - When not set, the default behavior is 'auto'.
|
1025
1027
|
#
|
1026
|
-
# When
|
1027
|
-
#
|
1028
|
+
# When the `service_tier` parameter is set, the response body will include the
|
1029
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
1030
|
+
# request. This response value may be different from the value set in the
|
1031
|
+
# parameter.
|
1028
1032
|
module ServiceTier
|
1029
1033
|
extend OpenAI::Internal::Type::Enum
|
1030
1034
|
|
@@ -1054,6 +1058,11 @@ module OpenAI
|
|
1054
1058
|
:scale,
|
1055
1059
|
OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
|
1056
1060
|
)
|
1061
|
+
PRIORITY =
|
1062
|
+
T.let(
|
1063
|
+
:priority,
|
1064
|
+
OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
|
1065
|
+
)
|
1057
1066
|
|
1058
1067
|
sig do
|
1059
1068
|
override.returns(
|
@@ -13,56 +13,125 @@ module OpenAI
|
|
13
13
|
)
|
14
14
|
end
|
15
15
|
|
16
|
-
|
17
|
-
|
18
|
-
|
16
|
+
sig do
|
17
|
+
returns(
|
18
|
+
T::Array[
|
19
|
+
OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
|
20
|
+
]
|
21
|
+
)
|
22
|
+
end
|
23
|
+
attr_accessor :data
|
19
24
|
|
20
|
-
|
21
|
-
|
22
|
-
attr_accessor :created_at
|
25
|
+
sig { returns(T::Boolean) }
|
26
|
+
attr_accessor :has_more
|
23
27
|
|
24
|
-
# The object type, which is always "checkpoint.permission".
|
25
28
|
sig { returns(Symbol) }
|
26
29
|
attr_accessor :object
|
27
30
|
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
+
sig { returns(T.nilable(String)) }
|
32
|
+
attr_accessor :first_id
|
33
|
+
|
34
|
+
sig { returns(T.nilable(String)) }
|
35
|
+
attr_accessor :last_id
|
31
36
|
|
32
|
-
# The `checkpoint.permission` object represents a permission for a fine-tuned
|
33
|
-
# model checkpoint.
|
34
37
|
sig do
|
35
38
|
params(
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
+
data:
|
40
|
+
T::Array[
|
41
|
+
OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data::OrHash
|
42
|
+
],
|
43
|
+
has_more: T::Boolean,
|
44
|
+
first_id: T.nilable(String),
|
45
|
+
last_id: T.nilable(String),
|
39
46
|
object: Symbol
|
40
47
|
).returns(T.attached_class)
|
41
48
|
end
|
42
49
|
def self.new(
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
project_id:,
|
49
|
-
# The object type, which is always "checkpoint.permission".
|
50
|
-
object: :"checkpoint.permission"
|
50
|
+
data:,
|
51
|
+
has_more:,
|
52
|
+
first_id: nil,
|
53
|
+
last_id: nil,
|
54
|
+
object: :list
|
51
55
|
)
|
52
56
|
end
|
53
57
|
|
54
58
|
sig do
|
55
59
|
override.returns(
|
56
60
|
{
|
57
|
-
|
58
|
-
|
61
|
+
data:
|
62
|
+
T::Array[
|
63
|
+
OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
|
64
|
+
],
|
65
|
+
has_more: T::Boolean,
|
59
66
|
object: Symbol,
|
60
|
-
|
67
|
+
first_id: T.nilable(String),
|
68
|
+
last_id: T.nilable(String)
|
61
69
|
}
|
62
70
|
)
|
63
71
|
end
|
64
72
|
def to_hash
|
65
73
|
end
|
74
|
+
|
75
|
+
class Data < OpenAI::Internal::Type::BaseModel
|
76
|
+
OrHash =
|
77
|
+
T.type_alias do
|
78
|
+
T.any(
|
79
|
+
OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data,
|
80
|
+
OpenAI::Internal::AnyHash
|
81
|
+
)
|
82
|
+
end
|
83
|
+
|
84
|
+
# The permission identifier, which can be referenced in the API endpoints.
|
85
|
+
sig { returns(String) }
|
86
|
+
attr_accessor :id
|
87
|
+
|
88
|
+
# The Unix timestamp (in seconds) for when the permission was created.
|
89
|
+
sig { returns(Integer) }
|
90
|
+
attr_accessor :created_at
|
91
|
+
|
92
|
+
# The object type, which is always "checkpoint.permission".
|
93
|
+
sig { returns(Symbol) }
|
94
|
+
attr_accessor :object
|
95
|
+
|
96
|
+
# The project identifier that the permission is for.
|
97
|
+
sig { returns(String) }
|
98
|
+
attr_accessor :project_id
|
99
|
+
|
100
|
+
# The `checkpoint.permission` object represents a permission for a fine-tuned
|
101
|
+
# model checkpoint.
|
102
|
+
sig do
|
103
|
+
params(
|
104
|
+
id: String,
|
105
|
+
created_at: Integer,
|
106
|
+
project_id: String,
|
107
|
+
object: Symbol
|
108
|
+
).returns(T.attached_class)
|
109
|
+
end
|
110
|
+
def self.new(
|
111
|
+
# The permission identifier, which can be referenced in the API endpoints.
|
112
|
+
id:,
|
113
|
+
# The Unix timestamp (in seconds) for when the permission was created.
|
114
|
+
created_at:,
|
115
|
+
# The project identifier that the permission is for.
|
116
|
+
project_id:,
|
117
|
+
# The object type, which is always "checkpoint.permission".
|
118
|
+
object: :"checkpoint.permission"
|
119
|
+
)
|
120
|
+
end
|
121
|
+
|
122
|
+
sig do
|
123
|
+
override.returns(
|
124
|
+
{
|
125
|
+
id: String,
|
126
|
+
created_at: Integer,
|
127
|
+
object: Symbol,
|
128
|
+
project_id: String
|
129
|
+
}
|
130
|
+
)
|
131
|
+
end
|
132
|
+
def to_hash
|
133
|
+
end
|
134
|
+
end
|
66
135
|
end
|
67
136
|
end
|
68
137
|
end
|
@@ -12,6 +12,18 @@ module OpenAI
|
|
12
12
|
sig { returns(Integer) }
|
13
13
|
attr_accessor :created
|
14
14
|
|
15
|
+
# The background parameter used for the image generation. Either `transparent` or
|
16
|
+
# `opaque`.
|
17
|
+
sig do
|
18
|
+
returns(T.nilable(OpenAI::ImagesResponse::Background::TaggedSymbol))
|
19
|
+
end
|
20
|
+
attr_reader :background
|
21
|
+
|
22
|
+
sig do
|
23
|
+
params(background: OpenAI::ImagesResponse::Background::OrSymbol).void
|
24
|
+
end
|
25
|
+
attr_writer :background
|
26
|
+
|
15
27
|
# The list of generated images.
|
16
28
|
sig { returns(T.nilable(T::Array[OpenAI::Image])) }
|
17
29
|
attr_reader :data
|
@@ -19,6 +31,34 @@ module OpenAI
|
|
19
31
|
sig { params(data: T::Array[OpenAI::Image::OrHash]).void }
|
20
32
|
attr_writer :data
|
21
33
|
|
34
|
+
# The output format of the image generation. Either `png`, `webp`, or `jpeg`.
|
35
|
+
sig do
|
36
|
+
returns(T.nilable(OpenAI::ImagesResponse::OutputFormat::TaggedSymbol))
|
37
|
+
end
|
38
|
+
attr_reader :output_format
|
39
|
+
|
40
|
+
sig do
|
41
|
+
params(
|
42
|
+
output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol
|
43
|
+
).void
|
44
|
+
end
|
45
|
+
attr_writer :output_format
|
46
|
+
|
47
|
+
# The quality of the image generated. Either `low`, `medium`, or `high`.
|
48
|
+
sig { returns(T.nilable(OpenAI::ImagesResponse::Quality::TaggedSymbol)) }
|
49
|
+
attr_reader :quality
|
50
|
+
|
51
|
+
sig { params(quality: OpenAI::ImagesResponse::Quality::OrSymbol).void }
|
52
|
+
attr_writer :quality
|
53
|
+
|
54
|
+
# The size of the image generated. Either `1024x1024`, `1024x1536`, or
|
55
|
+
# `1536x1024`.
|
56
|
+
sig { returns(T.nilable(OpenAI::ImagesResponse::Size::TaggedSymbol)) }
|
57
|
+
attr_reader :size
|
58
|
+
|
59
|
+
sig { params(size: OpenAI::ImagesResponse::Size::OrSymbol).void }
|
60
|
+
attr_writer :size
|
61
|
+
|
22
62
|
# For `gpt-image-1` only, the token usage information for the image generation.
|
23
63
|
sig { returns(T.nilable(OpenAI::ImagesResponse::Usage)) }
|
24
64
|
attr_reader :usage
|
@@ -30,15 +70,29 @@ module OpenAI
|
|
30
70
|
sig do
|
31
71
|
params(
|
32
72
|
created: Integer,
|
73
|
+
background: OpenAI::ImagesResponse::Background::OrSymbol,
|
33
74
|
data: T::Array[OpenAI::Image::OrHash],
|
75
|
+
output_format: OpenAI::ImagesResponse::OutputFormat::OrSymbol,
|
76
|
+
quality: OpenAI::ImagesResponse::Quality::OrSymbol,
|
77
|
+
size: OpenAI::ImagesResponse::Size::OrSymbol,
|
34
78
|
usage: OpenAI::ImagesResponse::Usage::OrHash
|
35
79
|
).returns(T.attached_class)
|
36
80
|
end
|
37
81
|
def self.new(
|
38
82
|
# The Unix timestamp (in seconds) of when the image was created.
|
39
83
|
created:,
|
84
|
+
# The background parameter used for the image generation. Either `transparent` or
|
85
|
+
# `opaque`.
|
86
|
+
background: nil,
|
40
87
|
# The list of generated images.
|
41
88
|
data: nil,
|
89
|
+
# The output format of the image generation. Either `png`, `webp`, or `jpeg`.
|
90
|
+
output_format: nil,
|
91
|
+
# The quality of the image generated. Either `low`, `medium`, or `high`.
|
92
|
+
quality: nil,
|
93
|
+
# The size of the image generated. Either `1024x1024`, `1024x1536`, or
|
94
|
+
# `1536x1024`.
|
95
|
+
size: nil,
|
42
96
|
# For `gpt-image-1` only, the token usage information for the image generation.
|
43
97
|
usage: nil
|
44
98
|
)
|
@@ -48,7 +102,11 @@ module OpenAI
|
|
48
102
|
override.returns(
|
49
103
|
{
|
50
104
|
created: Integer,
|
105
|
+
background: OpenAI::ImagesResponse::Background::TaggedSymbol,
|
51
106
|
data: T::Array[OpenAI::Image],
|
107
|
+
output_format: OpenAI::ImagesResponse::OutputFormat::TaggedSymbol,
|
108
|
+
quality: OpenAI::ImagesResponse::Quality::TaggedSymbol,
|
109
|
+
size: OpenAI::ImagesResponse::Size::TaggedSymbol,
|
52
110
|
usage: OpenAI::ImagesResponse::Usage
|
53
111
|
}
|
54
112
|
)
|
@@ -56,6 +114,94 @@ module OpenAI
|
|
56
114
|
def to_hash
|
57
115
|
end
|
58
116
|
|
117
|
+
# The background parameter used for the image generation. Either `transparent` or
|
118
|
+
# `opaque`.
|
119
|
+
module Background
|
120
|
+
extend OpenAI::Internal::Type::Enum
|
121
|
+
|
122
|
+
TaggedSymbol =
|
123
|
+
T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Background) }
|
124
|
+
OrSymbol = T.type_alias { T.any(Symbol, String) }
|
125
|
+
|
126
|
+
TRANSPARENT =
|
127
|
+
T.let(:transparent, OpenAI::ImagesResponse::Background::TaggedSymbol)
|
128
|
+
OPAQUE =
|
129
|
+
T.let(:opaque, OpenAI::ImagesResponse::Background::TaggedSymbol)
|
130
|
+
|
131
|
+
sig do
|
132
|
+
override.returns(
|
133
|
+
T::Array[OpenAI::ImagesResponse::Background::TaggedSymbol]
|
134
|
+
)
|
135
|
+
end
|
136
|
+
def self.values
|
137
|
+
end
|
138
|
+
end
|
139
|
+
|
140
|
+
# The output format of the image generation. Either `png`, `webp`, or `jpeg`.
|
141
|
+
module OutputFormat
|
142
|
+
extend OpenAI::Internal::Type::Enum
|
143
|
+
|
144
|
+
TaggedSymbol =
|
145
|
+
T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::OutputFormat) }
|
146
|
+
OrSymbol = T.type_alias { T.any(Symbol, String) }
|
147
|
+
|
148
|
+
PNG = T.let(:png, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
|
149
|
+
WEBP = T.let(:webp, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
|
150
|
+
JPEG = T.let(:jpeg, OpenAI::ImagesResponse::OutputFormat::TaggedSymbol)
|
151
|
+
|
152
|
+
sig do
|
153
|
+
override.returns(
|
154
|
+
T::Array[OpenAI::ImagesResponse::OutputFormat::TaggedSymbol]
|
155
|
+
)
|
156
|
+
end
|
157
|
+
def self.values
|
158
|
+
end
|
159
|
+
end
|
160
|
+
|
161
|
+
# The quality of the image generated. Either `low`, `medium`, or `high`.
|
162
|
+
module Quality
|
163
|
+
extend OpenAI::Internal::Type::Enum
|
164
|
+
|
165
|
+
TaggedSymbol =
|
166
|
+
T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Quality) }
|
167
|
+
OrSymbol = T.type_alias { T.any(Symbol, String) }
|
168
|
+
|
169
|
+
LOW = T.let(:low, OpenAI::ImagesResponse::Quality::TaggedSymbol)
|
170
|
+
MEDIUM = T.let(:medium, OpenAI::ImagesResponse::Quality::TaggedSymbol)
|
171
|
+
HIGH = T.let(:high, OpenAI::ImagesResponse::Quality::TaggedSymbol)
|
172
|
+
|
173
|
+
sig do
|
174
|
+
override.returns(
|
175
|
+
T::Array[OpenAI::ImagesResponse::Quality::TaggedSymbol]
|
176
|
+
)
|
177
|
+
end
|
178
|
+
def self.values
|
179
|
+
end
|
180
|
+
end
|
181
|
+
|
182
|
+
# The size of the image generated. Either `1024x1024`, `1024x1536`, or
|
183
|
+
# `1536x1024`.
|
184
|
+
module Size
|
185
|
+
extend OpenAI::Internal::Type::Enum
|
186
|
+
|
187
|
+
TaggedSymbol =
|
188
|
+
T.type_alias { T.all(Symbol, OpenAI::ImagesResponse::Size) }
|
189
|
+
OrSymbol = T.type_alias { T.any(Symbol, String) }
|
190
|
+
|
191
|
+
SIZE_1024X1024 =
|
192
|
+
T.let(:"1024x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
|
193
|
+
SIZE_1024X1536 =
|
194
|
+
T.let(:"1024x1536", OpenAI::ImagesResponse::Size::TaggedSymbol)
|
195
|
+
SIZE_1536X1024 =
|
196
|
+
T.let(:"1536x1024", OpenAI::ImagesResponse::Size::TaggedSymbol)
|
197
|
+
|
198
|
+
sig do
|
199
|
+
override.returns(T::Array[OpenAI::ImagesResponse::Size::TaggedSymbol])
|
200
|
+
end
|
201
|
+
def self.values
|
202
|
+
end
|
203
|
+
end
|
204
|
+
|
59
205
|
class Usage < OpenAI::Internal::Type::BaseModel
|
60
206
|
OrHash =
|
61
207
|
T.type_alias do
|