openai 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
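The largest addition in this release is the webhooks surface: a new `client.webhooks` resource (data/lib/openai/resources/webhooks.rb) plus the typed `OpenAI::Models::Webhooks::*WebhookEvent` models listed above. As a rough sketch of how that surface is meant to be consumed — the `webhook_secret` client option and the exact `unwrap` argument shape here are assumptions inferred from the file list, not confirmed by this diff, so check the updated README:

```ruby
require "openai"
require "sinatra"

# Assumption: the client accepts a webhook secret used for signature checks.
client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"]
)

post "/openai-webhooks" do
  payload = request.body.read

  # Assumption: `unwrap` verifies the signature headers and returns one of the
  # typed OpenAI::Models::Webhooks::*WebhookEvent models added in this release.
  event = client.webhooks.unwrap(payload, request.env)

  case event
  when OpenAI::Models::Webhooks::BatchCompletedWebhookEvent
    puts "batch #{event.data.id} completed"
  when OpenAI::Models::Webhooks::ResponseFailedWebhookEvent
    puts "response #{event.data.id} failed"
  end
  200
end
```

The hunks below cover the remaining notable changes.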
data/lib/openai/models/chat/completion_create_params.rb

```diff
@@ -219,23 +219,23 @@ module OpenAI
 optional :seed, Integer, nil?: true

 # @!attribute service_tier
-# Specifies the
-#
-#
-#
-#
-# - If set to '
-#
-#
-#
-# tier
-#
-# service tier.
-# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+# Specifies the processing type used for serving the request.
+#
+# - If set to 'auto', then the request will be processed with the service tier
+# configured in the Project settings. Unless otherwise configured, the Project
+# will use 'default'.
+# - If set to 'default', then the requset will be processed with the standard
+# pricing and performance for the selected model.
+# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+# 'priority', then the request will be processed with the corresponding service
+# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+# Priority processing.
 # - When not set, the default behavior is 'auto'.
 #
-# When
-#
+# When the `service_tier` parameter is set, the response body will include the
+# `service_tier` value based on the processing mode actually used to serve the
+# request. This response value may be different from the value set in the
+# parameter.
 #
 # @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil]
 optional :service_tier, enum: -> { OpenAI::Chat::CompletionCreateParams::ServiceTier }, nil?: true
```
```diff
@@ -254,6 +254,8 @@ module OpenAI
 # our [model distillation](https://platform.openai.com/docs/guides/distillation)
 # or [evals](https://platform.openai.com/docs/guides/evals) products.
 #
+# Supports text and image inputs. Note: image inputs over 10MB will be dropped.
+#
 # @return [Boolean, nil]
 optional :store, OpenAI::Internal::Type::Boolean, nil?: true

```
```diff
@@ -375,7 +377,7 @@ module OpenAI
 #
 # @param seed [Integer, nil] This feature is in Beta.
 #
-# @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the
+# @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
 #
 # @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
 #
```
```diff
@@ -546,23 +548,23 @@ module OpenAI
 # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
 end

-# Specifies the
-#
-#
-#
-#
-# - If set to '
-#
-#
-#
-# tier
-#
-# service tier.
-# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+# Specifies the processing type used for serving the request.
+#
+# - If set to 'auto', then the request will be processed with the service tier
+# configured in the Project settings. Unless otherwise configured, the Project
+# will use 'default'.
+# - If set to 'default', then the requset will be processed with the standard
+# pricing and performance for the selected model.
+# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+# 'priority', then the request will be processed with the corresponding service
+# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+# Priority processing.
 # - When not set, the default behavior is 'auto'.
 #
-# When
-#
+# When the `service_tier` parameter is set, the response body will include the
+# `service_tier` value based on the processing mode actually used to serve the
+# request. This response value may be different from the value set in the
+# parameter.
 module ServiceTier
 extend OpenAI::Internal::Type::Enum

```
```diff
@@ -570,6 +572,7 @@ module OpenAI
 DEFAULT = :default
 FLEX = :flex
 SCALE = :scale
+PRIORITY = :priority

 # @!method self.values
 # @return [Array<Symbol>]
```
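In practice the new `:priority` tier is just another accepted value for the existing `service_tier` parameter. A minimal sketch (the model name is illustrative):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# `service_tier: :priority` is newly accepted in 0.11.0; the response's own
# `service_tier` reports the tier actually used, which may differ.
completion = client.chat.completions.create(
  model: "gpt-4.1",
  messages: [{role: "user", content: "Say hello"}],
  service_tier: :priority
)

puts completion.choices.first.message.content
puts completion.service_tier # tier actually used to serve the request
```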
data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb

```diff
@@ -6,41 +6,76 @@ module OpenAI
 module Checkpoints
 # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve
 class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
-# @!attribute
-# The permission identifier, which can be referenced in the API endpoints.
+# @!attribute data
 #
-# @return [
-required :
+# @return [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>]
+required :data,
+-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data] }

-# @!attribute
-# The Unix timestamp (in seconds) for when the permission was created.
+# @!attribute has_more
 #
-# @return [
-required :
+# @return [Boolean]
+required :has_more, OpenAI::Internal::Type::Boolean

 # @!attribute object
-# The object type, which is always "checkpoint.permission".
 #
-# @return [Symbol, :
-required :object, const: :
+# @return [Symbol, :list]
+required :object, const: :list

-# @!attribute
-# The project identifier that the permission is for.
+# @!attribute first_id
 #
-# @return [String]
-
+# @return [String, nil]
+optional :first_id, String, nil?: true

-# @!
-# The `checkpoint.permission` object represents a permission for a fine-tuned
-# model checkpoint.
+# @!attribute last_id
 #
-# @
-
-
-#
-# @param
-#
-# @param
+# @return [String, nil]
+optional :last_id, String, nil?: true
+
+# @!method initialize(data:, has_more:, first_id: nil, last_id: nil, object: :list)
+# @param data [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>]
+# @param has_more [Boolean]
+# @param first_id [String, nil]
+# @param last_id [String, nil]
+# @param object [Symbol, :list]
+
+class Data < OpenAI::Internal::Type::BaseModel
+# @!attribute id
+# The permission identifier, which can be referenced in the API endpoints.
+#
+# @return [String]
+required :id, String
+
+# @!attribute created_at
+# The Unix timestamp (in seconds) for when the permission was created.
+#
+# @return [Integer]
+required :created_at, Integer
+
+# @!attribute object
+# The object type, which is always "checkpoint.permission".
+#
+# @return [Symbol, :"checkpoint.permission"]
+required :object, const: :"checkpoint.permission"
+
+# @!attribute project_id
+# The project identifier that the permission is for.
+#
+# @return [String]
+required :project_id, String
+
+# @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission")
+# The `checkpoint.permission` object represents a permission for a fine-tuned
+# model checkpoint.
+#
+# @param id [String] The permission identifier, which can be referenced in the API endpoints.
+#
+# @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created.
+#
+# @param project_id [String] The project identifier that the permission is for.
+#
+# @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
+end
 end
 end
 end
```
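This is a breaking shape change: `Permissions#retrieve` now returns a list envelope (`object: :list`, `has_more`, `first_id`/`last_id`) with the old top-level permission fields moved into nested `Data` records. A sketch of consuming the new shape, reusing the client from the sketch above (the checkpoint identifier is a placeholder):

```ruby
# 0.9.0 returned the permission fields at the top level; 0.11.0 nests them
# under `data` inside a list object.
page = client.fine_tuning.checkpoints.permissions.retrieve(
  "ft:gpt-4o-mini:org::placeholder:ckpt-step-100" # illustrative checkpoint ID
)

page.data.each do |permission|
  puts "#{permission.id} -> project #{permission.project_id}"
end
puts "more pages? #{page.has_more}"
```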
data/lib/openai/models/images_response.rb

```diff
@@ -10,19 +10,45 @@ module OpenAI
 # @return [Integer]
 required :created, Integer

+# @!attribute background
+# The background parameter used for the image generation. Either `transparent` or
+# `opaque`.
+#
+# @return [Symbol, OpenAI::Models::ImagesResponse::Background, nil]
+optional :background, enum: -> { OpenAI::ImagesResponse::Background }
+
 # @!attribute data
 # The list of generated images.
 #
 # @return [Array<OpenAI::Models::Image>, nil]
 optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Image] }

+# @!attribute output_format
+# The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+#
+# @return [Symbol, OpenAI::Models::ImagesResponse::OutputFormat, nil]
+optional :output_format, enum: -> { OpenAI::ImagesResponse::OutputFormat }
+
+# @!attribute quality
+# The quality of the image generated. Either `low`, `medium`, or `high`.
+#
+# @return [Symbol, OpenAI::Models::ImagesResponse::Quality, nil]
+optional :quality, enum: -> { OpenAI::ImagesResponse::Quality }
+
+# @!attribute size
+# The size of the image generated. Either `1024x1024`, `1024x1536`, or
+# `1536x1024`.
+#
+# @return [Symbol, OpenAI::Models::ImagesResponse::Size, nil]
+optional :size, enum: -> { OpenAI::ImagesResponse::Size }
+
 # @!attribute usage
 # For `gpt-image-1` only, the token usage information for the image generation.
 #
 # @return [OpenAI::Models::ImagesResponse::Usage, nil]
 optional :usage, -> { OpenAI::ImagesResponse::Usage }

-# @!method initialize(created:, data: nil, usage: nil)
+# @!method initialize(created:, background: nil, data: nil, output_format: nil, quality: nil, size: nil, usage: nil)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::ImagesResponse} for more details.
 #
```
```diff
@@ -30,10 +56,75 @@ module OpenAI
 #
 # @param created [Integer] The Unix timestamp (in seconds) of when the image was created.
 #
+# @param background [Symbol, OpenAI::Models::ImagesResponse::Background] The background parameter used for the image generation. Either `transparent` or
+#
 # @param data [Array<OpenAI::Models::Image>] The list of generated images.
 #
+# @param output_format [Symbol, OpenAI::Models::ImagesResponse::OutputFormat] The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+#
+# @param quality [Symbol, OpenAI::Models::ImagesResponse::Quality] The quality of the image generated. Either `low`, `medium`, or `high`.
+#
+# @param size [Symbol, OpenAI::Models::ImagesResponse::Size] The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`
+#
 # @param usage [OpenAI::Models::ImagesResponse::Usage] For `gpt-image-1` only, the token usage information for the image generation.

+# The background parameter used for the image generation. Either `transparent` or
+# `opaque`.
+#
+# @see OpenAI::Models::ImagesResponse#background
+module Background
+extend OpenAI::Internal::Type::Enum
+
+TRANSPARENT = :transparent
+OPAQUE = :opaque
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The output format of the image generation. Either `png`, `webp`, or `jpeg`.
+#
+# @see OpenAI::Models::ImagesResponse#output_format
+module OutputFormat
+extend OpenAI::Internal::Type::Enum
+
+PNG = :png
+WEBP = :webp
+JPEG = :jpeg
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The quality of the image generated. Either `low`, `medium`, or `high`.
+#
+# @see OpenAI::Models::ImagesResponse#quality
+module Quality
+extend OpenAI::Internal::Type::Enum
+
+LOW = :low
+MEDIUM = :medium
+HIGH = :high
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The size of the image generated. Either `1024x1024`, `1024x1536`, or
+# `1536x1024`.
+#
+# @see OpenAI::Models::ImagesResponse#size
+module Size
+extend OpenAI::Internal::Type::Enum
+
+SIZE_1024X1024 = :"1024x1024"
+SIZE_1024X1536 = :"1024x1536"
+SIZE_1536X1024 = :"1536x1024"
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
 # @see OpenAI::Models::ImagesResponse#usage
 class Usage < OpenAI::Internal::Type::BaseModel
 # @!attribute input_tokens
```
data/lib/openai/models/responses/response.rb

```diff
@@ -100,7 +100,7 @@ module OpenAI
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
 #
-# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction]
+# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp]
 required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }

 # @!attribute tools
```
```diff
@@ -147,6 +147,15 @@ module OpenAI
 # @return [Integer, nil]
 optional :max_output_tokens, Integer, nil?: true

+# @!attribute max_tool_calls
+# The maximum number of total calls to built-in tools that can be processed in a
+# response. This maximum number applies across all built-in tool calls, not per
+# individual tool. Any further attempts to call a tool by the model will be
+# ignored.
+#
+# @return [Integer, nil]
+optional :max_tool_calls, Integer, nil?: true
+
 # @!attribute previous_response_id
 # The unique ID of the previous response to the model. Use this to create
 # multi-turn conversations. Learn more about
```
```diff
@@ -172,23 +181,23 @@ module OpenAI
 optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

 # @!attribute service_tier
-# Specifies the
-#
-#
-#
-#
-# - If set to '
-#
-#
-#
-# tier
-#
-# service tier.
-# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+# Specifies the processing type used for serving the request.
+#
+# - If set to 'auto', then the request will be processed with the service tier
+# configured in the Project settings. Unless otherwise configured, the Project
+# will use 'default'.
+# - If set to 'default', then the requset will be processed with the standard
+# pricing and performance for the selected model.
+# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+# 'priority', then the request will be processed with the corresponding service
+# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+# Priority processing.
 # - When not set, the default behavior is 'auto'.
 #
-# When
-#
+# When the `service_tier` parameter is set, the response body will include the
+# `service_tier` value based on the processing mode actually used to serve the
+# request. This response value may be different from the value set in the
+# parameter.
 #
 # @return [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil]
 optional :service_tier, enum: -> { OpenAI::Responses::Response::ServiceTier }, nil?: true
```
```diff
@@ -210,6 +219,13 @@ module OpenAI
 # @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
 optional :text, -> { OpenAI::Responses::ResponseTextConfig }

+# @!attribute top_logprobs
+# An integer between 0 and 20 specifying the number of most likely tokens to
+# return at each token position, each with an associated log probability.
+#
+# @return [Integer, nil]
+optional :top_logprobs, Integer, nil?: true
+
 # @!attribute truncation
 # The truncation strategy to use for the model response.
 #
```
```diff
@@ -237,7 +253,7 @@ module OpenAI
 # @return [String, nil]
 optional :user, String

-# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
+# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::Response} for more details.
 #
```
```diff
@@ -261,7 +277,7 @@ module OpenAI
 #
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
-# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
 #
 # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
 #
```
```diff
@@ -271,18 +287,22 @@ module OpenAI
 #
 # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
 #
+# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+#
 # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
 #
 # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
 #
 # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
 #
-# @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the
+# @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
 #
 # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
 #
 # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
 #
+# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+#
 # @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] The truncation strategy to use for the model response.
 #
 # @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
```
```diff
@@ -369,27 +389,30 @@ module OpenAI
 # Use this option to force the model to call a specific function.
 variant -> { OpenAI::Responses::ToolChoiceFunction }

+# Use this option to force the model to call a specific tool on a remote MCP server.
+variant -> { OpenAI::Responses::ToolChoiceMcp }
+
 # @!method self.variants
-# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
+# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
 end

-# Specifies the
-#
-#
-#
-#
-# - If set to '
-#
-#
-#
-# tier
-#
-# service tier.
-# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+# Specifies the processing type used for serving the request.
+#
+# - If set to 'auto', then the request will be processed with the service tier
+# configured in the Project settings. Unless otherwise configured, the Project
+# will use 'default'.
+# - If set to 'default', then the requset will be processed with the standard
+# pricing and performance for the selected model.
+# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+# 'priority', then the request will be processed with the corresponding service
+# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+# Priority processing.
 # - When not set, the default behavior is 'auto'.
 #
-# When
-#
+# When the `service_tier` parameter is set, the response body will include the
+# `service_tier` value based on the processing mode actually used to serve the
+# request. This response value may be different from the value set in the
+# parameter.
 #
 # @see OpenAI::Models::Responses::Response#service_tier
 module ServiceTier
```
```diff
@@ -399,6 +422,7 @@ module OpenAI
 DEFAULT = :default
 FLEX = :flex
 SCALE = :scale
+PRIORITY = :priority

 # @!method self.values
 # @return [Array<Symbol>]
```
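The same `max_tool_calls` and `top_logprobs` fields appear on the create side (data/lib/openai/models/responses/response_create_params.rb in the list above), so they can be set per request. A sketch, assuming the create params mirror the model attributes shown here (model, input, and tool config are illustrative):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4.1",
  input: "What changed in Ruby 3.4?",
  tools: [{type: :web_search_preview}], # illustrative built-in tool
  max_tool_calls: 2,  # new: cap on total built-in tool calls per response
  top_logprobs: 3,    # new: log probabilities for the top tokens
  service_tier: :priority
)

puts response.output_text
```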
data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb

```diff
@@ -5,19 +5,26 @@ module OpenAI
 module Responses
 class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
 # @!attribute delta
-# The partial code snippet
+# The partial code snippet being streamed by the code interpreter.
 #
 # @return [String]
 required :delta, String

+# @!attribute item_id
+# The unique identifier of the code interpreter tool call item.
+#
+# @return [String]
+required :item_id, String
+
 # @!attribute output_index
-# The index of the output item
+# The index of the output item in the response for which the code is being
+# streamed.
 #
 # @return [Integer]
 required :output_index, Integer

 # @!attribute sequence_number
-# The sequence number of this event.
+# The sequence number of this event, used to order streaming events.
 #
 # @return [Integer]
 required :sequence_number, Integer
```
```diff
@@ -28,18 +35,20 @@ module OpenAI
 # @return [Symbol, :"response.code_interpreter_call_code.delta"]
 required :type, const: :"response.code_interpreter_call_code.delta"

-# @!method initialize(delta:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
+# @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more
 # details.
 #
-# Emitted when a partial code snippet is
+# Emitted when a partial code snippet is streamed by the code interpreter.
+#
+# @param delta [String] The partial code snippet being streamed by the code interpreter.
 #
-# @param
+# @param item_id [String] The unique identifier of the code interpreter tool call item.
 #
-# @param output_index [Integer] The index of the output item
+# @param output_index [Integer] The index of the output item in the response for which the code is being streame
 #
-# @param sequence_number [Integer] The sequence number of this event.
+# @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
 #
 # @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`.
 end
```
data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb

```diff
@@ -10,14 +10,20 @@ module OpenAI
 # @return [String]
 required :code, String

+# @!attribute item_id
+# The unique identifier of the code interpreter tool call item.
+#
+# @return [String]
+required :item_id, String
+
 # @!attribute output_index
-# The index of the output item
+# The index of the output item in the response for which the code is finalized.
 #
 # @return [Integer]
 required :output_index, Integer

 # @!attribute sequence_number
-# The sequence number of this event.
+# The sequence number of this event, used to order streaming events.
 #
 # @return [Integer]
 required :sequence_number, Integer
```
```diff
@@ -28,18 +34,16 @@ module OpenAI
 # @return [Symbol, :"response.code_interpreter_call_code.done"]
 required :type, const: :"response.code_interpreter_call_code.done"

-# @!method initialize(code:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done")
-#
-# {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent} for more
-# details.
-#
-# Emitted when code snippet output is finalized by the code interpreter.
+# @!method initialize(code:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done")
+# Emitted when the code snippet is finalized by the code interpreter.
 #
 # @param code [String] The final code snippet output by the code interpreter.
 #
-# @param
+# @param item_id [String] The unique identifier of the code interpreter tool call item.
+#
+# @param output_index [Integer] The index of the output item in the response for which the code is finalized.
 #
-# @param sequence_number [Integer] The sequence number of this event.
+# @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
 #
 # @param type [Symbol, :"response.code_interpreter_call_code.done"] The type of the event. Always `response.code_interpreter_call_code.done`.
 end
```
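The new `item_id` field means a streaming consumer no longer has to rely on output indices alone to stitch code deltas together. A sketch using the lower-level raw stream — the tool configuration and event-handling shape here are assumptions, not taken from this diff:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Accumulate streamed code snippets keyed by the tool call they belong to.
code_by_item = Hash.new { |h, k| h[k] = +"" }

stream = client.responses.stream_raw(
  model: "gpt-4.1",
  input: "Use Python to sum the first 100 primes.",
  tools: [{type: :code_interpreter, container: {type: :auto}}] # illustrative
)

stream.each do |event|
  case event
  when OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent
    # New in this release: item_id identifies which tool call the delta extends.
    code_by_item[event.item_id] << event.delta
  when OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent
    puts "final code for #{event.item_id}:\n#{event.code}"
  end
end
```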