openai 0.13.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +1 -1
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +34 -10
- data/lib/openai/models/eval_create_params.rb +50 -5
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +50 -5
- data/lib/openai/models/evals/run_cancel_response.rb +48 -5
- data/lib/openai/models/evals/run_create_params.rb +50 -5
- data/lib/openai/models/evals/run_create_response.rb +48 -5
- data/lib/openai/models/evals/run_list_response.rb +48 -5
- data/lib/openai/models/evals/run_retrieve_response.rb +48 -5
- data/lib/openai/models/graders/label_model_grader.rb +48 -5
- data/lib/openai/models/graders/score_model_grader.rb +48 -5
- data/lib/openai/models/image_edit_completed_event.rb +198 -0
- data/lib/openai/models/image_edit_params.rb +36 -1
- data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
- data/lib/openai/models/image_edit_stream_event.rb +21 -0
- data/lib/openai/models/image_gen_completed_event.rb +198 -0
- data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
- data/lib/openai/models/image_gen_stream_event.rb +21 -0
- data/lib/openai/models/image_generate_params.rb +13 -1
- data/lib/openai/models/images_response.rb +3 -0
- data/lib/openai/models/responses/response_output_refusal.rb +2 -2
- data/lib/openai/models/responses/tool.rb +30 -1
- data/lib/openai/models.rb +12 -0
- data/lib/openai/resources/images.rb +140 -2
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +6 -0
- data/rbi/openai/helpers/structured_output/json_schema_converter.rbi +4 -0
- data/rbi/openai/models/eval_create_params.rbi +76 -7
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +76 -7
- data/rbi/openai/models/evals/run_cancel_response.rbi +70 -5
- data/rbi/openai/models/evals/run_create_params.rbi +76 -7
- data/rbi/openai/models/evals/run_create_response.rbi +70 -5
- data/rbi/openai/models/evals/run_list_response.rbi +70 -5
- data/rbi/openai/models/evals/run_retrieve_response.rbi +70 -5
- data/rbi/openai/models/graders/label_model_grader.rbi +74 -7
- data/rbi/openai/models/graders/score_model_grader.rbi +74 -7
- data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
- data/rbi/openai/models/image_edit_params.rbi +51 -0
- data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
- data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
- data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
- data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
- data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
- data/rbi/openai/models/image_generate_params.rbi +12 -0
- data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
- data/rbi/openai/models/responses/tool.rbi +61 -0
- data/rbi/openai/models.rbi +12 -0
- data/rbi/openai/resources/images.rbi +225 -0
- data/sig/openai/models/eval_create_params.rbs +29 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +29 -0
- data/sig/openai/models/evals/run_cancel_response.rbs +33 -0
- data/sig/openai/models/evals/run_create_params.rbs +33 -0
- data/sig/openai/models/evals/run_create_response.rbs +33 -0
- data/sig/openai/models/evals/run_list_response.rbs +33 -0
- data/sig/openai/models/evals/run_retrieve_response.rbs +33 -0
- data/sig/openai/models/graders/label_model_grader.rbs +29 -0
- data/sig/openai/models/graders/score_model_grader.rbs +29 -0
- data/sig/openai/models/image_edit_completed_event.rbs +150 -0
- data/sig/openai/models/image_edit_params.rbs +21 -0
- data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_edit_stream_event.rbs +12 -0
- data/sig/openai/models/image_gen_completed_event.rbs +150 -0
- data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
- data/sig/openai/models/image_gen_stream_event.rbs +12 -0
- data/sig/openai/models/image_generate_params.rbs +5 -0
- data/sig/openai/models/responses/tool.rbs +16 -0
- data/sig/openai/models.rbs +12 -0
- data/sig/openai/resources/images.rbs +38 -0
- metadata +20 -2
data/lib/openai/models/evals/run_list_response.rb

```diff
@@ -457,9 +457,9 @@ module OpenAI
 
 class EvalItem < OpenAI::Internal::Type::BaseModel
 # @!attribute content
-#
+# Inputs to the model - can contain template strings.
 #
-# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText]
+# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
 required :content,
 union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
 
@@ -489,13 +489,13 @@ module OpenAI
 # `assistant` role are presumed to have been generated by the model in previous
 # interactions.
 #
-# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText]
+# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
 #
 # @param role [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
 #
 # @param type [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
 
-#
+# Inputs to the model - can contain template strings.
 #
 # @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
 module Content
@@ -510,6 +510,12 @@ module OpenAI
 # A text output from the model.
 variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
 
+# An image input to the model.
+variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
+
+# A list of inputs, each of which may be either an input text or input image object.
+variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
+
 class OutputText < OpenAI::Internal::Type::BaseModel
 # @!attribute text
 # The text output from the model.
@@ -535,8 +541,45 @@ module OpenAI
 # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
 end
 
+class InputImage < OpenAI::Internal::Type::BaseModel
+# @!attribute image_url
+# The URL of the image input.
+#
+# @return [String]
+required :image_url, String
+
+# @!attribute type
+# The type of the image input. Always `input_image`.
+#
+# @return [Symbol, :input_image]
+required :type, const: :input_image
+
+# @!attribute detail
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+#
+# @return [String, nil]
+optional :detail, String
+
+# @!method initialize(image_url:, detail: nil, type: :input_image)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
+# for more details.
+#
+# An image input to the model.
+#
+# @param image_url [String] The URL of the image input.
+#
+# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+#
+# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+end
+
 # @!method self.variants
-# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText)]
+# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
+
+# @type [OpenAI::Internal::Type::Converter]
+AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
 end
 
 # The role of the message input. One of `user`, `assistant`, `system`, or
```
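The hunks above widen the template message `content` union from text-only to text, image, or a mixed list. As a rough, hypothetical illustration of what that union now accepts (the template fields and values are illustrative, not taken from the gem), a run data source message could carry both text and an image reference:

```ruby
# Hypothetical eval template message; `content` may be a plain string,
# an output_text object, an input_image object, or an array mixing
# input_text and input_image objects (see the new InputImage model above).
message = {
  role: "user",
  type: "message",
  content: [
    { type: "input_text", text: "Answer the question about this picture: {{item.question}}" },
    { type: "input_image", image_url: "{{item.image_url}}", detail: "auto" }
  ]
}
```

The same change is applied to the remaining eval run responses and the grader models below.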
data/lib/openai/models/evals/run_retrieve_response.rb

```diff
@@ -461,9 +461,9 @@ module OpenAI
 
 class EvalItem < OpenAI::Internal::Type::BaseModel
 # @!attribute content
-#
+# Inputs to the model - can contain template strings.
 #
-# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText]
+# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
 required :content,
 union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
 
@@ -493,13 +493,13 @@ module OpenAI
 # `assistant` role are presumed to have been generated by the model in previous
 # interactions.
 #
-# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText]
+# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
 #
 # @param role [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
 #
 # @param type [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
 
-#
+# Inputs to the model - can contain template strings.
 #
 # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
 module Content
@@ -514,6 +514,12 @@ module OpenAI
 # A text output from the model.
 variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
 
+# An image input to the model.
+variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
+
+# A list of inputs, each of which may be either an input text or input image object.
+variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
+
 class OutputText < OpenAI::Internal::Type::BaseModel
 # @!attribute text
 # The text output from the model.
@@ -539,8 +545,45 @@ module OpenAI
 # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
 end
 
+class InputImage < OpenAI::Internal::Type::BaseModel
+# @!attribute image_url
+# The URL of the image input.
+#
+# @return [String]
+required :image_url, String
+
+# @!attribute type
+# The type of the image input. Always `input_image`.
+#
+# @return [Symbol, :input_image]
+required :type, const: :input_image
+
+# @!attribute detail
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+#
+# @return [String, nil]
+optional :detail, String
+
+# @!method initialize(image_url:, detail: nil, type: :input_image)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
+# for more details.
+#
+# An image input to the model.
+#
+# @param image_url [String] The URL of the image input.
+#
+# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+#
+# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+end
+
 # @!method self.variants
-# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText)]
+# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
+
+# @type [OpenAI::Internal::Type::Converter]
+AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
 end
 
 # The role of the message input. One of `user`, `assistant`, `system`, or
```
data/lib/openai/models/graders/label_model_grader.rb

```diff
@@ -57,9 +57,9 @@ module OpenAI
 
 class Input < OpenAI::Internal::Type::BaseModel
 # @!attribute content
-#
+# Inputs to the model - can contain template strings.
 #
-# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText]
+# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>]
 required :content, union: -> { OpenAI::Graders::LabelModelGrader::Input::Content }
 
 # @!attribute role
@@ -85,13 +85,13 @@ module OpenAI
 # `assistant` role are presumed to have been generated by the model in previous
 # interactions.
 #
-# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText]
+# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
 #
 # @param role [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
 #
 # @param type [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type] The type of the message input. Always `message`.
 
-#
+# Inputs to the model - can contain template strings.
 #
 # @see OpenAI::Models::Graders::LabelModelGrader::Input#content
 module Content
@@ -106,6 +106,12 @@ module OpenAI
 # A text output from the model.
 variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::OutputText }
 
+# An image input to the model.
+variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::InputImage }
+
+# A list of inputs, each of which may be either an input text or input image object.
+variant -> { OpenAI::Models::Graders::LabelModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray }
+
 class OutputText < OpenAI::Internal::Type::BaseModel
 # @!attribute text
 # The text output from the model.
@@ -131,8 +137,45 @@ module OpenAI
 # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
 end
 
+class InputImage < OpenAI::Internal::Type::BaseModel
+# @!attribute image_url
+# The URL of the image input.
+#
+# @return [String]
+required :image_url, String
+
+# @!attribute type
+# The type of the image input. Always `input_image`.
+#
+# @return [Symbol, :input_image]
+required :type, const: :input_image
+
+# @!attribute detail
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+#
+# @return [String, nil]
+optional :detail, String
+
+# @!method initialize(image_url:, detail: nil, type: :input_image)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage} for more
+# details.
+#
+# An image input to the model.
+#
+# @param image_url [String] The URL of the image input.
+#
+# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+#
+# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+end
+
 # @!method self.variants
-# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText)]
+# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>)]
+
+# @type [OpenAI::Internal::Type::Converter]
+AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
 end
 
 # The role of the message input. One of `user`, `assistant`, `system`, or
```
data/lib/openai/models/graders/score_model_grader.rb

```diff
@@ -57,9 +57,9 @@ module OpenAI
 
 class Input < OpenAI::Internal::Type::BaseModel
 # @!attribute content
-#
+# Inputs to the model - can contain template strings.
 #
-# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText]
+# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>]
 required :content, union: -> { OpenAI::Graders::ScoreModelGrader::Input::Content }
 
 # @!attribute role
@@ -85,13 +85,13 @@ module OpenAI
 # `assistant` role are presumed to have been generated by the model in previous
 # interactions.
 #
-# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText]
+# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
 #
 # @param role [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
 #
 # @param type [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type] The type of the message input. Always `message`.
 
-#
+# Inputs to the model - can contain template strings.
 #
 # @see OpenAI::Models::Graders::ScoreModelGrader::Input#content
 module Content
@@ -106,6 +106,12 @@ module OpenAI
 # A text output from the model.
 variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText }
 
+# An image input to the model.
+variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage }
+
+# A list of inputs, each of which may be either an input text or input image object.
+variant -> { OpenAI::Models::Graders::ScoreModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray }
+
 class OutputText < OpenAI::Internal::Type::BaseModel
 # @!attribute text
 # The text output from the model.
@@ -131,8 +137,45 @@ module OpenAI
 # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
 end
 
+class InputImage < OpenAI::Internal::Type::BaseModel
+# @!attribute image_url
+# The URL of the image input.
+#
+# @return [String]
+required :image_url, String
+
+# @!attribute type
+# The type of the image input. Always `input_image`.
+#
+# @return [Symbol, :input_image]
+required :type, const: :input_image
+
+# @!attribute detail
+# The detail level of the image to be sent to the model. One of `high`, `low`, or
+# `auto`. Defaults to `auto`.
+#
+# @return [String, nil]
+optional :detail, String
+
+# @!method initialize(image_url:, detail: nil, type: :input_image)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage} for more
+# details.
+#
+# An image input to the model.
+#
+# @param image_url [String] The URL of the image input.
+#
+# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
+#
+# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
+end
+
 # @!method self.variants
-# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText)]
+# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>)]
+
+# @type [OpenAI::Internal::Type::Converter]
+AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
 end
 
 # The role of the message input. One of `user`, `assistant`, `system`, or
```
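The grader models pick up the same `InputImage` and array content variants, so model-based graders can reference images from the eval item via template strings. A minimal sketch of a score-model grader written as a plain hash; the grader name, model choice, and template fields are illustrative assumptions, and only the image-capable `content` comes from this diff:

```ruby
# Illustrative score-model grader whose input includes an image reference.
grader = {
  type: "score_model",
  name: "image_caption_accuracy",   # assumed name
  model: "gpt-4o-mini",             # assumed grader model
  input: [
    {
      role: "user",
      type: "message",
      content: [
        { type: "input_text", text: "Score how well this caption fits the image: {{sample.output_text}}" },
        { type: "input_image", image_url: "{{item.image_url}}", detail: "low" }
      ]
    }
  ]
}
```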
data/lib/openai/models/image_edit_completed_event.rb (new file)

```diff
@@ -0,0 +1,198 @@
+# frozen_string_literal: true
+
+module OpenAI
+module Models
+class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
+# @!attribute b64_json
+# Base64-encoded final edited image data, suitable for rendering as an image.
+#
+# @return [String]
+required :b64_json, String
+
+# @!attribute background
+# The background setting for the edited image.
+#
+# @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background]
+required :background, enum: -> { OpenAI::ImageEditCompletedEvent::Background }
+
+# @!attribute created_at
+# The Unix timestamp when the event was created.
+#
+# @return [Integer]
+required :created_at, Integer
+
+# @!attribute output_format
+# The output format for the edited image.
+#
+# @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat]
+required :output_format, enum: -> { OpenAI::ImageEditCompletedEvent::OutputFormat }
+
+# @!attribute quality
+# The quality setting for the edited image.
+#
+# @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality]
+required :quality, enum: -> { OpenAI::ImageEditCompletedEvent::Quality }
+
+# @!attribute size
+# The size of the edited image.
+#
+# @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size]
+required :size, enum: -> { OpenAI::ImageEditCompletedEvent::Size }
+
+# @!attribute type
+# The type of the event. Always `image_edit.completed`.
+#
+# @return [Symbol, :"image_edit.completed"]
+required :type, const: :"image_edit.completed"
+
+# @!attribute usage
+# For `gpt-image-1` only, the token usage information for the image generation.
+#
+# @return [OpenAI::Models::ImageEditCompletedEvent::Usage]
+required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage }
+
+# @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_edit.completed")
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::ImageEditCompletedEvent} for more details.
+#
+# Emitted when image editing has completed and the final image is available.
+#
+# @param b64_json [String] Base64-encoded final edited image data, suitable for rendering as an image.
+#
+# @param background [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] The background setting for the edited image.
+#
+# @param created_at [Integer] The Unix timestamp when the event was created.
+#
+# @param output_format [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] The output format for the edited image.
+#
+# @param quality [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] The quality setting for the edited image.
+#
+# @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image.
+#
+# @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
+#
+# @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`.
+
+# The background setting for the edited image.
+#
+# @see OpenAI::Models::ImageEditCompletedEvent#background
+module Background
+extend OpenAI::Internal::Type::Enum
+
+TRANSPARENT = :transparent
+OPAQUE = :opaque
+AUTO = :auto
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The output format for the edited image.
+#
+# @see OpenAI::Models::ImageEditCompletedEvent#output_format
+module OutputFormat
+extend OpenAI::Internal::Type::Enum
+
+PNG = :png
+WEBP = :webp
+JPEG = :jpeg
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The quality setting for the edited image.
+#
+# @see OpenAI::Models::ImageEditCompletedEvent#quality
+module Quality
+extend OpenAI::Internal::Type::Enum
+
+LOW = :low
+MEDIUM = :medium
+HIGH = :high
+AUTO = :auto
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# The size of the edited image.
+#
+# @see OpenAI::Models::ImageEditCompletedEvent#size
+module Size
+extend OpenAI::Internal::Type::Enum
+
+SIZE_1024X1024 = :"1024x1024"
+SIZE_1024X1536 = :"1024x1536"
+SIZE_1536X1024 = :"1536x1024"
+AUTO = :auto
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# @see OpenAI::Models::ImageEditCompletedEvent#usage
+class Usage < OpenAI::Internal::Type::BaseModel
+# @!attribute input_tokens
+# The number of tokens (images and text) in the input prompt.
+#
+# @return [Integer]
+required :input_tokens, Integer
+
+# @!attribute input_tokens_details
+# The input tokens detailed information for the image generation.
+#
+# @return [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails]
+required :input_tokens_details, -> { OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails }
+
+# @!attribute output_tokens
+# The number of image tokens in the output image.
+#
+# @return [Integer]
+required :output_tokens, Integer
+
+# @!attribute total_tokens
+# The total number of tokens (images and text) used for the image generation.
+#
+# @return [Integer]
+required :total_tokens, Integer
+
+# @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details.
+#
+# For `gpt-image-1` only, the token usage information for the image generation.
+#
+# @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
+#
+# @param input_tokens_details [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
+#
+# @param output_tokens [Integer] The number of image tokens in the output image.
+#
+# @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.
+
+# @see OpenAI::Models::ImageEditCompletedEvent::Usage#input_tokens_details
+class InputTokensDetails < OpenAI::Internal::Type::BaseModel
+# @!attribute image_tokens
+# The number of image tokens in the input prompt.
+#
+# @return [Integer]
+required :image_tokens, Integer
+
+# @!attribute text_tokens
+# The number of text tokens in the input prompt.
+#
+# @return [Integer]
+required :text_tokens, Integer
+
+# @!method initialize(image_tokens:, text_tokens:)
+# The input tokens detailed information for the image generation.
+#
+# @param image_tokens [Integer] The number of image tokens in the input prompt.
+#
+# @param text_tokens [Integer] The number of text tokens in the input prompt.
+end
+end
+end
+end
+end
```
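`ImageEditCompletedEvent` is the terminal event of the new image-edit streaming surface; its sibling `ImageEditPartialImageEvent` is added in the same release (see the file list above). A hedged sketch of consuming such a stream: it assumes the streaming call referenced as `OpenAI::Resources::Images#stream_raw` yields these typed events, and the `partial_image_index`/`b64_json` fields on the partial event are assumed rather than shown in this excerpt:

```ruby
require "base64"

# `stream` stands in for whatever the streaming image-edit call returns;
# the exact method signature is not shown in this diff.
stream.each do |event|
  case event
  when OpenAI::Models::ImageEditPartialImageEvent
    # Intermediate previews, ordered by partial_image_index (assumed fields).
    File.binwrite("preview_#{event.partial_image_index}.png", Base64.decode64(event.b64_json))
  when OpenAI::Models::ImageEditCompletedEvent
    # Final image plus gpt-image-1 token usage, per the model above.
    File.binwrite("edited.#{event.output_format}", Base64.decode64(event.b64_json))
    puts "image edit used #{event.usage.total_tokens} tokens"
  end
end
```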
data/lib/openai/models/image_edit_params.rb

```diff
@@ -3,6 +3,8 @@
 module OpenAI
 module Models
 # @see OpenAI::Resources::Images#edit
+#
+# @see OpenAI::Resources::Images#stream_raw
 class ImageEditParams < OpenAI::Internal::Type::BaseModel
 extend OpenAI::Internal::Type::RequestParameters::Converter
 include OpenAI::Internal::Type::RequestParameters
@@ -38,6 +40,14 @@ module OpenAI
 # @return [Symbol, OpenAI::Models::ImageEditParams::Background, nil]
 optional :background, enum: -> { OpenAI::ImageEditParams::Background }, nil?: true
 
+# @!attribute input_fidelity
+# Control how much effort the model will exert to match the style and features,
+# especially facial features, of input images. This parameter is only supported
+# for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+#
+# @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil]
+optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true
+
 # @!attribute mask
 # An additional image whose fully transparent areas (e.g. where alpha is zero)
 # indicate where `image` should be edited. If there are multiple images provided,
@@ -77,6 +87,14 @@ module OpenAI
 # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
 optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true
 
+# @!attribute partial_images
+# The number of partial images to generate. This parameter is used for streaming
+# responses that return partial images. Value must be between 0 and 3. When set to
+# 0, the response will be a single image sent in one streaming event.
+#
+# @return [Integer, nil]
+optional :partial_images, Integer, nil?: true
+
 # @!attribute quality
 # The quality of the image that will be generated. `high`, `medium` and `low` are
 # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -110,7 +128,7 @@ module OpenAI
 # @return [String, nil]
 optional :user, String
 
-# @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
+# @!method initialize(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::ImageEditParams} for more details.
 #
@@ -120,6 +138,8 @@ module OpenAI
 #
 # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s).
 #
+# @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features,
+#
 # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
 #
 # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup
@@ -130,6 +150,8 @@ module OpenAI
 #
 # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
 #
+# @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for
+#
 # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
 #
 # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -179,6 +201,19 @@ module OpenAI
 # @return [Array<Symbol>]
 end
 
+# Control how much effort the model will exert to match the style and features,
+# especially facial features, of input images. This parameter is only supported
+# for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
+module InputFidelity
+extend OpenAI::Internal::Type::Enum
+
+HIGH = :high
+LOW = :low
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
 # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
 # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
 # is used.
```