openai 0.8.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +21 -0
  3. data/README.md +115 -4
  4. data/lib/openai/models/chat/chat_completion.rb +1 -0
  5. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -0
  6. data/lib/openai/models/chat/completion_create_params.rb +1 -0
  7. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +25 -60
  8. data/lib/openai/models/fine_tuning/job_create_params.rb +4 -2
  9. data/lib/openai/models/image_edit_params.rb +35 -1
  10. data/lib/openai/models/responses/response.rb +41 -6
  11. data/lib/openai/models/responses/response_create_params.rb +13 -4
  12. data/lib/openai/models/responses/response_prompt.rb +63 -0
  13. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +2 -1
  14. data/lib/openai/resources/fine_tuning/jobs.rb +2 -2
  15. data/lib/openai/resources/images.rb +5 -1
  16. data/lib/openai/resources/responses.rb +8 -4
  17. data/lib/openai/version.rb +1 -1
  18. data/lib/openai.rb +1 -0
  19. data/rbi/openai/models/chat/chat_completion.rbi +5 -0
  20. data/rbi/openai/models/chat/chat_completion_chunk.rbi +5 -0
  21. data/rbi/openai/models/chat/completion_create_params.rbi +5 -0
  22. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +26 -95
  23. data/rbi/openai/models/fine_tuning/job_create_params.rbi +8 -4
  24. data/rbi/openai/models/image_edit_params.rbi +51 -0
  25. data/rbi/openai/models/responses/response.rbi +66 -7
  26. data/rbi/openai/models/responses/response_create_params.rbi +24 -4
  27. data/rbi/openai/models/responses/response_prompt.rbi +120 -0
  28. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +3 -1
  29. data/rbi/openai/resources/fine_tuning/jobs.rbi +6 -4
  30. data/rbi/openai/resources/images.rbi +11 -0
  31. data/rbi/openai/resources/responses.rbi +10 -4
  32. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  33. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  34. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  35. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +16 -53
  36. data/sig/openai/models/image_edit_params.rbs +22 -0
  37. data/sig/openai/models/responses/response.rbs +22 -5
  38. data/sig/openai/models/responses/response_create_params.rbs +7 -1
  39. data/sig/openai/models/responses/response_prompt.rbs +44 -0
  40. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  41. data/sig/openai/resources/images.rbs +2 -0
  42. data/sig/openai/resources/responses.rbs +2 -0
  43. metadata +5 -2
@@ -45,7 +45,7 @@ module OpenAI
45
45
  # Creates an edited or extended image given one or more source images and a
46
46
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
47
47
  #
48
- # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
48
+ # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
49
49
  #
50
50
  # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>] The image(s) to edit. Must be a supported image file or an array of images.
51
51
  #
@@ -59,6 +59,10 @@ module OpenAI
59
59
  #
60
60
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
61
61
  #
62
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
63
+ #
64
+ # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
65
+ #
62
66
  # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
63
67
  #
64
68
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -23,7 +23,7 @@ module OpenAI
23
23
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
24
24
  # your own data as input for the model's response.
25
25
  #
26
- # @overload create(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
26
+ # @overload create(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
27
27
  #
28
28
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
29
29
  #
@@ -33,7 +33,7 @@ module OpenAI
33
33
  #
34
34
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
35
35
  #
36
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
36
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
37
37
  #
38
38
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
39
39
  #
@@ -43,6 +43,8 @@ module OpenAI
43
43
  #
44
44
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
45
45
  #
46
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
47
+ #
46
48
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
47
49
  #
48
50
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -181,7 +183,7 @@ module OpenAI
181
183
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
182
184
  # your own data as input for the model's response.
183
185
  #
184
- # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
186
+ # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
185
187
  #
186
188
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
187
189
  #
@@ -191,7 +193,7 @@ module OpenAI
191
193
  #
192
194
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
193
195
  #
194
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
196
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
195
197
  #
196
198
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
197
199
  #
@@ -201,6 +203,8 @@ module OpenAI
201
203
  #
202
204
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
203
205
  #
206
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
207
+ #
204
208
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
205
209
  #
206
210
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.8.0"
4
+ VERSION = "0.9.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -416,6 +416,7 @@ require_relative "openai/models/responses/response_output_message"
416
416
  require_relative "openai/models/responses/response_output_refusal"
417
417
  require_relative "openai/models/responses/response_output_text"
418
418
  require_relative "openai/models/responses/response_output_text_annotation_added_event"
419
+ require_relative "openai/models/responses/response_prompt"
419
420
  require_relative "openai/models/responses/response_queued_event"
420
421
  require_relative "openai/models/responses/response_reasoning_delta_event"
421
422
  require_relative "openai/models/responses/response_reasoning_done_event"
@@ -404,6 +404,11 @@ module OpenAI
404
404
  :flex,
405
405
  OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
406
406
  )
407
+ SCALE =
408
+ T.let(
409
+ :scale,
410
+ OpenAI::Chat::ChatCompletion::ServiceTier::TaggedSymbol
411
+ )
407
412
 
408
413
  sig do
409
414
  override.returns(
@@ -824,6 +824,11 @@ module OpenAI
824
824
  :flex,
825
825
  OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
826
826
  )
827
+ SCALE =
828
+ T.let(
829
+ :scale,
830
+ OpenAI::Chat::ChatCompletionChunk::ServiceTier::TaggedSymbol
831
+ )
827
832
 
828
833
  sig do
829
834
  override.returns(
@@ -1049,6 +1049,11 @@ module OpenAI
1049
1049
  :flex,
1050
1050
  OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
1051
1051
  )
1052
+ SCALE =
1053
+ T.let(
1054
+ :scale,
1055
+ OpenAI::Chat::CompletionCreateParams::ServiceTier::TaggedSymbol
1056
+ )
1052
1057
 
1053
1058
  sig do
1054
1059
  override.returns(
@@ -13,125 +13,56 @@ module OpenAI
13
13
  )
14
14
  end
15
15
 
16
- sig do
17
- returns(
18
- T::Array[
19
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
20
- ]
21
- )
22
- end
23
- attr_accessor :data
16
+ # The permission identifier, which can be referenced in the API endpoints.
17
+ sig { returns(String) }
18
+ attr_accessor :id
24
19
 
25
- sig { returns(T::Boolean) }
26
- attr_accessor :has_more
20
+ # The Unix timestamp (in seconds) for when the permission was created.
21
+ sig { returns(Integer) }
22
+ attr_accessor :created_at
27
23
 
24
+ # The object type, which is always "checkpoint.permission".
28
25
  sig { returns(Symbol) }
29
26
  attr_accessor :object
30
27
 
31
- sig { returns(T.nilable(String)) }
32
- attr_accessor :first_id
33
-
34
- sig { returns(T.nilable(String)) }
35
- attr_accessor :last_id
28
+ # The project identifier that the permission is for.
29
+ sig { returns(String) }
30
+ attr_accessor :project_id
36
31
 
32
+ # The `checkpoint.permission` object represents a permission for a fine-tuned
33
+ # model checkpoint.
37
34
  sig do
38
35
  params(
39
- data:
40
- T::Array[
41
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data::OrHash
42
- ],
43
- has_more: T::Boolean,
44
- first_id: T.nilable(String),
45
- last_id: T.nilable(String),
36
+ id: String,
37
+ created_at: Integer,
38
+ project_id: String,
46
39
  object: Symbol
47
40
  ).returns(T.attached_class)
48
41
  end
49
42
  def self.new(
50
- data:,
51
- has_more:,
52
- first_id: nil,
53
- last_id: nil,
54
- object: :list
43
+ # The permission identifier, which can be referenced in the API endpoints.
44
+ id:,
45
+ # The Unix timestamp (in seconds) for when the permission was created.
46
+ created_at:,
47
+ # The project identifier that the permission is for.
48
+ project_id:,
49
+ # The object type, which is always "checkpoint.permission".
50
+ object: :"checkpoint.permission"
55
51
  )
56
52
  end
57
53
 
58
54
  sig do
59
55
  override.returns(
60
56
  {
61
- data:
62
- T::Array[
63
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data
64
- ],
65
- has_more: T::Boolean,
57
+ id: String,
58
+ created_at: Integer,
66
59
  object: Symbol,
67
- first_id: T.nilable(String),
68
- last_id: T.nilable(String)
60
+ project_id: String
69
61
  }
70
62
  )
71
63
  end
72
64
  def to_hash
73
65
  end
74
-
75
- class Data < OpenAI::Internal::Type::BaseModel
76
- OrHash =
77
- T.type_alias do
78
- T.any(
79
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data,
80
- OpenAI::Internal::AnyHash
81
- )
82
- end
83
-
84
- # The permission identifier, which can be referenced in the API endpoints.
85
- sig { returns(String) }
86
- attr_accessor :id
87
-
88
- # The Unix timestamp (in seconds) for when the permission was created.
89
- sig { returns(Integer) }
90
- attr_accessor :created_at
91
-
92
- # The object type, which is always "checkpoint.permission".
93
- sig { returns(Symbol) }
94
- attr_accessor :object
95
-
96
- # The project identifier that the permission is for.
97
- sig { returns(String) }
98
- attr_accessor :project_id
99
-
100
- # The `checkpoint.permission` object represents a permission for a fine-tuned
101
- # model checkpoint.
102
- sig do
103
- params(
104
- id: String,
105
- created_at: Integer,
106
- project_id: String,
107
- object: Symbol
108
- ).returns(T.attached_class)
109
- end
110
- def self.new(
111
- # The permission identifier, which can be referenced in the API endpoints.
112
- id:,
113
- # The Unix timestamp (in seconds) for when the permission was created.
114
- created_at:,
115
- # The project identifier that the permission is for.
116
- project_id:,
117
- # The object type, which is always "checkpoint.permission".
118
- object: :"checkpoint.permission"
119
- )
120
- end
121
-
122
- sig do
123
- override.returns(
124
- {
125
- id: String,
126
- created_at: Integer,
127
- object: Symbol,
128
- project_id: String
129
- }
130
- )
131
- end
132
- def to_hash
133
- end
134
- end
135
66
  end
136
67
  end
137
68
  end
@@ -39,7 +39,8 @@ module OpenAI
39
39
  # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
40
40
  # format.
41
41
  #
42
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
42
+ # See the
43
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
43
44
  # for more details.
44
45
  sig { returns(String) }
45
46
  attr_accessor :training_file
@@ -115,7 +116,8 @@ module OpenAI
115
116
  # Your dataset must be formatted as a JSONL file. You must upload your file with
116
117
  # the purpose `fine-tune`.
117
118
  #
118
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
119
+ # See the
120
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
119
121
  # for more details.
120
122
  sig { returns(T.nilable(String)) }
121
123
  attr_accessor :validation_file
@@ -163,7 +165,8 @@ module OpenAI
163
165
  # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
164
166
  # format.
165
167
  #
166
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
168
+ # See the
169
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
167
170
  # for more details.
168
171
  training_file:,
169
172
  # The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -200,7 +203,8 @@ module OpenAI
200
203
  # Your dataset must be formatted as a JSONL file. You must upload your file with
201
204
  # the purpose `fine-tune`.
202
205
  #
203
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
206
+ # See the
207
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
204
208
  # for more details.
205
209
  validation_file: nil,
206
210
  request_options: {}
@@ -56,6 +56,20 @@ module OpenAI
56
56
  sig { returns(T.nilable(Integer)) }
57
57
  attr_accessor :n
58
58
 
59
+ # The compression level (0-100%) for the generated images. This parameter is only
60
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
61
+ # defaults to 100.
62
+ sig { returns(T.nilable(Integer)) }
63
+ attr_accessor :output_compression
64
+
65
+ # The format in which the generated images are returned. This parameter is only
66
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
67
+ # default value is `png`.
68
+ sig do
69
+ returns(T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol))
70
+ end
71
+ attr_accessor :output_format
72
+
59
73
  # The quality of the image that will be generated. `high`, `medium` and `low` are
60
74
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
61
75
  # Defaults to `auto`.
@@ -94,6 +108,9 @@ module OpenAI
94
108
  mask: OpenAI::Internal::FileInput,
95
109
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
96
110
  n: T.nilable(Integer),
111
+ output_compression: T.nilable(Integer),
112
+ output_format:
113
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
97
114
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
98
115
  response_format:
99
116
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -133,6 +150,14 @@ module OpenAI
133
150
  model: nil,
134
151
  # The number of images to generate. Must be between 1 and 10.
135
152
  n: nil,
153
+ # The compression level (0-100%) for the generated images. This parameter is only
154
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
155
+ # defaults to 100.
156
+ output_compression: nil,
157
+ # The format in which the generated images are returned. This parameter is only
158
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
159
+ # default value is `png`.
160
+ output_format: nil,
136
161
  # The quality of the image that will be generated. `high`, `medium` and `low` are
137
162
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
138
163
  # Defaults to `auto`.
@@ -164,6 +189,9 @@ module OpenAI
164
189
  mask: OpenAI::Internal::FileInput,
165
190
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
166
191
  n: T.nilable(Integer),
192
+ output_compression: T.nilable(Integer),
193
+ output_format:
194
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
167
195
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
168
196
  response_format:
169
197
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -246,6 +274,29 @@ module OpenAI
246
274
  end
247
275
  end
248
276
 
277
+ # The format in which the generated images are returned. This parameter is only
278
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
279
+ # default value is `png`.
280
+ module OutputFormat
281
+ extend OpenAI::Internal::Type::Enum
282
+
283
+ TaggedSymbol =
284
+ T.type_alias { T.all(Symbol, OpenAI::ImageEditParams::OutputFormat) }
285
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
286
+
287
+ PNG = T.let(:png, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
288
+ JPEG = T.let(:jpeg, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
289
+ WEBP = T.let(:webp, OpenAI::ImageEditParams::OutputFormat::TaggedSymbol)
290
+
291
+ sig do
292
+ override.returns(
293
+ T::Array[OpenAI::ImageEditParams::OutputFormat::TaggedSymbol]
294
+ )
295
+ end
296
+ def self.values
297
+ end
298
+ end
299
+
249
300
  # The quality of the image that will be generated. `high`, `medium` and `low` are
250
301
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
251
302
  # Defaults to `auto`.
@@ -42,13 +42,16 @@ module OpenAI
42
42
  end
43
43
  attr_writer :incomplete_details
44
44
 
45
- # Inserts a system (or developer) message as the first item in the model's
46
- # context.
45
+ # A system (or developer) message inserted into the model's context.
47
46
  #
48
47
  # When using along with `previous_response_id`, the instructions from a previous
49
48
  # response will not be carried over to the next response. This makes it simple to
50
49
  # swap out system (or developer) messages in new responses.
51
- sig { returns(T.nilable(String)) }
50
+ sig do
51
+ returns(
52
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants)
53
+ )
54
+ end
52
55
  attr_accessor :instructions
53
56
 
54
57
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -143,6 +146,18 @@ module OpenAI
143
146
  sig { returns(T.nilable(String)) }
144
147
  attr_accessor :previous_response_id
145
148
 
149
+ # Reference to a prompt template and its variables.
150
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
151
+ sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) }
152
+ attr_reader :prompt
153
+
154
+ sig do
155
+ params(
156
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash)
157
+ ).void
158
+ end
159
+ attr_writer :prompt
160
+
146
161
  # **o-series models only**
147
162
  #
148
163
  # Configuration options for
@@ -236,7 +251,8 @@ module OpenAI
236
251
  error: T.nilable(OpenAI::Responses::ResponseError::OrHash),
237
252
  incomplete_details:
238
253
  T.nilable(OpenAI::Responses::Response::IncompleteDetails::OrHash),
239
- instructions: T.nilable(String),
254
+ instructions:
255
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants),
240
256
  metadata: T.nilable(T::Hash[Symbol, String]),
241
257
  model:
242
258
  T.any(
@@ -286,6 +302,7 @@ module OpenAI
286
302
  background: T.nilable(T::Boolean),
287
303
  max_output_tokens: T.nilable(Integer),
288
304
  previous_response_id: T.nilable(String),
305
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
289
306
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
290
307
  service_tier:
291
308
  T.nilable(OpenAI::Responses::Response::ServiceTier::OrSymbol),
@@ -307,8 +324,7 @@ module OpenAI
307
324
  error:,
308
325
  # Details about why the response is incomplete.
309
326
  incomplete_details:,
310
- # Inserts a system (or developer) message as the first item in the model's
311
- # context.
327
+ # A system (or developer) message inserted into the model's context.
312
328
  #
313
329
  # When using along with `previous_response_id`, the instructions from a previous
314
330
  # response will not be carried over to the next response. This makes it simple to
@@ -378,6 +394,9 @@ module OpenAI
378
394
  # multi-turn conversations. Learn more about
379
395
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
380
396
  previous_response_id: nil,
397
+ # Reference to a prompt template and its variables.
398
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
399
+ prompt: nil,
381
400
  # **o-series models only**
382
401
  #
383
402
  # Configuration options for
@@ -438,7 +457,8 @@ module OpenAI
438
457
  error: T.nilable(OpenAI::Responses::ResponseError),
439
458
  incomplete_details:
440
459
  T.nilable(OpenAI::Responses::Response::IncompleteDetails),
441
- instructions: T.nilable(String),
460
+ instructions:
461
+ T.nilable(OpenAI::Responses::Response::Instructions::Variants),
442
462
  metadata: T.nilable(T::Hash[Symbol, String]),
443
463
  model: OpenAI::ResponsesModel::Variants,
444
464
  object: Symbol,
@@ -451,6 +471,7 @@ module OpenAI
451
471
  background: T.nilable(T::Boolean),
452
472
  max_output_tokens: T.nilable(Integer),
453
473
  previous_response_id: T.nilable(String),
474
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
454
475
  reasoning: T.nilable(OpenAI::Reasoning),
455
476
  service_tier:
456
477
  T.nilable(
@@ -557,6 +578,39 @@ module OpenAI
557
578
  end
558
579
  end
559
580
 
581
+ # A system (or developer) message inserted into the model's context.
582
+ #
583
+ # When using along with `previous_response_id`, the instructions from a previous
584
+ # response will not be carried over to the next response. This makes it simple to
585
+ # swap out system (or developer) messages in new responses.
586
+ module Instructions
587
+ extend OpenAI::Internal::Type::Union
588
+
589
+ Variants =
590
+ T.type_alias do
591
+ T.any(
592
+ String,
593
+ T::Array[OpenAI::Responses::ResponseInputItem::Variants]
594
+ )
595
+ end
596
+
597
+ sig do
598
+ override.returns(
599
+ T::Array[OpenAI::Responses::Response::Instructions::Variants]
600
+ )
601
+ end
602
+ def self.variants
603
+ end
604
+
605
+ ResponseInputItemArray =
606
+ T.let(
607
+ OpenAI::Internal::Type::ArrayOf[
608
+ union: OpenAI::Responses::ResponseInputItem
609
+ ],
610
+ OpenAI::Internal::Type::Converter
611
+ )
612
+ end
613
+
560
614
  # How the model should select which tool (or tools) to use when generating a
561
615
  # response. See the `tools` parameter to see how to specify which tools the model
562
616
  # can call.
@@ -616,6 +670,11 @@ module OpenAI
616
670
  )
617
671
  FLEX =
618
672
  T.let(:flex, OpenAI::Responses::Response::ServiceTier::TaggedSymbol)
673
+ SCALE =
674
+ T.let(
675
+ :scale,
676
+ OpenAI::Responses::Response::ServiceTier::TaggedSymbol
677
+ )
619
678
 
620
679
  sig do
621
680
  override.returns(
@@ -72,8 +72,7 @@ module OpenAI
72
72
  end
73
73
  attr_accessor :include
74
74
 
75
- # Inserts a system (or developer) message as the first item in the model's
76
- # context.
75
+ # A system (or developer) message inserted into the model's context.
77
76
  #
78
77
  # When using along with `previous_response_id`, the instructions from a previous
79
78
  # response will not be carried over to the next response. This makes it simple to
@@ -106,6 +105,18 @@ module OpenAI
106
105
  sig { returns(T.nilable(String)) }
107
106
  attr_accessor :previous_response_id
108
107
 
108
+ # Reference to a prompt template and its variables.
109
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
110
+ sig { returns(T.nilable(OpenAI::Responses::ResponsePrompt)) }
111
+ attr_reader :prompt
112
+
113
+ sig do
114
+ params(
115
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash)
116
+ ).void
117
+ end
118
+ attr_writer :prompt
119
+
109
120
  # **o-series models only**
110
121
  #
111
122
  # Configuration options for
@@ -305,6 +316,7 @@ module OpenAI
305
316
  metadata: T.nilable(T::Hash[Symbol, String]),
306
317
  parallel_tool_calls: T.nilable(T::Boolean),
307
318
  previous_response_id: T.nilable(String),
319
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
308
320
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
309
321
  service_tier:
310
322
  T.nilable(
@@ -377,8 +389,7 @@ module OpenAI
377
389
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
378
390
  # in code interpreter tool call items.
379
391
  include: nil,
380
- # Inserts a system (or developer) message as the first item in the model's
381
- # context.
392
+ # A system (or developer) message inserted into the model's context.
382
393
  #
383
394
  # When using along with `previous_response_id`, the instructions from a previous
384
395
  # response will not be carried over to the next response. This makes it simple to
@@ -401,6 +412,9 @@ module OpenAI
401
412
  # multi-turn conversations. Learn more about
402
413
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
403
414
  previous_response_id: nil,
415
+ # Reference to a prompt template and its variables.
416
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
417
+ prompt: nil,
404
418
  # **o-series models only**
405
419
  #
406
420
  # Configuration options for
@@ -498,6 +512,7 @@ module OpenAI
498
512
  metadata: T.nilable(T::Hash[Symbol, String]),
499
513
  parallel_tool_calls: T.nilable(T::Boolean),
500
514
  previous_response_id: T.nilable(String),
515
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt),
501
516
  reasoning: T.nilable(OpenAI::Reasoning),
502
517
  service_tier:
503
518
  T.nilable(
@@ -611,6 +626,11 @@ module OpenAI
611
626
  :flex,
612
627
  OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
613
628
  )
629
+ SCALE =
630
+ T.let(
631
+ :scale,
632
+ OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
633
+ )
614
634
 
615
635
  sig do
616
636
  override.returns(