openai 0.8.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +21 -0
  3. data/README.md +115 -4
  4. data/lib/openai/models/chat/chat_completion.rb +1 -0
  5. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -0
  6. data/lib/openai/models/chat/completion_create_params.rb +1 -0
  7. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +25 -60
  8. data/lib/openai/models/fine_tuning/job_create_params.rb +4 -2
  9. data/lib/openai/models/image_edit_params.rb +35 -1
  10. data/lib/openai/models/responses/response.rb +41 -6
  11. data/lib/openai/models/responses/response_create_params.rb +13 -4
  12. data/lib/openai/models/responses/response_prompt.rb +63 -0
  13. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +2 -1
  14. data/lib/openai/resources/fine_tuning/jobs.rb +2 -2
  15. data/lib/openai/resources/images.rb +5 -1
  16. data/lib/openai/resources/responses.rb +8 -4
  17. data/lib/openai/version.rb +1 -1
  18. data/lib/openai.rb +1 -0
  19. data/rbi/openai/models/chat/chat_completion.rbi +5 -0
  20. data/rbi/openai/models/chat/chat_completion_chunk.rbi +5 -0
  21. data/rbi/openai/models/chat/completion_create_params.rbi +5 -0
  22. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +26 -95
  23. data/rbi/openai/models/fine_tuning/job_create_params.rbi +8 -4
  24. data/rbi/openai/models/image_edit_params.rbi +51 -0
  25. data/rbi/openai/models/responses/response.rbi +66 -7
  26. data/rbi/openai/models/responses/response_create_params.rbi +24 -4
  27. data/rbi/openai/models/responses/response_prompt.rbi +120 -0
  28. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +3 -1
  29. data/rbi/openai/resources/fine_tuning/jobs.rbi +6 -4
  30. data/rbi/openai/resources/images.rbi +11 -0
  31. data/rbi/openai/resources/responses.rbi +10 -4
  32. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  33. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  34. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  35. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +16 -53
  36. data/sig/openai/models/image_edit_params.rbs +22 -0
  37. data/sig/openai/models/responses/response.rbs +22 -5
  38. data/sig/openai/models/responses/response_create_params.rbs +7 -1
  39. data/sig/openai/models/responses/response_prompt.rbs +44 -0
  40. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  41. data/sig/openai/resources/images.rbs +2 -0
  42. data/sig/openai/resources/responses.rbs +2 -0
  43. metadata +5 -2
@@ -0,0 +1,120 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(OpenAI::Responses::ResponsePrompt, OpenAI::Internal::AnyHash)
10
+ end
11
+
12
+ # The unique identifier of the prompt template to use.
13
+ sig { returns(String) }
14
+ attr_accessor :id
15
+
16
+ # Optional map of values to substitute in for variables in your prompt. The
17
+ # substitution values can either be strings, or other Response input types like
18
+ # images or files.
19
+ sig do
20
+ returns(
21
+ T.nilable(
22
+ T::Hash[
23
+ Symbol,
24
+ T.any(
25
+ String,
26
+ OpenAI::Responses::ResponseInputText,
27
+ OpenAI::Responses::ResponseInputImage,
28
+ OpenAI::Responses::ResponseInputFile
29
+ )
30
+ ]
31
+ )
32
+ )
33
+ end
34
+ attr_accessor :variables
35
+
36
+ # Optional version of the prompt template.
37
+ sig { returns(T.nilable(String)) }
38
+ attr_accessor :version
39
+
40
+ # Reference to a prompt template and its variables.
41
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
42
+ sig do
43
+ params(
44
+ id: String,
45
+ variables:
46
+ T.nilable(
47
+ T::Hash[
48
+ Symbol,
49
+ T.any(
50
+ String,
51
+ OpenAI::Responses::ResponseInputText::OrHash,
52
+ OpenAI::Responses::ResponseInputImage::OrHash,
53
+ OpenAI::Responses::ResponseInputFile::OrHash
54
+ )
55
+ ]
56
+ ),
57
+ version: T.nilable(String)
58
+ ).returns(T.attached_class)
59
+ end
60
+ def self.new(
61
+ # The unique identifier of the prompt template to use.
62
+ id:,
63
+ # Optional map of values to substitute in for variables in your prompt. The
64
+ # substitution values can either be strings, or other Response input types like
65
+ # images or files.
66
+ variables: nil,
67
+ # Optional version of the prompt template.
68
+ version: nil
69
+ )
70
+ end
71
+
72
+ sig do
73
+ override.returns(
74
+ {
75
+ id: String,
76
+ variables:
77
+ T.nilable(
78
+ T::Hash[
79
+ Symbol,
80
+ T.any(
81
+ String,
82
+ OpenAI::Responses::ResponseInputText,
83
+ OpenAI::Responses::ResponseInputImage,
84
+ OpenAI::Responses::ResponseInputFile
85
+ )
86
+ ]
87
+ ),
88
+ version: T.nilable(String)
89
+ }
90
+ )
91
+ end
92
+ def to_hash
93
+ end
94
+
95
+ # A text input to the model.
96
+ module Variable
97
+ extend OpenAI::Internal::Type::Union
98
+
99
+ Variants =
100
+ T.type_alias do
101
+ T.any(
102
+ String,
103
+ OpenAI::Responses::ResponseInputText,
104
+ OpenAI::Responses::ResponseInputImage,
105
+ OpenAI::Responses::ResponseInputFile
106
+ )
107
+ end
108
+
109
+ sig do
110
+ override.returns(
111
+ T::Array[OpenAI::Responses::ResponsePrompt::Variable::Variants]
112
+ )
113
+ end
114
+ def self.variants
115
+ end
116
+ end
117
+ end
118
+ end
119
+ end
120
+ end
@@ -43,7 +43,9 @@ module OpenAI
43
43
  project_id: String,
44
44
  request_options: OpenAI::RequestOptions::OrHash
45
45
  ).returns(
46
- OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
46
+ OpenAI::Internal::CursorPage[
47
+ OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
48
+ ]
47
49
  )
48
50
  end
49
51
  def retrieve(
@@ -13,7 +13,7 @@ module OpenAI
13
13
  # Response includes details of the enqueued job including job status and the name
14
14
  # of the fine-tuned models once complete.
15
15
  #
16
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
16
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
17
17
  sig do
18
18
  params(
19
19
  model:
@@ -57,7 +57,8 @@ module OpenAI
57
57
  # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
58
58
  # format.
59
59
  #
60
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
60
+ # See the
61
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
61
62
  # for more details.
62
63
  training_file:,
63
64
  # The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -94,7 +95,8 @@ module OpenAI
94
95
  # Your dataset must be formatted as a JSONL file. You must upload your file with
95
96
  # the purpose `fine-tune`.
96
97
  #
97
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
98
+ # See the
99
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
98
100
  # for more details.
99
101
  validation_file: nil,
100
102
  request_options: {}
@@ -103,7 +105,7 @@ module OpenAI
103
105
 
104
106
  # Get info about a fine-tuning job.
105
107
  #
106
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
108
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
107
109
  sig do
108
110
  params(
109
111
  fine_tuning_job_id: String,
@@ -52,6 +52,9 @@ module OpenAI
52
52
  mask: OpenAI::Internal::FileInput,
53
53
  model: T.nilable(T.any(String, OpenAI::ImageModel::OrSymbol)),
54
54
  n: T.nilable(Integer),
55
+ output_compression: T.nilable(Integer),
56
+ output_format:
57
+ T.nilable(OpenAI::ImageEditParams::OutputFormat::OrSymbol),
55
58
  quality: T.nilable(OpenAI::ImageEditParams::Quality::OrSymbol),
56
59
  response_format:
57
60
  T.nilable(OpenAI::ImageEditParams::ResponseFormat::OrSymbol),
@@ -91,6 +94,14 @@ module OpenAI
91
94
  model: nil,
92
95
  # The number of images to generate. Must be between 1 and 10.
93
96
  n: nil,
97
+ # The compression level (0-100%) for the generated images. This parameter is only
98
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
99
+ # defaults to 100.
100
+ output_compression: nil,
101
+ # The format in which the generated images are returned. This parameter is only
102
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
103
+ # default value is `png`.
104
+ output_format: nil,
94
105
  # The quality of the image that will be generated. `high`, `medium` and `low` are
95
106
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
96
107
  # Defaults to `auto`.
@@ -38,6 +38,7 @@ module OpenAI
38
38
  metadata: T.nilable(T::Hash[Symbol, String]),
39
39
  parallel_tool_calls: T.nilable(T::Boolean),
40
40
  previous_response_id: T.nilable(String),
41
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
41
42
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
42
43
  service_tier:
43
44
  T.nilable(
@@ -115,8 +116,7 @@ module OpenAI
115
116
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
116
117
  # in code interpreter tool call items.
117
118
  include: nil,
118
- # Inserts a system (or developer) message as the first item in the model's
119
- # context.
119
+ # A system (or developer) message inserted into the model's context.
120
120
  #
121
121
  # When using along with `previous_response_id`, the instructions from a previous
122
122
  # response will not be carried over to the next response. This makes it simple to
@@ -139,6 +139,9 @@ module OpenAI
139
139
  # multi-turn conversations. Learn more about
140
140
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
141
141
  previous_response_id: nil,
142
+ # Reference to a prompt template and its variables.
143
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
144
+ prompt: nil,
142
145
  # **o-series models only**
143
146
  #
144
147
  # Configuration options for
@@ -251,6 +254,7 @@ module OpenAI
251
254
  metadata: T.nilable(T::Hash[Symbol, String]),
252
255
  parallel_tool_calls: T.nilable(T::Boolean),
253
256
  previous_response_id: T.nilable(String),
257
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
254
258
  reasoning: T.nilable(OpenAI::Reasoning::OrHash),
255
259
  service_tier:
256
260
  T.nilable(
@@ -328,8 +332,7 @@ module OpenAI
328
332
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
329
333
  # in code interpreter tool call items.
330
334
  include: nil,
331
- # Inserts a system (or developer) message as the first item in the model's
332
- # context.
335
+ # A system (or developer) message inserted into the model's context.
333
336
  #
334
337
  # When using along with `previous_response_id`, the instructions from a previous
335
338
  # response will not be carried over to the next response. This makes it simple to
@@ -352,6 +355,9 @@ module OpenAI
352
355
  # multi-turn conversations. Learn more about
353
356
  # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
354
357
  previous_response_id: nil,
358
+ # Reference to a prompt template and its variables.
359
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
360
+ prompt: nil,
355
361
  # **o-series models only**
356
362
  #
357
363
  # Configuration options for
@@ -127,7 +127,7 @@ module OpenAI
127
127
  end
128
128
  end
129
129
 
130
- type service_tier = :auto | :default | :flex
130
+ type service_tier = :auto | :default | :flex | :scale
131
131
 
132
132
  module ServiceTier
133
133
  extend OpenAI::Internal::Type::Enum
@@ -135,6 +135,7 @@ module OpenAI
135
135
  AUTO: :auto
136
136
  DEFAULT: :default
137
137
  FLEX: :flex
138
+ SCALE: :scale
138
139
 
139
140
  def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
140
141
  end
@@ -272,7 +272,7 @@ module OpenAI
272
272
  end
273
273
  end
274
274
 
275
- type service_tier = :auto | :default | :flex
275
+ type service_tier = :auto | :default | :flex | :scale
276
276
 
277
277
  module ServiceTier
278
278
  extend OpenAI::Internal::Type::Enum
@@ -280,6 +280,7 @@ module OpenAI
280
280
  AUTO: :auto
281
281
  DEFAULT: :default
282
282
  FLEX: :flex
283
+ SCALE: :scale
283
284
 
284
285
  def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
285
286
  end
@@ -280,7 +280,7 @@ module OpenAI
280
280
  def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format]
281
281
  end
282
282
 
283
- type service_tier = :auto | :default | :flex
283
+ type service_tier = :auto | :default | :flex | :scale
284
284
 
285
285
  module ServiceTier
286
286
  extend OpenAI::Internal::Type::Enum
@@ -288,6 +288,7 @@ module OpenAI
288
288
  AUTO: :auto
289
289
  DEFAULT: :default
290
290
  FLEX: :flex
291
+ SCALE: :scale
291
292
 
292
293
  def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier]
293
294
  end
@@ -4,71 +4,34 @@ module OpenAI
4
4
  module Checkpoints
5
5
  type permission_retrieve_response =
6
6
  {
7
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
8
- has_more: bool,
9
- object: :list,
10
- first_id: String?,
11
- last_id: String?
7
+ id: String,
8
+ created_at: Integer,
9
+ object: :"checkpoint.permission",
10
+ project_id: String
12
11
  }
13
12
 
14
13
  class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
15
- attr_accessor data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]
14
+ attr_accessor id: String
16
15
 
17
- attr_accessor has_more: bool
16
+ attr_accessor created_at: Integer
18
17
 
19
- attr_accessor object: :list
18
+ attr_accessor object: :"checkpoint.permission"
20
19
 
21
- attr_accessor first_id: String?
22
-
23
- attr_accessor last_id: String?
20
+ attr_accessor project_id: String
24
21
 
25
22
  def initialize: (
26
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
27
- has_more: bool,
28
- ?first_id: String?,
29
- ?last_id: String?,
30
- ?object: :list
23
+ id: String,
24
+ created_at: Integer,
25
+ project_id: String,
26
+ ?object: :"checkpoint.permission"
31
27
  ) -> void
32
28
 
33
29
  def to_hash: -> {
34
- data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
35
- has_more: bool,
36
- object: :list,
37
- first_id: String?,
38
- last_id: String?
30
+ id: String,
31
+ created_at: Integer,
32
+ object: :"checkpoint.permission",
33
+ project_id: String
39
34
  }
40
-
41
- type data =
42
- {
43
- id: String,
44
- created_at: Integer,
45
- object: :"checkpoint.permission",
46
- project_id: String
47
- }
48
-
49
- class Data < OpenAI::Internal::Type::BaseModel
50
- attr_accessor id: String
51
-
52
- attr_accessor created_at: Integer
53
-
54
- attr_accessor object: :"checkpoint.permission"
55
-
56
- attr_accessor project_id: String
57
-
58
- def initialize: (
59
- id: String,
60
- created_at: Integer,
61
- project_id: String,
62
- ?object: :"checkpoint.permission"
63
- ) -> void
64
-
65
- def to_hash: -> {
66
- id: String,
67
- created_at: Integer,
68
- object: :"checkpoint.permission",
69
- project_id: String
70
- }
71
- end
72
35
  end
73
36
  end
74
37
  end
@@ -8,6 +8,8 @@ module OpenAI
8
8
  mask: OpenAI::Internal::file_input,
9
9
  model: OpenAI::Models::ImageEditParams::model?,
10
10
  n: Integer?,
11
+ output_compression: Integer?,
12
+ output_format: OpenAI::Models::ImageEditParams::output_format?,
11
13
  quality: OpenAI::Models::ImageEditParams::quality?,
12
14
  response_format: OpenAI::Models::ImageEditParams::response_format?,
13
15
  size: OpenAI::Models::ImageEditParams::size?,
@@ -33,6 +35,10 @@ module OpenAI
33
35
 
34
36
  attr_accessor n: Integer?
35
37
 
38
+ attr_accessor output_compression: Integer?
39
+
40
+ attr_accessor output_format: OpenAI::Models::ImageEditParams::output_format?
41
+
36
42
  attr_accessor quality: OpenAI::Models::ImageEditParams::quality?
37
43
 
38
44
  attr_accessor response_format: OpenAI::Models::ImageEditParams::response_format?
@@ -50,6 +56,8 @@ module OpenAI
50
56
  ?mask: OpenAI::Internal::file_input,
51
57
  ?model: OpenAI::Models::ImageEditParams::model?,
52
58
  ?n: Integer?,
59
+ ?output_compression: Integer?,
60
+ ?output_format: OpenAI::Models::ImageEditParams::output_format?,
53
61
  ?quality: OpenAI::Models::ImageEditParams::quality?,
54
62
  ?response_format: OpenAI::Models::ImageEditParams::response_format?,
55
63
  ?size: OpenAI::Models::ImageEditParams::size?,
@@ -64,6 +72,8 @@ module OpenAI
64
72
  mask: OpenAI::Internal::file_input,
65
73
  model: OpenAI::Models::ImageEditParams::model?,
66
74
  n: Integer?,
75
+ output_compression: Integer?,
76
+ output_format: OpenAI::Models::ImageEditParams::output_format?,
67
77
  quality: OpenAI::Models::ImageEditParams::quality?,
68
78
  response_format: OpenAI::Models::ImageEditParams::response_format?,
69
79
  size: OpenAI::Models::ImageEditParams::size?,
@@ -102,6 +112,18 @@ module OpenAI
102
112
  def self?.variants: -> ::Array[OpenAI::Models::ImageEditParams::model]
103
113
  end
104
114
 
115
+ type output_format = :png | :jpeg | :webp
116
+
117
+ module OutputFormat
118
+ extend OpenAI::Internal::Type::Enum
119
+
120
+ PNG: :png
121
+ JPEG: :jpeg
122
+ WEBP: :webp
123
+
124
+ def self?.values: -> ::Array[OpenAI::Models::ImageEditParams::output_format]
125
+ end
126
+
105
127
  type quality = :standard | :low | :medium | :high | :auto
106
128
 
107
129
  module Quality
@@ -7,7 +7,7 @@ module OpenAI
7
7
  created_at: Float,
8
8
  error: OpenAI::Responses::ResponseError?,
9
9
  incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
10
- instructions: String?,
10
+ instructions: OpenAI::Models::Responses::Response::instructions?,
11
11
  metadata: OpenAI::Models::metadata?,
12
12
  model: OpenAI::Models::responses_model,
13
13
  object: :response,
@@ -20,6 +20,7 @@ module OpenAI
20
20
  background: bool?,
21
21
  max_output_tokens: Integer?,
22
22
  previous_response_id: String?,
23
+ prompt: OpenAI::Responses::ResponsePrompt?,
23
24
  reasoning: OpenAI::Reasoning?,
24
25
  service_tier: OpenAI::Models::Responses::Response::service_tier?,
25
26
  status: OpenAI::Models::Responses::response_status,
@@ -38,7 +39,7 @@ module OpenAI
38
39
 
39
40
  attr_accessor incomplete_details: OpenAI::Responses::Response::IncompleteDetails?
40
41
 
41
- attr_accessor instructions: String?
42
+ attr_accessor instructions: OpenAI::Models::Responses::Response::instructions?
42
43
 
43
44
  attr_accessor metadata: OpenAI::Models::metadata?
44
45
 
@@ -64,6 +65,8 @@ module OpenAI
64
65
 
65
66
  attr_accessor previous_response_id: String?
66
67
 
68
+ attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
69
+
67
70
  attr_accessor reasoning: OpenAI::Reasoning?
68
71
 
69
72
  attr_accessor service_tier: OpenAI::Models::Responses::Response::service_tier?
@@ -97,7 +100,7 @@ module OpenAI
97
100
  created_at: Float,
98
101
  error: OpenAI::Responses::ResponseError?,
99
102
  incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
100
- instructions: String?,
103
+ instructions: OpenAI::Models::Responses::Response::instructions?,
101
104
  metadata: OpenAI::Models::metadata?,
102
105
  model: OpenAI::Models::responses_model,
103
106
  output: ::Array[OpenAI::Models::Responses::response_output_item],
@@ -109,6 +112,7 @@ module OpenAI
109
112
  ?background: bool?,
110
113
  ?max_output_tokens: Integer?,
111
114
  ?previous_response_id: String?,
115
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
112
116
  ?reasoning: OpenAI::Reasoning?,
113
117
  ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
114
118
  ?status: OpenAI::Models::Responses::response_status,
@@ -124,7 +128,7 @@ module OpenAI
124
128
  created_at: Float,
125
129
  error: OpenAI::Responses::ResponseError?,
126
130
  incomplete_details: OpenAI::Responses::Response::IncompleteDetails?,
127
- instructions: String?,
131
+ instructions: OpenAI::Models::Responses::Response::instructions?,
128
132
  metadata: OpenAI::Models::metadata?,
129
133
  model: OpenAI::Models::responses_model,
130
134
  object: :response,
@@ -137,6 +141,7 @@ module OpenAI
137
141
  background: bool?,
138
142
  max_output_tokens: Integer?,
139
143
  previous_response_id: String?,
144
+ prompt: OpenAI::Responses::ResponsePrompt?,
140
145
  reasoning: OpenAI::Reasoning?,
141
146
  service_tier: OpenAI::Models::Responses::Response::service_tier?,
142
147
  status: OpenAI::Models::Responses::response_status,
@@ -178,6 +183,17 @@ module OpenAI
178
183
  end
179
184
  end
180
185
 
186
+ type instructions =
187
+ String | ::Array[OpenAI::Models::Responses::response_input_item]
188
+
189
+ module Instructions
190
+ extend OpenAI::Internal::Type::Union
191
+
192
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::instructions]
193
+
194
+ ResponseInputItemArray: OpenAI::Internal::Type::Converter
195
+ end
196
+
181
197
  type tool_choice =
182
198
  OpenAI::Models::Responses::tool_choice_options
183
199
  | OpenAI::Responses::ToolChoiceTypes
@@ -189,7 +205,7 @@ module OpenAI
189
205
  def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::tool_choice]
190
206
  end
191
207
 
192
- type service_tier = :auto | :default | :flex
208
+ type service_tier = :auto | :default | :flex | :scale
193
209
 
194
210
  module ServiceTier
195
211
  extend OpenAI::Internal::Type::Enum
@@ -197,6 +213,7 @@ module OpenAI
197
213
  AUTO: :auto
198
214
  DEFAULT: :default
199
215
  FLEX: :flex
216
+ SCALE: :scale
200
217
 
201
218
  def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier]
202
219
  end
@@ -12,6 +12,7 @@ module OpenAI
12
12
  metadata: OpenAI::Models::metadata?,
13
13
  parallel_tool_calls: bool?,
14
14
  previous_response_id: String?,
15
+ prompt: OpenAI::Responses::ResponsePrompt?,
15
16
  reasoning: OpenAI::Reasoning?,
16
17
  service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
17
18
  store: bool?,
@@ -47,6 +48,8 @@ module OpenAI
47
48
 
48
49
  attr_accessor previous_response_id: String?
49
50
 
51
+ attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
52
+
50
53
  attr_accessor reasoning: OpenAI::Reasoning?
51
54
 
52
55
  attr_accessor service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?
@@ -91,6 +94,7 @@ module OpenAI
91
94
  ?metadata: OpenAI::Models::metadata?,
92
95
  ?parallel_tool_calls: bool?,
93
96
  ?previous_response_id: String?,
97
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
94
98
  ?reasoning: OpenAI::Reasoning?,
95
99
  ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
96
100
  ?store: bool?,
@@ -114,6 +118,7 @@ module OpenAI
114
118
  metadata: OpenAI::Models::metadata?,
115
119
  parallel_tool_calls: bool?,
116
120
  previous_response_id: String?,
121
+ prompt: OpenAI::Responses::ResponsePrompt?,
117
122
  reasoning: OpenAI::Reasoning?,
118
123
  service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
119
124
  store: bool?,
@@ -135,7 +140,7 @@ module OpenAI
135
140
  def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::input]
136
141
  end
137
142
 
138
- type service_tier = :auto | :default | :flex
143
+ type service_tier = :auto | :default | :flex | :scale
139
144
 
140
145
  module ServiceTier
141
146
  extend OpenAI::Internal::Type::Enum
@@ -143,6 +148,7 @@ module OpenAI
143
148
  AUTO: :auto
144
149
  DEFAULT: :default
145
150
  FLEX: :flex
151
+ SCALE: :scale
146
152
 
147
153
  def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier]
148
154
  end
@@ -0,0 +1,44 @@
1
+ module OpenAI
2
+ module Models
3
+ module Responses
4
+ type response_prompt =
5
+ {
6
+ id: String,
7
+ variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
8
+ version: String?
9
+ }
10
+
11
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
12
+ attr_accessor id: String
13
+
14
+ attr_accessor variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?
15
+
16
+ attr_accessor version: String?
17
+
18
+ def initialize: (
19
+ id: String,
20
+ ?variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
21
+ ?version: String?
22
+ ) -> void
23
+
24
+ def to_hash: -> {
25
+ id: String,
26
+ variables: ::Hash[Symbol, OpenAI::Models::Responses::ResponsePrompt::variable]?,
27
+ version: String?
28
+ }
29
+
30
+ type variable =
31
+ String
32
+ | OpenAI::Responses::ResponseInputText
33
+ | OpenAI::Responses::ResponseInputImage
34
+ | OpenAI::Responses::ResponseInputFile
35
+
36
+ module Variable
37
+ extend OpenAI::Internal::Type::Union
38
+
39
+ def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponsePrompt::variable]
40
+ end
41
+ end
42
+ end
43
+ end
44
+ end
@@ -16,7 +16,7 @@ module OpenAI
16
16
  ?order: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::order,
17
17
  ?project_id: String,
18
18
  ?request_options: OpenAI::request_opts
19
- ) -> OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse
19
+ ) -> OpenAI::Internal::CursorPage[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse]
20
20
 
21
21
  def delete: (
22
22
  String permission_id,
@@ -18,6 +18,8 @@ module OpenAI
18
18
  ?mask: OpenAI::Internal::file_input,
19
19
  ?model: OpenAI::Models::ImageEditParams::model?,
20
20
  ?n: Integer?,
21
+ ?output_compression: Integer?,
22
+ ?output_format: OpenAI::Models::ImageEditParams::output_format?,
21
23
  ?quality: OpenAI::Models::ImageEditParams::quality?,
22
24
  ?response_format: OpenAI::Models::ImageEditParams::response_format?,
23
25
  ?size: OpenAI::Models::ImageEditParams::size?,
@@ -13,6 +13,7 @@ module OpenAI
13
13
  ?metadata: OpenAI::Models::metadata?,
14
14
  ?parallel_tool_calls: bool?,
15
15
  ?previous_response_id: String?,
16
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
16
17
  ?reasoning: OpenAI::Reasoning?,
17
18
  ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
18
19
  ?store: bool?,
@@ -36,6 +37,7 @@ module OpenAI
36
37
  ?metadata: OpenAI::Models::metadata?,
37
38
  ?parallel_tool_calls: bool?,
38
39
  ?previous_response_id: String?,
40
+ ?prompt: OpenAI::Responses::ResponsePrompt?,
39
41
  ?reasoning: OpenAI::Reasoning?,
40
42
  ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
41
43
  ?store: bool?,