openai 0.57.0 → 0.59.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +45 -0
  3. data/README.md +98 -1
  4. data/lib/openai/auth/subject_token_provider.rb +15 -0
  5. data/lib/openai/auth/subject_token_providers/azure_managed_identity_token_provider.rb +88 -0
  6. data/lib/openai/auth/subject_token_providers/gcp_id_token_provider.rb +66 -0
  7. data/lib/openai/auth/subject_token_providers/k8s_service_account_token_provider.rb +37 -0
  8. data/lib/openai/auth/token_type.rb +10 -0
  9. data/lib/openai/auth/workload_identity.rb +23 -0
  10. data/lib/openai/auth/workload_identity_auth.rb +176 -0
  11. data/lib/openai/client.rb +59 -4
  12. data/lib/openai/errors.rb +39 -0
  13. data/lib/openai/internal/util.rb +22 -7
  14. data/lib/openai/models/conversations/message.rb +28 -1
  15. data/lib/openai/models/oauth_error_code.rb +29 -0
  16. data/lib/openai/models/realtime/realtime_session_create_request.rb +4 -3
  17. data/lib/openai/models/realtime/realtime_session_create_response.rb +7 -5
  18. data/lib/openai/models/realtime/realtime_tracing_config.rb +3 -2
  19. data/lib/openai/models/responses/response_input_file.rb +26 -1
  20. data/lib/openai/models/responses/response_input_file_content.rb +29 -1
  21. data/lib/openai/models/vector_stores/file_batch_create_params.rb +9 -5
  22. data/lib/openai/models/vector_stores/file_create_params.rb +3 -1
  23. data/lib/openai/models.rb +2 -0
  24. data/lib/openai/resources/realtime/calls.rb +1 -1
  25. data/lib/openai/version.rb +1 -1
  26. data/lib/openai.rb +8 -0
  27. data/rbi/openai/auth.rbi +55 -0
  28. data/rbi/openai/internal/util.rbi +8 -0
  29. data/rbi/openai/models/conversations/message.rbi +53 -1
  30. data/rbi/openai/models/oauth_error_code.rbi +24 -0
  31. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +6 -4
  32. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +9 -6
  33. data/rbi/openai/models/realtime/realtime_tracing_config.rbi +3 -2
  34. data/rbi/openai/models/responses/response_input_file.rbi +57 -0
  35. data/rbi/openai/models/responses/response_input_file_content.rbi +62 -0
  36. data/rbi/openai/models/vector_stores/file_batch_create_params.rbi +18 -10
  37. data/rbi/openai/models/vector_stores/file_create_params.rbi +6 -2
  38. data/rbi/openai/models.rbi +2 -0
  39. data/rbi/openai/resources/realtime/calls.rbi +3 -2
  40. data/rbi/openai/resources/vector_stores/file_batches.rbi +6 -4
  41. data/rbi/openai/resources/vector_stores/files.rbi +3 -1
  42. data/sig/openai/internal/util.rbs +4 -0
  43. data/sig/openai/models/conversations/message.rbs +18 -2
  44. data/sig/openai/models/oauth_error_code.rbs +14 -0
  45. data/sig/openai/models/responses/response_input_file.rbs +20 -0
  46. data/sig/openai/models/responses/response_input_file_content.rbs +20 -0
  47. data/sig/openai/models.rbs +2 -0
  48. metadata +13 -2
data/lib/openai/client.rb CHANGED
@@ -15,6 +15,8 @@ module OpenAI
15
15
  # Default max retry delay in seconds.
16
16
  DEFAULT_MAX_RETRY_DELAY = 8.0
17
17
 
18
+ WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER = "workload-identity-auth"
19
+
18
20
  # @return [String]
19
21
  attr_reader :api_key
20
22
 
@@ -27,6 +29,10 @@ module OpenAI
27
29
  # @return [String, nil]
28
30
  attr_reader :webhook_secret
29
31
 
32
+ # @return [OpenAI::Auth::WorkloadIdentityAuth, nil]
33
+ # @api private
34
+ attr_reader :workload_identity_auth
35
+
30
36
  # Given a prompt, the model will return one or more predicted completions, and can
31
37
  # also return the probabilities of alternative tokens at each position.
32
38
  # @return [OpenAI::Resources::Completions]
@@ -116,13 +122,48 @@ module OpenAI
116
122
  {"authorization" => "Bearer #{@api_key}"}
117
123
  end
118
124
 
125
+ # @api private
126
+ private def request_replayable?(request)
127
+ body = request[:body]
128
+ return true if body.nil? || body.is_a?(String)
129
+ return false if body.respond_to?(:read)
130
+ true
131
+ end
132
+
133
+ # @api private
134
+ private def send_request(request, redirect_count:, retry_count:, send_retry_header:)
135
+ return super unless @workload_identity_auth
136
+
137
+ token = @workload_identity_auth.get_token
138
+ updated_headers = request[:headers].merge("authorization" => "Bearer #{token}")
139
+ updated_request = request.merge(headers: updated_headers)
140
+
141
+ begin
142
+ super(updated_request, redirect_count: redirect_count, retry_count: retry_count, send_retry_header: send_retry_header)
143
+ rescue OpenAI::Errors::AuthenticationError
144
+ raise unless retry_count.zero? && request_replayable?(request)
145
+ @workload_identity_auth.invalidate_token
146
+
147
+ fresh_token = @workload_identity_auth.get_token
148
+ refreshed_headers = request[:headers].merge("authorization" => "Bearer #{fresh_token}")
149
+ refreshed_request = request.merge(headers: refreshed_headers)
150
+
151
+ super(refreshed_request, redirect_count: redirect_count, retry_count: retry_count + 1, send_retry_header: send_retry_header)
152
+ end
153
+ end
154
+
119
155
  # Creates and returns a new client for interacting with the API.
120
156
  #
121
- # @param api_key [String, nil] Defaults to `ENV["OPENAI_API_KEY"]`
157
+ # @param api_key [String, nil] Defaults to `ENV["OPENAI_API_KEY"]`.
158
+ # Mutually exclusive with `workload_identity`.
159
+ #
160
+ # @param workload_identity [OpenAI::Auth::WorkloadIdentity, nil]
161
+ # OAuth2 workload identity configuration for token exchange authentication.
162
+ # Mutually exclusive with `api_key`.
122
163
  #
123
- # @param organization [String, nil] Defaults to `ENV["OPENAI_ORG_ID"]`
164
+ # @param organization [String, nil] Defaults to `ENV["OPENAI_ORG_ID"]`.
124
165
  #
125
- # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]`
166
+ # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]`.
126
167
  #
127
168
  # @param webhook_secret [String, nil] Defaults to `ENV["OPENAI_WEBHOOK_SECRET"]`
128
169
  #
@@ -138,6 +179,7 @@ module OpenAI
138
179
  # @param max_retry_delay [Float]
139
180
  def initialize(
140
181
  api_key: ENV["OPENAI_API_KEY"],
182
+ workload_identity: nil,
141
183
  organization: ENV["OPENAI_ORG_ID"],
142
184
  project: ENV["OPENAI_PROJECT_ID"],
143
185
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"],
@@ -149,7 +191,20 @@ module OpenAI
149
191
  )
150
192
  base_url ||= "https://api.openai.com/v1"
151
193
 
152
- if api_key.nil?
194
+ if workload_identity && api_key && api_key != WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER
195
+ raise ArgumentError.new(
196
+ "The `api_key` and `workload_identity` arguments are mutually exclusive; " \
197
+ "only one can be passed at a time."
198
+ )
199
+ end
200
+
201
+ if workload_identity
202
+ @workload_identity_auth = OpenAI::Auth::WorkloadIdentityAuth.new(
203
+ workload_identity,
204
+ organization
205
+ )
206
+ api_key = WORKLOAD_IDENTITY_API_KEY_PLACEHOLDER
207
+ elsif api_key.nil?
153
208
  raise ArgumentError.new("api_key is required, and can be set via environ: \"OPENAI_API_KEY\"")
154
209
  end
155
210
 
data/lib/openai/errors.rb CHANGED
@@ -263,5 +263,44 @@ module OpenAI
263
263
  class InternalServerError < OpenAI::Errors::APIStatusError
264
264
  HTTP_STATUS = (500..)
265
265
  end
266
+
267
+ class OAuthError < OpenAI::Errors::APIStatusError
268
+ # @return [OpenAI::Models::OAuthErrorCode::Variants, nil]
269
+ attr_reader :error_code
270
+
271
+ def initialize(status:, body:, headers:)
272
+ @error_code = OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::OAuthErrorCode, body&.dig(:error))
273
+
274
+ message =
275
+ if body&.dig(:error_description)
276
+ body[:error_description]
277
+ elsif @error_code
278
+ @error_code
279
+ else
280
+ "OAuth2 authentication error"
281
+ end
282
+
283
+ super(
284
+ url: URI("https://auth.openai.com/oauth/token"),
285
+ status: status,
286
+ headers: headers,
287
+ body: body,
288
+ request: nil,
289
+ response: nil,
290
+ message: message
291
+ )
292
+ end
293
+ end
294
+
295
+ class SubjectTokenProviderError < OpenAI::Errors::Error
296
+ attr_reader :provider
297
+ attr_accessor :cause
298
+
299
+ def initialize(message:, provider:, cause: nil)
300
+ super(message)
301
+ @provider = provider
302
+ @cause = cause
303
+ end
304
+ end
266
305
  end
267
306
  end
@@ -157,7 +157,7 @@ module OpenAI
157
157
  in Hash | nil => coerced
158
158
  coerced
159
159
  else
160
- message = "Expected a #{Hash} or #{OpenAI::Internal::Type::BaseModel}, got #{data.inspect}"
160
+ message = "Expected a #{Hash} or #{OpenAI::Internal::Type::BaseModel}, got #{input.inspect}"
161
161
  raise ArgumentError.new(message)
162
162
  end
163
163
  end
@@ -237,6 +237,11 @@ module OpenAI
237
237
  end
238
238
  end
239
239
 
240
+ # @type [Regexp]
241
+ #
242
+ # https://www.rfc-editor.org/rfc/rfc3986.html#section-3.3
243
+ RFC_3986_NOT_PCHARS = /[^A-Za-z0-9\-._~!$&'()*+,;=:@]+/
244
+
240
245
  class << self
241
246
  # @api private
242
247
  #
@@ -247,6 +252,15 @@ module OpenAI
247
252
  "#{uri.scheme}://#{uri.host}#{":#{uri.port}" unless uri.port == uri.default_port}"
248
253
  end
249
254
 
255
+ # @api private
256
+ #
257
+ # @param path [String, Integer]
258
+ #
259
+ # @return [String]
260
+ def encode_path(path)
261
+ path.to_s.gsub(OpenAI::Internal::Util::RFC_3986_NOT_PCHARS) { ERB::Util.url_encode(_1) }
262
+ end
263
+
250
264
  # @api private
251
265
  #
252
266
  # @param path [String, Array<String>]
@@ -259,7 +273,7 @@ module OpenAI
259
273
  in []
260
274
  ""
261
275
  in [String => p, *interpolations]
262
- encoded = interpolations.map { ERB::Util.url_encode(_1) }
276
+ encoded = interpolations.map { encode_path(_1) }
263
277
  format(p, *encoded)
264
278
  end
265
279
  end
@@ -571,16 +585,15 @@ module OpenAI
571
585
  y << "Content-Disposition: form-data"
572
586
 
573
587
  unless key.nil?
574
- name = ERB::Util.url_encode(key.to_s)
575
- y << "; name=\"#{name}\""
588
+ y << "; name=\"#{key}\""
576
589
  end
577
590
 
578
591
  case val
579
592
  in OpenAI::FilePart unless val.filename.nil?
580
- filename = ERB::Util.url_encode(val.filename)
593
+ filename = encode_path(val.filename)
581
594
  y << "; filename=\"#{filename}\""
582
595
  in Pathname | IO
583
- filename = ERB::Util.url_encode(::File.basename(val.to_path))
596
+ filename = encode_path(::File.basename(val.to_path))
584
597
  y << "; filename=\"#{filename}\""
585
598
  else
586
599
  end
@@ -597,6 +610,7 @@ module OpenAI
597
610
  #
598
611
  # @return [Array(String, Enumerable<String>)]
599
612
  private def encode_multipart_streaming(body)
613
+ # rubocop:disable Style/CaseEquality
600
614
  # RFC 1521 Section 7.2.1 says we should have 70 char maximum for boundary length
601
615
  boundary = SecureRandom.urlsafe_base64(46)
602
616
 
@@ -606,7 +620,7 @@ module OpenAI
606
620
  in Hash
607
621
  body.each do |key, val|
608
622
  case val
609
- in Array if val.all? { primitive?(_1) }
623
+ in Array if val.all? { primitive?(_1) || OpenAI::Internal::Type::FileInput === _1 }
610
624
  val.each do |v|
611
625
  write_multipart_chunk(y, boundary: boundary, key: key, val: v, closing: closing)
612
626
  end
@@ -622,6 +636,7 @@ module OpenAI
622
636
 
623
637
  fused_io = fused_enum(strio) { closing.each(&:call) }
624
638
  [boundary, fused_io]
639
+ # rubocop:enable Style/CaseEquality
625
640
  end
626
641
 
627
642
  # @api private
@@ -36,7 +36,16 @@ module OpenAI
36
36
  # @return [Symbol, :message]
37
37
  required :type, const: :message
38
38
 
39
- # @!method initialize(id:, content:, role:, status:, type: :message)
39
+ # @!attribute phase
40
+ # Labels an `assistant` message as intermediate commentary (`commentary`) or the
41
+ # final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
42
+ # sending follow-up requests, preserve and resend phase on all assistant messages
43
+ # — dropping it can degrade performance. Not used for user messages.
44
+ #
45
+ # @return [Symbol, OpenAI::Models::Conversations::Message::Phase, nil]
46
+ optional :phase, enum: -> { OpenAI::Conversations::Message::Phase }, nil?: true
47
+
48
+ # @!method initialize(id:, content:, role:, status:, phase: nil, type: :message)
40
49
  # Some parameter documentations has been truncated, see
41
50
  # {OpenAI::Models::Conversations::Message} for more details.
42
51
  #
@@ -50,6 +59,8 @@ module OpenAI
50
59
  #
51
60
  # @param status [Symbol, OpenAI::Models::Conversations::Message::Status] The status of item. One of `in_progress`, `completed`, or `incomplete`. Populate
52
61
  #
62
+ # @param phase [Symbol, OpenAI::Models::Conversations::Message::Phase, nil] Labels an `assistant` message as intermediate commentary (`commentary`) or the f
63
+ #
53
64
  # @param type [Symbol, :message] The type of the message. Always set to `message`.
54
65
 
55
66
  # A content part that makes up an input or output item.
@@ -144,6 +155,22 @@ module OpenAI
144
155
  # @!method self.values
145
156
  # @return [Array<Symbol>]
146
157
  end
158
+
159
+ # Labels an `assistant` message as intermediate commentary (`commentary`) or the
160
+ # final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
161
+ # sending follow-up requests, preserve and resend phase on all assistant messages
162
+ # — dropping it can degrade performance. Not used for user messages.
163
+ #
164
+ # @see OpenAI::Models::Conversations::Message#phase
165
+ module Phase
166
+ extend OpenAI::Internal::Type::Enum
167
+
168
+ COMMENTARY = :commentary
169
+ FINAL_ANSWER = :final_answer
170
+
171
+ # @!method self.values
172
+ # @return [Array<Symbol>]
173
+ end
147
174
  end
148
175
  end
149
176
  end
@@ -0,0 +1,29 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module OAuthErrorCode
6
+ extend OpenAI::Internal::Type::Union
7
+
8
+ variant const: -> { OpenAI::Models::OAuthErrorCode::INVALID_GRANT }
9
+
10
+ variant const: -> { OpenAI::Models::OAuthErrorCode::INVALID_SUBJECT_TOKEN }
11
+
12
+ variant String
13
+
14
+ # @!method self.variants
15
+ # @return [Array(Symbol, String)]
16
+
17
+ define_sorbet_constant!(:Variants) do
18
+ T.type_alias { T.any(OpenAI::OAuthErrorCode::TaggedSymbol, String) }
19
+ end
20
+
21
+ # @!group
22
+
23
+ INVALID_GRANT = :invalid_grant
24
+ INVALID_SUBJECT_TOKEN = :invalid_subject_token
25
+
26
+ # @!endgroup
27
+ end
28
+ end
29
+ end
@@ -88,8 +88,9 @@ module OpenAI
88
88
 
89
89
  # @!attribute tracing
90
90
  # Realtime API can write session traces to the
91
- # [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
92
- # tracing is enabled for a session, the configuration cannot be modified.
91
+ # [Traces Dashboard](https://platform.openai.com/logs?api=traces). Set to null to
92
+ # disable tracing. Once tracing is enabled for a session, the configuration cannot
93
+ # be modified.
93
94
  #
94
95
  # `auto` will create a trace for the session with default values for the workflow
95
96
  # name, group id, and metadata.
@@ -144,7 +145,7 @@ module OpenAI
144
145
  #
145
146
  # @param tools [Array<OpenAI::Models::Realtime::RealtimeFunctionTool, OpenAI::Models::Realtime::RealtimeToolsConfigUnion::Mcp>] Tools available to the model.
146
147
  #
147
- # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces
148
+ # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](https://platform
148
149
  #
149
150
  # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi
150
151
  #
@@ -96,8 +96,9 @@ module OpenAI
96
96
 
97
97
  # @!attribute tracing
98
98
  # Realtime API can write session traces to the
99
- # [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
100
- # tracing is enabled for a session, the configuration cannot be modified.
99
+ # [Traces Dashboard](https://platform.openai.com/logs?api=traces). Set to null to
100
+ # disable tracing. Once tracing is enabled for a session, the configuration cannot
101
+ # be modified.
101
102
  #
102
103
  # `auto` will create a trace for the session with default values for the workflow
103
104
  # name, group id, and metadata.
@@ -155,7 +156,7 @@ module OpenAI
155
156
  #
156
157
  # @param tools [Array<OpenAI::Models::Realtime::RealtimeFunctionTool, OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Tool::McpTool>] Tools available to the model.
157
158
  #
158
- # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Tracing::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces
159
+ # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Tracing::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](https://platform
159
160
  #
160
161
  # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi
161
162
  #
@@ -1024,8 +1025,9 @@ module OpenAI
1024
1025
  end
1025
1026
 
1026
1027
  # Realtime API can write session traces to the
1027
- # [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
1028
- # tracing is enabled for a session, the configuration cannot be modified.
1028
+ # [Traces Dashboard](https://platform.openai.com/logs?api=traces). Set to null to
1029
+ # disable tracing. Once tracing is enabled for a session, the configuration cannot
1030
+ # be modified.
1029
1031
  #
1030
1032
  # `auto` will create a trace for the session with default values for the workflow
1031
1033
  # name, group id, and metadata.
@@ -4,8 +4,9 @@ module OpenAI
4
4
  module Models
5
5
  module Realtime
6
6
  # Realtime API can write session traces to the
7
- # [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
8
- # tracing is enabled for a session, the configuration cannot be modified.
7
+ # [Traces Dashboard](https://platform.openai.com/logs?api=traces). Set to null to
8
+ # disable tracing. Once tracing is enabled for a session, the configuration cannot
9
+ # be modified.
9
10
  #
10
11
  # `auto` will create a trace for the session with default values for the workflow
11
12
  # name, group id, and metadata.
@@ -10,6 +10,14 @@ module OpenAI
10
10
  # @return [Symbol, :input_file]
11
11
  required :type, const: :input_file
12
12
 
13
+ # @!attribute detail
14
+ # The detail level of the file to be sent to the model. Use `low` for the default
15
+ # rendering behavior, or `high` to render the file at higher quality. Defaults to
16
+ # `low`.
17
+ #
18
+ # @return [Symbol, OpenAI::Models::Responses::ResponseInputFile::Detail, nil]
19
+ optional :detail, enum: -> { OpenAI::Responses::ResponseInputFile::Detail }
20
+
13
21
  # @!attribute file_data
14
22
  # The content of the file to be sent to the model.
15
23
  #
@@ -34,12 +42,14 @@ module OpenAI
34
42
  # @return [String, nil]
35
43
  optional :filename, String
36
44
 
37
- # @!method initialize(file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
45
+ # @!method initialize(detail: nil, file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
38
46
  # Some parameter documentations has been truncated, see
39
47
  # {OpenAI::Models::Responses::ResponseInputFile} for more details.
40
48
  #
41
49
  # A file input to the model.
42
50
  #
51
+ # @param detail [Symbol, OpenAI::Models::Responses::ResponseInputFile::Detail] The detail level of the file to be sent to the model. Use `low` for the default
52
+ #
43
53
  # @param file_data [String] The content of the file to be sent to the model.
44
54
  #
45
55
  # @param file_id [String, nil] The ID of the file to be sent to the model.
@@ -49,6 +59,21 @@ module OpenAI
49
59
  # @param filename [String] The name of the file to be sent to the model.
50
60
  #
51
61
  # @param type [Symbol, :input_file] The type of the input item. Always `input_file`.
62
+
63
+ # The detail level of the file to be sent to the model. Use `low` for the default
64
+ # rendering behavior, or `high` to render the file at higher quality. Defaults to
65
+ # `low`.
66
+ #
67
+ # @see OpenAI::Models::Responses::ResponseInputFile#detail
68
+ module Detail
69
+ extend OpenAI::Internal::Type::Enum
70
+
71
+ LOW = :low
72
+ HIGH = :high
73
+
74
+ # @!method self.values
75
+ # @return [Array<Symbol>]
76
+ end
52
77
  end
53
78
  end
54
79
  end
@@ -10,6 +10,14 @@ module OpenAI
10
10
  # @return [Symbol, :input_file]
11
11
  required :type, const: :input_file
12
12
 
13
+ # @!attribute detail
14
+ # The detail level of the file to be sent to the model. Use `low` for the default
15
+ # rendering behavior, or `high` to render the file at higher quality. Defaults to
16
+ # `low`.
17
+ #
18
+ # @return [Symbol, OpenAI::Models::Responses::ResponseInputFileContent::Detail, nil]
19
+ optional :detail, enum: -> { OpenAI::Responses::ResponseInputFileContent::Detail }
20
+
13
21
  # @!attribute file_data
14
22
  # The base64-encoded data of the file to be sent to the model.
15
23
  #
@@ -34,9 +42,14 @@ module OpenAI
34
42
  # @return [String, nil]
35
43
  optional :filename, String, nil?: true
36
44
 
37
- # @!method initialize(file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
45
+ # @!method initialize(detail: nil, file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
46
+ # Some parameter documentations has been truncated, see
47
+ # {OpenAI::Models::Responses::ResponseInputFileContent} for more details.
48
+ #
38
49
  # A file input to the model.
39
50
  #
51
+ # @param detail [Symbol, OpenAI::Models::Responses::ResponseInputFileContent::Detail] The detail level of the file to be sent to the model. Use `low` for the default
52
+ #
40
53
  # @param file_data [String, nil] The base64-encoded data of the file to be sent to the model.
41
54
  #
42
55
  # @param file_id [String, nil] The ID of the file to be sent to the model.
@@ -46,6 +59,21 @@ module OpenAI
46
59
  # @param filename [String, nil] The name of the file to be sent to the model.
47
60
  #
48
61
  # @param type [Symbol, :input_file] The type of the input item. Always `input_file`.
62
+
63
+ # The detail level of the file to be sent to the model. Use `low` for the default
64
+ # rendering behavior, or `high` to render the file at higher quality. Defaults to
65
+ # `low`.
66
+ #
67
+ # @see OpenAI::Models::Responses::ResponseInputFileContent#detail
68
+ module Detail
69
+ extend OpenAI::Internal::Type::Enum
70
+
71
+ LOW = :low
72
+ HIGH = :high
73
+
74
+ # @!method self.values
75
+ # @return [Array<Symbol>]
76
+ end
49
77
  end
50
78
  end
51
79
  end
@@ -38,8 +38,9 @@ module OpenAI
38
38
  # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
39
39
  # the vector store should use. Useful for tools like `file_search` that can access
40
40
  # files. If `attributes` or `chunking_strategy` are provided, they will be applied
41
- # to all files in the batch. The maximum batch size is 2000 files. Mutually
42
- # exclusive with `files`.
41
+ # to all files in the batch. The maximum batch size is 2000 files. This endpoint
42
+ # is recommended for multi-file ingestion and helps reduce per-vector-store write
43
+ # request pressure. Mutually exclusive with `files`.
43
44
  #
44
45
  # @return [Array<String>, nil]
45
46
  optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
@@ -48,8 +49,9 @@ module OpenAI
48
49
  # A list of objects that each include a `file_id` plus optional `attributes` or
49
50
  # `chunking_strategy`. Use this when you need to override metadata for specific
50
51
  # files. The global `attributes` or `chunking_strategy` will be ignored and must
51
- # be specified for each file. The maximum batch size is 2000 files. Mutually
52
- # exclusive with `file_ids`.
52
+ # be specified for each file. The maximum batch size is 2000 files. This endpoint
53
+ # is recommended for multi-file ingestion and helps reduce per-vector-store write
54
+ # request pressure. Mutually exclusive with `file_ids`.
53
55
  #
54
56
  # @return [Array<OpenAI::Models::VectorStores::FileBatchCreateParams::File>, nil]
55
57
  optional :files, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::VectorStores::FileBatchCreateParams::File] }
@@ -87,7 +89,9 @@ module OpenAI
87
89
  # @!attribute file_id
88
90
  # A [File](https://platform.openai.com/docs/api-reference/files) ID that the
89
91
  # vector store should use. Useful for tools like `file_search` that can access
90
- # files.
92
+ # files. For multi-file ingestion, we recommend
93
+ # [`file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
94
+ # to minimize per-vector-store write requests.
91
95
  #
92
96
  # @return [String]
93
97
  required :file_id, String
@@ -16,7 +16,9 @@ module OpenAI
16
16
  # @!attribute file_id
17
17
  # A [File](https://platform.openai.com/docs/api-reference/files) ID that the
18
18
  # vector store should use. Useful for tools like `file_search` that can access
19
- # files.
19
+ # files. For multi-file ingestion, we recommend
20
+ # [`file_batches`](https://platform.openai.com/docs/api-reference/vector-stores-file-batches/createBatch)
21
+ # to minimize per-vector-store write requests.
20
22
  #
21
23
  # @return [String]
22
24
  required :file_id, String
data/lib/openai/models.rb CHANGED
@@ -207,6 +207,8 @@ module OpenAI
207
207
 
208
208
  ModerationTextInput = OpenAI::Models::ModerationTextInput
209
209
 
210
+ OAuthErrorCode = OpenAI::Models::OAuthErrorCode
211
+
210
212
  OtherFileChunkingStrategyObject = OpenAI::Models::OtherFileChunkingStrategyObject
211
213
 
212
214
  Realtime = OpenAI::Models::Realtime
@@ -32,7 +32,7 @@ module OpenAI
32
32
  #
33
33
  # @param tools [Array<OpenAI::Models::Realtime::RealtimeFunctionTool, OpenAI::Models::Realtime::RealtimeToolsConfigUnion::Mcp>] Tools available to the model.
34
34
  #
35
- # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces
35
+ # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](https://platform
36
36
  #
37
37
  # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi
38
38
  #
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.57.0"
4
+ VERSION = "0.59.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -29,6 +29,13 @@ require "connection_pool"
29
29
 
30
30
  # Package files.
31
31
  require_relative "openai/version"
32
+ require_relative "openai/auth/workload_identity"
33
+ require_relative "openai/auth/subject_token_provider"
34
+ require_relative "openai/auth/token_type"
35
+ require_relative "openai/auth/workload_identity_auth"
36
+ require_relative "openai/auth/subject_token_providers/k8s_service_account_token_provider"
37
+ require_relative "openai/auth/subject_token_providers/azure_managed_identity_token_provider"
38
+ require_relative "openai/auth/subject_token_providers/gcp_id_token_provider"
32
39
  require_relative "openai/internal/util"
33
40
  require_relative "openai/internal/type/converter"
34
41
  require_relative "openai/internal/type/unknown"
@@ -421,6 +428,7 @@ require_relative "openai/models/moderation_image_url_input"
421
428
  require_relative "openai/models/moderation_model"
422
429
  require_relative "openai/models/moderation_multi_modal_input"
423
430
  require_relative "openai/models/moderation_text_input"
431
+ require_relative "openai/models/oauth_error_code"
424
432
  require_relative "openai/models/other_file_chunking_strategy_object"
425
433
  require_relative "openai/models/realtime/audio_transcription"
426
434
  require_relative "openai/models/realtime/call_accept_params"
@@ -0,0 +1,55 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Auth
5
+ module TokenType
6
+ JWT = T.let(:jwt, Symbol)
7
+ ID = T.let(:id, Symbol)
8
+ end
9
+
10
+ module SubjectTokenProvider
11
+ sig { returns(Symbol) }
12
+ def token_type
13
+ end
14
+
15
+ sig { returns(String) }
16
+ def get_token
17
+ end
18
+ end
19
+
20
+ class WorkloadIdentity
21
+ sig { returns(String) }
22
+ attr_reader :client_id
23
+
24
+ sig { returns(String) }
25
+ attr_reader :identity_provider_id
26
+
27
+ sig { returns(String) }
28
+ attr_reader :service_account_id
29
+
30
+ sig { returns(SubjectTokenProvider) }
31
+ attr_reader :provider
32
+
33
+ sig { returns(Integer) }
34
+ attr_reader :refresh_buffer_seconds
35
+
36
+ sig do
37
+ params(
38
+ client_id: T.any(String, Symbol),
39
+ identity_provider_id: T.any(String, Symbol),
40
+ service_account_id: T.any(String, Symbol),
41
+ provider: SubjectTokenProvider,
42
+ refresh_buffer_seconds: Integer
43
+ ).void
44
+ end
45
+ def initialize(
46
+ client_id:,
47
+ identity_provider_id:,
48
+ service_account_id:,
49
+ provider:,
50
+ refresh_buffer_seconds: 1200
51
+ )
52
+ end
53
+ end
54
+ end
55
+ end
@@ -148,12 +148,20 @@ module OpenAI
148
148
  end
149
149
  end
150
150
 
151
+ # https://www.rfc-editor.org/rfc/rfc3986.html#section-3.3
152
+ RFC_3986_NOT_PCHARS = T.let(/[^A-Za-z0-9\-._~!$&'()*+,;=:@]+/, Regexp)
153
+
151
154
  class << self
152
155
  # @api private
153
156
  sig { params(uri: URI::Generic).returns(String) }
154
157
  def uri_origin(uri)
155
158
  end
156
159
 
160
+ # @api private
161
+ sig { params(path: T.any(String, Integer)).returns(String) }
162
+ def encode_path(path)
163
+ end
164
+
157
165
  # @api private
158
166
  sig { params(path: T.any(String, T::Array[String])).returns(String) }
159
167
  def interpolate_path(path)