openai 0.34.1 → 0.35.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +23 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/base_client.rb +7 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +30 -24
- data/lib/openai/models/custom_tool_input_format.rb +6 -0
- data/lib/openai/models/image_edit_params.rb +1 -1
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -3
- data/lib/openai/models/realtime/realtime_session_create_response.rb +15 -4
- data/lib/openai/models/realtime/realtime_tracing_config.rb +1 -1
- data/lib/openai/models/realtime/realtime_truncation.rb +13 -2
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +38 -4
- data/lib/openai/models/reasoning.rb +4 -0
- data/lib/openai/models/responses/custom_tool.rb +3 -0
- data/lib/openai/models/responses/easy_input_message.rb +3 -3
- data/lib/openai/models/responses/file_search_tool.rb +33 -1
- data/lib/openai/models/responses/response_content.rb +1 -4
- data/lib/openai/models/responses/response_input_content.rb +1 -4
- data/lib/openai/models/responses/response_input_item.rb +2 -2
- data/lib/openai/models/responses/response_input_message_item.rb +2 -2
- data/lib/openai/models/responses/response_output_text.rb +8 -8
- data/lib/openai/models/responses/tool.rb +30 -2
- data/lib/openai/models/vector_stores/file_batch_create_params.rb +77 -11
- data/lib/openai/models/video.rb +9 -1
- data/lib/openai/resources/files.rb +13 -14
- data/lib/openai/resources/images.rb +2 -2
- data/lib/openai/resources/realtime/calls.rb +1 -1
- data/lib/openai/resources/responses.rb +4 -0
- data/lib/openai/resources/vector_stores/file_batches.rb +6 -4
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/internal/transport/base_client.rbi +5 -0
- data/rbi/openai/internal/type/base_model.rbi +8 -4
- data/rbi/openai/models/custom_tool_input_format.rbi +2 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_truncation.rbi +13 -2
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +84 -6
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/responses/custom_tool.rbi +2 -0
- data/rbi/openai/models/responses/file_search_tool.rbi +65 -0
- data/rbi/openai/models/responses/response_content.rbi +0 -1
- data/rbi/openai/models/responses/response_input_content.rbi +1 -2
- data/rbi/openai/models/responses/response_input_item.rbi +3 -6
- data/rbi/openai/models/responses/response_input_message_item.rbi +1 -2
- data/rbi/openai/models/responses/response_output_text.rbi +10 -19
- data/rbi/openai/models/responses/tool.rbi +73 -4
- data/rbi/openai/models/vector_stores/file_batch_create_params.rbi +181 -12
- data/rbi/openai/models/video.rbi +8 -0
- data/rbi/openai/resources/files.rbi +13 -14
- data/rbi/openai/resources/realtime/calls.rbi +17 -6
- data/rbi/openai/resources/vector_stores/file_batches.rbi +15 -5
- data/sig/openai/internal/transport/base_client.rbs +2 -0
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +29 -2
- data/sig/openai/models/responses/file_search_tool.rbs +24 -0
- data/sig/openai/models/responses/response_content.rbs +0 -1
- data/sig/openai/models/responses/response_input_content.rbs +0 -1
- data/sig/openai/models/responses/response_output_text.rbs +7 -11
- data/sig/openai/models/responses/tool.rbs +30 -3
- data/sig/openai/models/vector_stores/file_batch_create_params.rbs +56 -6
- data/sig/openai/models/video.rbs +5 -0
- data/sig/openai/resources/vector_stores/file_batches.rbs +2 -1
- metadata +4 -3
data/rbi/openai/resources/realtime/calls.rbi
CHANGED

@@ -57,7 +57,7 @@ module OpenAI
       end
       def accept(
         # The identifier for the call provided in the
-        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/
+        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming)
         # webhook.
         call_id,
         # Configuration for input and output audio.
@@ -105,8 +105,19 @@ module OpenAI
         # `auto` will create a trace for the session with default values for the workflow
         # name, group id, and metadata.
         tracing: nil,
-        #
-        #
+        # When the number of tokens in a conversation exceeds the model's input token
+        # limit, the conversation be truncated, meaning messages (starting from the
+        # oldest) will not be included in the model's context. A 32k context model with
+        # 4,096 max output tokens can only include 28,224 tokens in the context before
+        # truncation occurs. Clients can configure truncation behavior to truncate with a
+        # lower max token limit, which is an effective way to control token usage and
+        # cost. Truncation will reduce the number of cached tokens on the next turn
+        # (busting the cache), since messages are dropped from the beginning of the
+        # context. However, clients can also configure truncation to retain messages up to
+        # a fraction of the maximum context size, which will reduce the need for future
+        # truncations and thus improve the cache rate. Truncation can be disabled
+        # entirely, which means the server will never truncate but would instead return an
+        # error if the conversation exceeds the model's input token limit.
         truncation: nil,
         # The type of session to create. Always `realtime` for the Realtime API.
         type: :realtime,
@@ -123,7 +134,7 @@ module OpenAI
       end
       def hangup(
         # The identifier for the call. For SIP calls, use the value provided in the
-        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/
+        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming)
         # webhook. For WebRTC sessions, reuse the call ID returned in the `Location`
         # header when creating the call with
         # [`POST /v1/realtime/calls`](https://platform.openai.com/docs/api-reference/realtime/create-call).
@@ -142,7 +153,7 @@ module OpenAI
       end
       def refer(
         # The identifier for the call provided in the
-        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/
+        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming)
        # webhook.
         call_id,
         # URI that should appear in the SIP Refer-To header. Supports values like
@@ -162,7 +173,7 @@ module OpenAI
       end
       def reject(
         # The identifier for the call provided in the
-        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/
+        # [`realtime.call.incoming`](https://platform.openai.com/docs/api-reference/webhook-events/realtime/call/incoming)
         # webhook.
         call_id,
         # SIP response code to send back to the caller. Defaults to `603` (Decline) when
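The new `truncation` parameter on `accept` takes the same shape as the session-create request. A minimal sketch, assuming the gem's `client.realtime.calls` resource layout; the call ID and token numbers are hypothetical:

  require "openai"

  client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

  # Accept an incoming SIP call, retaining roughly half of the maximum
  # context whenever truncation occurs. All IDs and values are illustrative.
  client.realtime.calls.accept(
    "rtc_hypothetical_call_id", # from the `realtime.call.incoming` webhook payload
    type: :realtime,
    truncation: {
      type: :retention_ratio,
      retention_ratio: 0.5,                       # fraction of max context to retain
      token_limits: { post_instructions: 16_000 } # hypothetical post-instructions budget
    }
  )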
data/rbi/openai/resources/vector_stores/file_batches.rbi
CHANGED

@@ -8,7 +8,6 @@ module OpenAI
       sig do
         params(
           vector_store_id: String,
-          file_ids: T::Array[String],
           attributes:
             T.nilable(
               T::Hash[
@@ -21,16 +20,17 @@ module OpenAI
               OpenAI::AutoFileChunkingStrategyParam::OrHash,
               OpenAI::StaticFileChunkingStrategyObjectParam::OrHash
             ),
+          file_ids: T::Array[String],
+          files:
+            T::Array[
+              OpenAI::VectorStores::FileBatchCreateParams::File::OrHash
+            ],
           request_options: OpenAI::RequestOptions::OrHash
         ).returns(OpenAI::VectorStores::VectorStoreFileBatch)
       end
       def create(
         # The ID of the vector store for which to create a File Batch.
         vector_store_id,
-        # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
-        # the vector store should use. Useful for tools like `file_search` that can access
-        # files.
-        file_ids:,
         # Set of 16 key-value pairs that can be attached to an object. This can be useful
         # for storing additional information about the object in a structured format, and
         # querying for objects via API or the dashboard. Keys are strings with a maximum
@@ -40,6 +40,16 @@ module OpenAI
         # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
         # strategy. Only applicable if `file_ids` is non-empty.
         chunking_strategy: nil,
+        # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+        # the vector store should use. Useful for tools like `file_search` that can access
+        # files. If `attributes` or `chunking_strategy` are provided, they will be applied
+        # to all files in the batch. Mutually exclusive with `files`.
+        file_ids: nil,
+        # A list of objects that each include a `file_id` plus optional `attributes` or
+        # `chunking_strategy`. Use this when you need to override metadata for specific
+        # files. The global `attributes` or `chunking_strategy` will be ignored and must
+        # be specified for each file. Mutually exclusive with `file_ids`.
+        files: nil,
         request_options: {}
       )
       end
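Under the relaxed signature, `file_ids:` and `files:` are both optional and mutually exclusive. A hedged sketch with hypothetical store and file IDs; plain hashes stand in for the `OrHash` param types above:

  # Batch-level settings applied uniformly to every file.
  client.vector_stores.file_batches.create(
    "vs_hypothetical_id",
    file_ids: %w[file-abc file-def],
    chunking_strategy: { type: :auto }
  )

  # Per-file overrides via the new `files:` form; batch-level `attributes`
  # and `chunking_strategy` are ignored here and must be set per file.
  client.vector_stores.file_batches.create(
    "vs_hypothetical_id",
    files: [
      { file_id: "file-abc", attributes: { category: "report" } },
      { file_id: "file-def", chunking_strategy: { type: :auto } }
    ]
  )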
data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs
CHANGED

@@ -2,19 +2,46 @@ module OpenAI
  module Models
    module Realtime
      type realtime_truncation_retention_ratio =
-        { retention_ratio: Float, type: :retention_ratio }
+        {
+          retention_ratio: Float,
+          type: :retention_ratio,
+          token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits
+        }

      class RealtimeTruncationRetentionRatio < OpenAI::Internal::Type::BaseModel
        attr_accessor retention_ratio: Float

        attr_accessor type: :retention_ratio

+        attr_reader token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits?
+
+        def token_limits=: (
+          OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits
+        ) -> OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits
+
        def initialize: (
          retention_ratio: Float,
+          ?token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits,
          ?type: :retention_ratio
        ) -> void

-        def to_hash: -> { retention_ratio: Float, type: :retention_ratio }
+        def to_hash: -> {
+          retention_ratio: Float,
+          type: :retention_ratio,
+          token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits
+        }
+
+        type token_limits = { post_instructions: Integer }
+
+        class TokenLimits < OpenAI::Internal::Type::BaseModel
+          attr_reader post_instructions: Integer?
+
+          def post_instructions=: (Integer) -> Integer
+
+          def initialize: (?post_instructions: Integer) -> void
+
+          def to_hash: -> { post_instructions: Integer }
+        end
      end
    end
  end
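The typed models can be constructed directly from these signatures; a small sketch with an illustrative token budget:

  truncation = OpenAI::Realtime::RealtimeTruncationRetentionRatio.new(
    retention_ratio: 0.5,
    token_limits: OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits.new(
      post_instructions: 16_000 # illustrative value
    )
  )
  truncation.to_hash # => hash matching the signature above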
data/sig/openai/models/responses/file_search_tool.rbs
CHANGED

@@ -53,11 +53,18 @@ module OpenAI

      type ranking_options =
        {
+          hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch,
          ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
          score_threshold: Float
        }

      class RankingOptions < OpenAI::Internal::Type::BaseModel
+        attr_reader hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch?
+
+        def hybrid_search=: (
+          OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch
+        ) -> OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch
+
        attr_reader ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker?

        def ranker=: (
@@ -69,15 +76,32 @@ module OpenAI
        def score_threshold=: (Float) -> Float

        def initialize: (
+          ?hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch,
          ?ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
          ?score_threshold: Float
        ) -> void

        def to_hash: -> {
+          hybrid_search: OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch,
          ranker: OpenAI::Models::Responses::FileSearchTool::RankingOptions::ranker,
          score_threshold: Float
        }

+        type hybrid_search = { embedding_weight: Float, text_weight: Float }
+
+        class HybridSearch < OpenAI::Internal::Type::BaseModel
+          attr_accessor embedding_weight: Float
+
+          attr_accessor text_weight: Float
+
+          def initialize: (
+            embedding_weight: Float,
+            text_weight: Float
+          ) -> void
+
+          def to_hash: -> { embedding_weight: Float, text_weight: Float }
+        end
+
        type ranker = :auto | :"default-2024-11-15"

        module Ranker
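Per these signatures, `hybrid_search` carries two required weights. A hedged sketch of a `file_search` tool definition using it; the vector store ID and weights are hypothetical, and a plain hash would also satisfy the `OrHash` param types:

  tool = OpenAI::Responses::FileSearchTool.new(
    vector_store_ids: ["vs_hypothetical_id"],
    ranking_options: {
      hybrid_search: { embedding_weight: 0.7, text_weight: 0.3 }, # illustrative weights
      score_threshold: 0.5
    }
  )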
data/sig/openai/models/responses/response_content.rbs
CHANGED

@@ -5,7 +5,6 @@ module OpenAI
        OpenAI::Responses::ResponseInputText
        | OpenAI::Responses::ResponseInputImage
        | OpenAI::Responses::ResponseInputFile
-        | OpenAI::Responses::ResponseInputAudio
        | OpenAI::Responses::ResponseOutputText
        | OpenAI::Responses::ResponseOutputRefusal
        | OpenAI::Responses::ResponseContent::ReasoningTextContent
data/sig/openai/models/responses/response_output_text.rbs
CHANGED

@@ -4,36 +4,32 @@ module OpenAI
    type response_output_text =
      {
        annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
+        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob],
        text: String,
-        type: :output_text,
-        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+        type: :output_text
      }

    class ResponseOutputText < OpenAI::Internal::Type::BaseModel
      attr_accessor annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation]

+      attr_accessor logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+
      attr_accessor text: String

      attr_accessor type: :output_text

-      attr_reader logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]?
-
-      def logprobs=: (
-        ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
-      ) -> ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
-
      def initialize: (
        annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
+        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob],
        text: String,
-        ?logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob],
        ?type: :output_text
      ) -> void

      def to_hash: -> {
        annotations: ::Array[OpenAI::Models::Responses::ResponseOutputText::annotation],
+        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob],
        text: String,
-        type: :output_text,
-        logprobs: ::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+        type: :output_text
      }

    type annotation =
data/sig/openai/models/responses/tool.rbs
CHANGED

@@ -258,7 +258,11 @@ module OpenAI
          extend OpenAI::Internal::Type::Union

          type code_interpreter_tool_auto =
-            { type: :auto, file_ids: ::Array[String] }
+            {
+              type: :auto,
+              file_ids: ::Array[String],
+              memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit?
+            }

          class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel
            attr_accessor type: :auto
@@ -267,9 +271,32 @@ module OpenAI

            def file_ids=: (::Array[String]) -> ::Array[String]

-            def initialize: (?file_ids: ::Array[String], ?type: :auto) -> void
+            attr_accessor memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit?
+
+            def initialize: (
+              ?file_ids: ::Array[String],
+              ?memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit?,
+              ?type: :auto
+            ) -> void
+
+            def to_hash: -> {
+              type: :auto,
+              file_ids: ::Array[String],
+              memory_limit: OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit?
+            }
+
+            type memory_limit = :"1g" | :"4g" | :"16g" | :"64g"

-            def to_hash: -> { type: :auto, file_ids: ::Array[String] }
+            module MemoryLimit
+              extend OpenAI::Internal::Type::Enum
+
+              MEMORY_LIMIT_1G: :"1g"
+              MEMORY_LIMIT_4G: :"4g"
+              MEMORY_LIMIT_16G: :"16g"
+              MEMORY_LIMIT_64G: :"64g"
+
+              def self?.values: -> ::Array[OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::memory_limit]
+            end
          end

          def self?.variants: -> ::Array[OpenAI::Models::Responses::Tool::CodeInterpreter::container]
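The container's new `memory_limit` is a closed enum over :"1g", :"4g", :"16g", and :"64g". A sketch of an auto container requesting a larger limit, expressed as a plain params hash; the model name and request wiring are illustrative:

  code_interpreter = {
    type: :code_interpreter,
    container: {
      type: :auto,
      file_ids: ["file-hypothetical"],
      memory_limit: :"4g"
    }
  }
  # e.g. client.responses.create(model: "gpt-5", input: "...", tools: [code_interpreter])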
data/sig/openai/models/vector_stores/file_batch_create_params.rbs
CHANGED

@@ -3,9 +3,10 @@ module OpenAI
  module VectorStores
    type file_batch_create_params =
      {
-        file_ids: ::Array[String],
        attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?,
-        chunking_strategy: OpenAI::Models::file_chunking_strategy_param
+        chunking_strategy: OpenAI::Models::file_chunking_strategy_param,
+        file_ids: ::Array[String],
+        files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File]
      }
      & OpenAI::Internal::Type::request_parameters

@@ -13,8 +14,6 @@ module OpenAI
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

-      attr_accessor file_ids: ::Array[String]
-
      attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?

      attr_reader chunking_strategy: OpenAI::Models::file_chunking_strategy_param?
@@ -23,17 +22,29 @@ module OpenAI
        OpenAI::Models::file_chunking_strategy_param
      ) -> OpenAI::Models::file_chunking_strategy_param

+      attr_reader file_ids: ::Array[String]?
+
+      def file_ids=: (::Array[String]) -> ::Array[String]
+
+      attr_reader files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File]?
+
+      def files=: (
+        ::Array[OpenAI::VectorStores::FileBatchCreateParams::File]
+      ) -> ::Array[OpenAI::VectorStores::FileBatchCreateParams::File]
+
      def initialize: (
-        file_ids: ::Array[String],
        ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?,
        ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param,
+        ?file_ids: ::Array[String],
+        ?files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File],
        ?request_options: OpenAI::request_opts
      ) -> void

      def to_hash: -> {
-        file_ids: ::Array[String],
        attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?,
        chunking_strategy: OpenAI::Models::file_chunking_strategy_param,
+        file_ids: ::Array[String],
+        files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File],
        request_options: OpenAI::RequestOptions
      }

@@ -44,6 +55,45 @@ module OpenAI

        def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]
      end
+
+      type file =
+        {
+          file_id: String,
+          attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?,
+          chunking_strategy: OpenAI::Models::file_chunking_strategy_param
+        }
+
+      class File < OpenAI::Internal::Type::BaseModel
+        attr_accessor file_id: String
+
+        attr_accessor attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?
+
+        attr_reader chunking_strategy: OpenAI::Models::file_chunking_strategy_param?
+
+        def chunking_strategy=: (
+          OpenAI::Models::file_chunking_strategy_param
+        ) -> OpenAI::Models::file_chunking_strategy_param
+
+        def initialize: (
+          file_id: String,
+          ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?,
+          ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param
+        ) -> void
+
+        def to_hash: -> {
+          file_id: String,
+          attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]?,
+          chunking_strategy: OpenAI::Models::file_chunking_strategy_param
+        }
+
+        type attribute = String | Float | bool
+
+        module Attribute
+          extend OpenAI::Internal::Type::Union
+
+          def self?.variants: -> ::Array[OpenAI::Models::VectorStores::FileBatchCreateParams::File::attribute]
+        end
+      end
    end
  end
end
data/sig/openai/models/video.rbs
CHANGED

@@ -10,6 +10,7 @@ module OpenAI
        model: OpenAI::Models::video_model,
        object: :video,
        progress: Integer,
+        prompt: String?,
        remixed_from_video_id: String?,
        seconds: OpenAI::Models::video_seconds,
        size: OpenAI::Models::video_size,
@@ -33,6 +34,8 @@ module OpenAI

      attr_accessor progress: Integer

+      attr_accessor prompt: String?
+
      attr_accessor remixed_from_video_id: String?

      attr_accessor seconds: OpenAI::Models::video_seconds
@@ -49,6 +52,7 @@ module OpenAI
        expires_at: Integer?,
        model: OpenAI::Models::video_model,
        progress: Integer,
+        prompt: String?,
        remixed_from_video_id: String?,
        seconds: OpenAI::Models::video_seconds,
        size: OpenAI::Models::video_size,
@@ -65,6 +69,7 @@ module OpenAI
        model: OpenAI::Models::video_model,
        object: :video,
        progress: Integer,
+        prompt: String?,
        remixed_from_video_id: String?,
        seconds: OpenAI::Models::video_seconds,
        size: OpenAI::Models::video_size,
data/sig/openai/resources/vector_stores/file_batches.rbs
CHANGED

@@ -4,9 +4,10 @@ module OpenAI
    class FileBatches
      def create: (
        String vector_store_id,
-        file_ids: ::Array[String],
        ?attributes: ::Hash[Symbol, OpenAI::Models::VectorStores::FileBatchCreateParams::attribute]?,
        ?chunking_strategy: OpenAI::Models::file_chunking_strategy_param,
+        ?file_ids: ::Array[String],
+        ?files: ::Array[OpenAI::VectorStores::FileBatchCreateParams::File],
        ?request_options: OpenAI::request_opts
      ) -> OpenAI::VectorStores::VectorStoreFileBatch

metadata
CHANGED

@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.34.1
+  version: 0.35.0
 platform: ruby
 authors:
 - OpenAI
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-
+date: 2025-11-03 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: connection_pool
@@ -2228,7 +2228,8 @@ files:
 - sig/openai/resources/webhooks.rbs
 - sig/openai/version.rbs
 homepage: https://gemdocs.org/gems/openai
-licenses:
+licenses:
+- Apache-2.0
 metadata:
   homepage_uri: https://gemdocs.org/gems/openai
   source_code_uri: https://github.com/openai/openai-ruby