openai 0.34.1 → 0.35.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +23 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/base_client.rb +7 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +30 -24
- data/lib/openai/models/custom_tool_input_format.rb +6 -0
- data/lib/openai/models/image_edit_params.rb +1 -1
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -3
- data/lib/openai/models/realtime/realtime_session_create_response.rb +15 -4
- data/lib/openai/models/realtime/realtime_tracing_config.rb +1 -1
- data/lib/openai/models/realtime/realtime_truncation.rb +13 -2
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +38 -4
- data/lib/openai/models/reasoning.rb +4 -0
- data/lib/openai/models/responses/custom_tool.rb +3 -0
- data/lib/openai/models/responses/easy_input_message.rb +3 -3
- data/lib/openai/models/responses/file_search_tool.rb +33 -1
- data/lib/openai/models/responses/response_content.rb +1 -4
- data/lib/openai/models/responses/response_input_content.rb +1 -4
- data/lib/openai/models/responses/response_input_item.rb +2 -2
- data/lib/openai/models/responses/response_input_message_item.rb +2 -2
- data/lib/openai/models/responses/response_output_text.rb +8 -8
- data/lib/openai/models/responses/tool.rb +30 -2
- data/lib/openai/models/vector_stores/file_batch_create_params.rb +77 -11
- data/lib/openai/models/video.rb +9 -1
- data/lib/openai/resources/files.rb +13 -14
- data/lib/openai/resources/images.rb +2 -2
- data/lib/openai/resources/realtime/calls.rb +1 -1
- data/lib/openai/resources/responses.rb +4 -0
- data/lib/openai/resources/vector_stores/file_batches.rb +6 -4
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/internal/transport/base_client.rbi +5 -0
- data/rbi/openai/internal/type/base_model.rbi +8 -4
- data/rbi/openai/models/custom_tool_input_format.rbi +2 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_truncation.rbi +13 -2
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +84 -6
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/responses/custom_tool.rbi +2 -0
- data/rbi/openai/models/responses/file_search_tool.rbi +65 -0
- data/rbi/openai/models/responses/response_content.rbi +0 -1
- data/rbi/openai/models/responses/response_input_content.rbi +1 -2
- data/rbi/openai/models/responses/response_input_item.rbi +3 -6
- data/rbi/openai/models/responses/response_input_message_item.rbi +1 -2
- data/rbi/openai/models/responses/response_output_text.rbi +10 -19
- data/rbi/openai/models/responses/tool.rbi +73 -4
- data/rbi/openai/models/vector_stores/file_batch_create_params.rbi +181 -12
- data/rbi/openai/models/video.rbi +8 -0
- data/rbi/openai/resources/files.rbi +13 -14
- data/rbi/openai/resources/realtime/calls.rbi +17 -6
- data/rbi/openai/resources/vector_stores/file_batches.rbi +15 -5
- data/sig/openai/internal/transport/base_client.rbs +2 -0
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +29 -2
- data/sig/openai/models/responses/file_search_tool.rbs +24 -0
- data/sig/openai/models/responses/response_content.rbs +0 -1
- data/sig/openai/models/responses/response_input_content.rbs +0 -1
- data/sig/openai/models/responses/response_output_text.rbs +7 -11
- data/sig/openai/models/responses/tool.rbs +30 -3
- data/sig/openai/models/vector_stores/file_batch_create_params.rbs +56 -6
- data/sig/openai/models/video.rbs +5 -0
- data/sig/openai/resources/vector_stores/file_batches.rbs +2 -1
- metadata +4 -3
@@ -125,6 +125,25 @@ module OpenAI
 )
 end
 
+# Weights that control how reciprocal rank fusion balances semantic embedding
+# matches versus sparse keyword matches when hybrid search is enabled.
+sig do
+returns(
+T.nilable(
+OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch
+)
+)
+end
+attr_reader :hybrid_search
+
+sig do
+params(
+hybrid_search:
+OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch::OrHash
+).void
+end
+attr_writer :hybrid_search
+
 # The ranker to use for the file search.
 sig do
 returns(
@@ -155,12 +174,17 @@ module OpenAI
 # Ranking options for search.
 sig do
 params(
+hybrid_search:
+OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch::OrHash,
 ranker:
 OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol,
 score_threshold: Float
 ).returns(T.attached_class)
 end
 def self.new(
+# Weights that control how reciprocal rank fusion balances semantic embedding
+# matches versus sparse keyword matches when hybrid search is enabled.
+hybrid_search: nil,
 # The ranker to use for the file search.
 ranker: nil,
 # The score threshold for the file search, a number between 0 and 1. Numbers
@@ -173,6 +197,8 @@ module OpenAI
 sig do
 override.returns(
 {
+hybrid_search:
+OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch,
 ranker:
 OpenAI::Responses::FileSearchTool::RankingOptions::Ranker::OrSymbol,
 score_threshold: Float
@@ -182,6 +208,45 @@ module OpenAI
 def to_hash
 end
 
+class HybridSearch < OpenAI::Internal::Type::BaseModel
+OrHash =
+T.type_alias do
+T.any(
+OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch,
+OpenAI::Internal::AnyHash
+)
+end
+
+# The weight of the embedding in the reciprocal ranking fusion.
+sig { returns(Float) }
+attr_accessor :embedding_weight
+
+# The weight of the text in the reciprocal ranking fusion.
+sig { returns(Float) }
+attr_accessor :text_weight
+
+# Weights that control how reciprocal rank fusion balances semantic embedding
+# matches versus sparse keyword matches when hybrid search is enabled.
+sig do
+params(embedding_weight: Float, text_weight: Float).returns(
+T.attached_class
+)
+end
+def self.new(
+# The weight of the embedding in the reciprocal ranking fusion.
+embedding_weight:,
+# The weight of the text in the reciprocal ranking fusion.
+text_weight:
+)
+end
+
+sig do
+override.returns({ embedding_weight: Float, text_weight: Float })
+end
+def to_hash
+end
+end
+
 # The ranker to use for the file search.
 module Ranker
 extend OpenAI::Internal::Type::Enum
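For orientation, a hedged sketch of how the new `hybrid_search` ranking options added to `FileSearchTool::RankingOptions` above might be passed through `responses.create`. The model name, vector store ID, question, and weight values are placeholders, not values taken from this diff, and the client/resource calls shown are the gem's existing API rather than anything introduced here:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4.1",
  input: "What does the contract say about termination?",
  tools: [
    {
      type: :file_search,
      vector_store_ids: ["vs_123"],
      ranking_options: {
        ranker: :auto,
        score_threshold: 0.5,
        # New in 0.35.0: weights for reciprocal rank fusion between semantic
        # embedding matches and sparse keyword matches.
        hybrid_search: {embedding_weight: 0.7, text_weight: 0.3}
      }
    }
  ]
)
puts response.id
```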
@@ -13,7 +13,6 @@ module OpenAI
 OpenAI::Responses::ResponseInputText,
 OpenAI::Responses::ResponseInputImage,
 OpenAI::Responses::ResponseInputFile,
-OpenAI::Responses::ResponseInputAudio,
 OpenAI::Responses::ResponseOutputText,
 OpenAI::Responses::ResponseOutputRefusal,
 OpenAI::Responses::ResponseContent::ReasoningTextContent
@@ -55,8 +55,7 @@ module OpenAI
 T.any(
 OpenAI::Responses::ResponseInputText,
 OpenAI::Responses::ResponseInputImage,
-OpenAI::Responses::ResponseInputFile,
-OpenAI::Responses::ResponseInputAudio
+OpenAI::Responses::ResponseInputFile
 )
 ]
 )
@@ -118,8 +117,7 @@ module OpenAI
 T.any(
 OpenAI::Responses::ResponseInputText::OrHash,
 OpenAI::Responses::ResponseInputImage::OrHash,
-OpenAI::Responses::ResponseInputFile::OrHash,
-OpenAI::Responses::ResponseInputAudio::OrHash
+OpenAI::Responses::ResponseInputFile::OrHash
 )
 ],
 role:
@@ -152,8 +150,7 @@ module OpenAI
 T.any(
 OpenAI::Responses::ResponseInputText,
 OpenAI::Responses::ResponseInputImage,
-OpenAI::Responses::ResponseInputFile,
-OpenAI::Responses::ResponseInputAudio
+OpenAI::Responses::ResponseInputFile
 )
 ],
 role:
@@ -75,8 +75,7 @@ module OpenAI
 T.any(
 OpenAI::Responses::ResponseInputText::OrHash,
 OpenAI::Responses::ResponseInputImage::OrHash,
-OpenAI::Responses::ResponseInputFile::OrHash,
-OpenAI::Responses::ResponseInputAudio::OrHash
+OpenAI::Responses::ResponseInputFile::OrHash
 )
 ],
 role: OpenAI::Responses::ResponseInputMessageItem::Role::OrSymbol,
@@ -27,6 +27,11 @@ module OpenAI
 end
 attr_accessor :annotations
 
+sig do
+returns(T::Array[OpenAI::Responses::ResponseOutputText::Logprob])
+end
+attr_accessor :logprobs
+
 # The text output from the model.
 sig { returns(String) }
 attr_accessor :text
@@ -39,21 +44,6 @@ module OpenAI
 sig { returns(Symbol) }
 attr_accessor :type
 
-sig do
-returns(
-T.nilable(T::Array[OpenAI::Responses::ResponseOutputText::Logprob])
-)
-end
-attr_reader :logprobs
-
-sig do
-params(
-logprobs:
-T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash]
-).void
-end
-attr_writer :logprobs
-
 # A text output from the model.
 sig do
 params(
@@ -66,18 +56,18 @@ module OpenAI
 OpenAI::Responses::ResponseOutputText::Annotation::FilePath::OrHash
 )
 ],
-text: String,
 logprobs:
 T::Array[OpenAI::Responses::ResponseOutputText::Logprob::OrHash],
+text: String,
 type: Symbol
 ).returns(T.attached_class)
 end
 def self.new(
 # The annotations of the text output.
 annotations:,
+logprobs:,
 # The text output from the model.
 text:,
-logprobs: nil,
 # The type of the output text. Always `output_text`.
 type: :output_text
 )
@@ -95,9 +85,10 @@ module OpenAI
 OpenAI::Responses::ResponseOutputText::Annotation::FilePath
 )
 ],
+logprobs:
+T::Array[OpenAI::Responses::ResponseOutputText::Logprob],
 text: String,
-type: Symbol
-logprobs: T::Array[OpenAI::Responses::ResponseOutputText::Logprob]
+type: Symbol
 }
 )
 end
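A minimal sketch of the type change above: `logprobs` moves from an optional `attr_reader` to a required `attr_accessor` on `ResponseOutputText`, so hand-built instances (for example in tests) now have to pass it explicitly. The values here are illustrative only:

```ruby
require "openai"

# Previously `logprobs: nil` was the default; in 0.35.0 it is a required field.
text_part = OpenAI::Responses::ResponseOutputText.new(
  annotations: [],
  logprobs: [],
  text: "Hello!"
)
```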
@@ -713,26 +713,94 @@ module OpenAI
 sig { params(file_ids: T::Array[String]).void }
 attr_writer :file_ids
 
+sig do
+returns(
+T.nilable(
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol
+)
+)
+end
+attr_accessor :memory_limit
+
 # Configuration for a code interpreter container. Optionally specify the IDs of
 # the files to run the code on.
 sig do
-params(
-T
-
+params(
+file_ids: T::Array[String],
+memory_limit:
+T.nilable(
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol
+),
+type: Symbol
+).returns(T.attached_class)
 end
 def self.new(
 # An optional list of uploaded files to make available to your code.
 file_ids: nil,
+memory_limit: nil,
 # Always `auto`.
 type: :auto
 )
 end
 
 sig do
-override.returns(
+override.returns(
+{
+type: Symbol,
+file_ids: T::Array[String],
+memory_limit:
+T.nilable(
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::OrSymbol
+)
+}
+)
 end
 def to_hash
 end
+
+module MemoryLimit
+extend OpenAI::Internal::Type::Enum
+
+TaggedSymbol =
+T.type_alias do
+T.all(
+Symbol,
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit
+)
+end
+OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+MEMORY_LIMIT_1G =
+T.let(
+:"1g",
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol
+)
+MEMORY_LIMIT_4G =
+T.let(
+:"4g",
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol
+)
+MEMORY_LIMIT_16G =
+T.let(
+:"16g",
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol
+)
+MEMORY_LIMIT_64G =
+T.let(
+:"64g",
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol
+)
+
+sig do
+override.returns(
+T::Array[
+OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto::MemoryLimit::TaggedSymbol
+]
+)
+end
+def self.values
+end
+end
 end
 
 sig do
@@ -1335,6 +1403,7 @@ module OpenAI
 sig { returns(Symbol) }
 attr_accessor :type
 
+# A tool that allows the model to execute shell commands in a local environment.
 sig { params(type: Symbol).returns(T.attached_class) }
 def self.new(
 # The type of the local shell tool. Always `local_shell`.
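A hedged sketch of the new `memory_limit` setting on the `auto` code interpreter container. Per the enum above the accepted values are `:"1g"`, `:"4g"`, `:"16g"`, and `:"64g"`, and the field is nilable; the model name and file ID below are placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize the uploaded CSV and plot a histogram.",
  tools: [
    {
      type: :code_interpreter,
      container: {
        type: :auto,
        file_ids: ["file-abc123"],
        memory_limit: :"4g" # new in 0.35.0
      }
    }
  ]
)
puts response.id
```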
@@ -15,12 +15,6 @@ module OpenAI
 )
 end
 
-# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
-# the vector store should use. Useful for tools like `file_search` that can access
-# files.
-sig { returns(T::Array[String]) }
-attr_accessor :file_ids
-
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
 # for storing additional information about the object in a structured format, and
 # querying for objects via API or the dashboard. Keys are strings with a maximum
@@ -63,9 +57,41 @@ module OpenAI
 end
 attr_writer :chunking_strategy
 
+# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+# the vector store should use. Useful for tools like `file_search` that can access
+# files. If `attributes` or `chunking_strategy` are provided, they will be applied
+# to all files in the batch. Mutually exclusive with `files`.
+sig { returns(T.nilable(T::Array[String])) }
+attr_reader :file_ids
+
+sig { params(file_ids: T::Array[String]).void }
+attr_writer :file_ids
+
+# A list of objects that each include a `file_id` plus optional `attributes` or
+# `chunking_strategy`. Use this when you need to override metadata for specific
+# files. The global `attributes` or `chunking_strategy` will be ignored and must
+# be specified for each file. Mutually exclusive with `file_ids`.
+sig do
+returns(
+T.nilable(
+T::Array[OpenAI::VectorStores::FileBatchCreateParams::File]
+)
+)
+end
+attr_reader :files
+
+sig do
+params(
+files:
+T::Array[
+OpenAI::VectorStores::FileBatchCreateParams::File::OrHash
+]
+).void
+end
+attr_writer :files
+
 sig do
 params(
-file_ids: T::Array[String],
 attributes:
 T.nilable(
 T::Hash[
@@ -78,14 +104,15 @@ module OpenAI
 OpenAI::AutoFileChunkingStrategyParam::OrHash,
 OpenAI::StaticFileChunkingStrategyObjectParam::OrHash
 ),
+file_ids: T::Array[String],
+files:
+T::Array[
+OpenAI::VectorStores::FileBatchCreateParams::File::OrHash
+],
 request_options: OpenAI::RequestOptions::OrHash
 ).returns(T.attached_class)
 end
 def self.new(
-# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
-# the vector store should use. Useful for tools like `file_search` that can access
-# files.
-file_ids:,
 # Set of 16 key-value pairs that can be attached to an object. This can be useful
 # for storing additional information about the object in a structured format, and
 # querying for objects via API or the dashboard. Keys are strings with a maximum
@@ -95,6 +122,16 @@ module OpenAI
 # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
 # strategy. Only applicable if `file_ids` is non-empty.
 chunking_strategy: nil,
+# A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
+# the vector store should use. Useful for tools like `file_search` that can access
+# files. If `attributes` or `chunking_strategy` are provided, they will be applied
+# to all files in the batch. Mutually exclusive with `files`.
+file_ids: nil,
+# A list of objects that each include a `file_id` plus optional `attributes` or
+# `chunking_strategy`. Use this when you need to override metadata for specific
+# files. The global `attributes` or `chunking_strategy` will be ignored and must
+# be specified for each file. Mutually exclusive with `file_ids`.
+files: nil,
 request_options: {}
 )
 end
@@ -102,7 +139,6 @@ module OpenAI
 sig do
 override.returns(
 {
-file_ids: T::Array[String],
 attributes:
 T.nilable(
 T::Hash[
@@ -115,6 +151,9 @@ module OpenAI
 OpenAI::AutoFileChunkingStrategyParam,
 OpenAI::StaticFileChunkingStrategyObjectParam
 ),
+file_ids: T::Array[String],
+files:
+T::Array[OpenAI::VectorStores::FileBatchCreateParams::File],
 request_options: OpenAI::RequestOptions
 }
 )
@@ -137,6 +176,136 @@ module OpenAI
 def self.variants
 end
 end
+
+class File < OpenAI::Internal::Type::BaseModel
+OrHash =
+T.type_alias do
+T.any(
+OpenAI::VectorStores::FileBatchCreateParams::File,
+OpenAI::Internal::AnyHash
+)
+end
+
+# A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+# vector store should use. Useful for tools like `file_search` that can access
+# files.
+sig { returns(String) }
+attr_accessor :file_id
+
+# Set of 16 key-value pairs that can be attached to an object. This can be useful
+# for storing additional information about the object in a structured format, and
+# querying for objects via API or the dashboard. Keys are strings with a maximum
+# length of 64 characters. Values are strings with a maximum length of 512
+# characters, booleans, or numbers.
+sig do
+returns(
+T.nilable(
+T::Hash[
+Symbol,
+OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants
+]
+)
+)
+end
+attr_accessor :attributes
+
+# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+# strategy. Only applicable if `file_ids` is non-empty.
+sig do
+returns(
+T.nilable(
+T.any(
+OpenAI::AutoFileChunkingStrategyParam,
+OpenAI::StaticFileChunkingStrategyObjectParam
+)
+)
+)
+end
+attr_reader :chunking_strategy
+
+sig do
+params(
+chunking_strategy:
+T.any(
+OpenAI::AutoFileChunkingStrategyParam::OrHash,
+OpenAI::StaticFileChunkingStrategyObjectParam::OrHash
+)
+).void
+end
+attr_writer :chunking_strategy
+
+sig do
+params(
+file_id: String,
+attributes:
+T.nilable(
+T::Hash[
+Symbol,
+OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants
+]
+),
+chunking_strategy:
+T.any(
+OpenAI::AutoFileChunkingStrategyParam::OrHash,
+OpenAI::StaticFileChunkingStrategyObjectParam::OrHash
+)
+).returns(T.attached_class)
+end
+def self.new(
+# A [File](https://platform.openai.com/docs/api-reference/files) ID that the
+# vector store should use. Useful for tools like `file_search` that can access
+# files.
+file_id:,
+# Set of 16 key-value pairs that can be attached to an object. This can be useful
+# for storing additional information about the object in a structured format, and
+# querying for objects via API or the dashboard. Keys are strings with a maximum
+# length of 64 characters. Values are strings with a maximum length of 512
+# characters, booleans, or numbers.
+attributes: nil,
+# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+# strategy. Only applicable if `file_ids` is non-empty.
+chunking_strategy: nil
+)
+end
+
+sig do
+override.returns(
+{
+file_id: String,
+attributes:
+T.nilable(
+T::Hash[
+Symbol,
+OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants
+]
+),
+chunking_strategy:
+T.any(
+OpenAI::AutoFileChunkingStrategyParam,
+OpenAI::StaticFileChunkingStrategyObjectParam
+)
+}
+)
+end
+def to_hash
+end
+
+module Attribute
+extend OpenAI::Internal::Type::Union
+
+Variants = T.type_alias { T.any(String, Float, T::Boolean) }
+
+sig do
+override.returns(
+T::Array[
+OpenAI::VectorStores::FileBatchCreateParams::File::Attribute::Variants
+]
+)
+end
+def self.variants
+end
+end
+end
 end
 end
 end
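A hedged sketch of the reworked batch-create parameters: `file_ids` is now optional, and the new `files` array carries per-file `attributes` or `chunking_strategy` (the two are mutually exclusive). The vector store and file IDs are placeholders, and the positional `vector_store_id` argument is assumed to match the existing `file_batches.create` signature:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

batch = client.vector_stores.file_batches.create(
  "vs_123",
  files: [
    # Per-file attributes override the batch-level defaults.
    {file_id: "file-abc123", attributes: {category: "contract"}},
    {file_id: "file-def456"}
  ]
)
puts batch.status
```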
data/rbi/openai/models/video.rbi CHANGED
@@ -40,6 +40,10 @@ module OpenAI
 sig { returns(Integer) }
 attr_accessor :progress
 
+# The prompt that was used to generate the video.
+sig { returns(T.nilable(String)) }
+attr_accessor :prompt
+
 # Identifier of the source video if this video is a remix.
 sig { returns(T.nilable(String)) }
 attr_accessor :remixed_from_video_id
@@ -66,6 +70,7 @@ module OpenAI
 expires_at: T.nilable(Integer),
 model: OpenAI::VideoModel::OrSymbol,
 progress: Integer,
+prompt: T.nilable(String),
 remixed_from_video_id: T.nilable(String),
 seconds: OpenAI::VideoSeconds::OrSymbol,
 size: OpenAI::VideoSize::OrSymbol,
@@ -88,6 +93,8 @@ module OpenAI
 model:,
 # Approximate completion percentage for the generation task.
 progress:,
+# The prompt that was used to generate the video.
+prompt:,
 # Identifier of the source video if this video is a remix.
 remixed_from_video_id:,
 # Duration of the generated clip in seconds.
@@ -112,6 +119,7 @@ module OpenAI
 model: OpenAI::VideoModel::TaggedSymbol,
 object: Symbol,
 progress: Integer,
+prompt: T.nilable(String),
 remixed_from_video_id: T.nilable(String),
 seconds: OpenAI::VideoSeconds::TaggedSymbol,
 size: OpenAI::VideoSize::TaggedSymbol,
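A small hedged sketch of the new `prompt` field on `Video`. It assumes the `videos` resource exposes `retrieve`, which this diff does not touch, and the video ID is a placeholder:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

video = client.videos.retrieve("video_123")
# `prompt` is nilable; when present it is the prompt the video was generated from.
puts video.prompt
```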
@@ -7,20 +7,19 @@ module OpenAI
 # up to 512 MB, and the size of all files uploaded by one organization can be up
 # to 1 TB.
 #
-# The Assistants API supports files up to 2 million tokens and of specific file
-#
-#
-# details.
-#
-#
-#
-#
-#
-#
-#
-#
-#
-# [format](https://platform.openai.com/docs/api-reference/batch/request-input).
+# - The Assistants API supports files up to 2 million tokens and of specific file
+#   types. See the
+#   [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools)
+#   for details.
+# - The Fine-tuning API only supports `.jsonl` files. The input also has certain
+#   required formats for fine-tuning
+#   [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input)
+#   or
+#   [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
+#   models.
+# - The Batch API only supports `.jsonl` files up to 200 MB in size. The input
+#   also has a specific required
+#   [format](https://platform.openai.com/docs/api-reference/batch/request-input).
 #
 # Please [contact us](https://help.openai.com/) if you need to increase these
 # storage limits.