openai 0.30.0 → 0.31.0
This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -0
- data/README.md +1 -1
- data/lib/openai/models/beta/assistant_create_params.rb +3 -0
- data/lib/openai/models/beta/assistant_update_params.rb +3 -0
- data/lib/openai/models/beta/threads/run_create_params.rb +3 -0
- data/lib/openai/models/chat/completion_create_params.rb +3 -0
- data/lib/openai/models/comparison_filter.rb +29 -6
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -0
- data/lib/openai/models/evals/run_cancel_response.rb +6 -0
- data/lib/openai/models/evals/run_create_params.rb +6 -0
- data/lib/openai/models/evals/run_create_response.rb +6 -0
- data/lib/openai/models/evals/run_list_response.rb +6 -0
- data/lib/openai/models/evals/run_retrieve_response.rb +6 -0
- data/lib/openai/models/graders/score_model_grader.rb +3 -0
- data/lib/openai/models/reasoning.rb +3 -0
- data/lib/openai/models/reasoning_effort.rb +3 -0
- data/lib/openai/models/vector_stores/vector_store_file.rb +3 -3
- data/lib/openai/resources/files.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -0
- data/rbi/openai/models/beta/assistant_update_params.rbi +6 -0
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -0
- data/rbi/openai/models/chat/completion_create_params.rbi +6 -0
- data/rbi/openai/models/comparison_filter.rbi +43 -4
- data/rbi/openai/models/eval_create_response.rbi +4 -4
- data/rbi/openai/models/eval_list_response.rbi +4 -4
- data/rbi/openai/models/eval_retrieve_response.rbi +4 -4
- data/rbi/openai/models/eval_update_response.rbi +4 -4
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +6 -0
- data/rbi/openai/models/evals/run_cancel_response.rbi +12 -0
- data/rbi/openai/models/evals/run_create_params.rbi +12 -0
- data/rbi/openai/models/evals/run_create_response.rbi +12 -0
- data/rbi/openai/models/evals/run_list_response.rbi +12 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +12 -0
- data/rbi/openai/models/graders/score_model_grader.rbi +6 -0
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/reasoning_effort.rbi +3 -0
- data/rbi/openai/models/vector_stores/vector_store_file.rbi +3 -3
- data/rbi/openai/resources/beta/assistants.rbi +6 -0
- data/rbi/openai/resources/beta/threads/runs.rbi +6 -0
- data/rbi/openai/resources/chat/completions.rbi +6 -0
- data/rbi/openai/resources/files.rbi +1 -1
- data/sig/openai/models/comparison_filter.rbs +15 -1
- data/sig/openai/models/eval_create_response.rbs +2 -2
- data/sig/openai/models/eval_list_response.rbs +2 -2
- data/sig/openai/models/eval_retrieve_response.rbs +2 -2
- data/sig/openai/models/eval_update_response.rbs +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2c2acb13f76000aa282621830c5724ffe7ceebf464e7cd4d58b4d8b99d6ddd5d
+  data.tar.gz: e55e41d0e9dd00daa7ab4dd6409ea8f4fc68c91ccf09b8660725634ab563c56b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ffb82dfdebeca385d0f584be09ea4bbf5ec79e0031209f362a93fc1167f78b04ddc2743e77ad18f87f9e9b1a33909f9d532fb5229379139479ef09317b263289
+  data.tar.gz: dd09d9967b4cb16fd70bbbcc153ab34bf144a1dd3e922100f3f92fa597a3756fcb112763392745934b647d61ba48925070e55889a09a0b94ee89ff435f326cc7
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # Changelog
 
+## 0.31.0 (2025-10-10)
+
+Full Changelog: [v0.30.0...v0.31.0](https://github.com/openai/openai-ruby/compare/v0.30.0...v0.31.0)
+
+### Features
+
+* **api:** comparison filter in/not in ([ac3e58b](https://github.com/openai/openai-ruby/commit/ac3e58bbee0c919ac84c4b3ac8b67955bca7ba88))
+
+
+### Chores
+
+* ignore linter error for tests having large collections ([90c4440](https://github.com/openai/openai-ruby/commit/90c44400f8713b7d2d0b51142f4ed5509dbca713))
+* simplify model references ([d18c5af](https://github.com/openai/openai-ruby/commit/d18c5af9d05ae63616f2c83fb228c15f37cdddb0))
+
 ## 0.30.0 (2025-10-06)
 
 Full Changelog: [v0.29.0...v0.30.0](https://github.com/openai/openai-ruby/compare/v0.29.0...v0.30.0)
data/README.md
CHANGED
@@ -55,6 +55,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -55,6 +55,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -113,6 +113,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -197,6 +197,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -10,7 +10,8 @@ module OpenAI
 required :key, String
 
 # @!attribute type
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal

@@ -18,6 +19,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 #
 # @return [Symbol, OpenAI::Models::ComparisonFilter::Type]
 required :type, enum: -> { OpenAI::ComparisonFilter::Type }

@@ -26,7 +29,7 @@ module OpenAI
 # The value to compare against the attribute key; supports string, number, or
 # boolean types.
 #
-# @return [String, Float, Boolean]
+# @return [String, Float, Boolean, Array<String, Float>]
 required :value, union: -> { OpenAI::ComparisonFilter::Value }
 
 # @!method initialize(key:, type:, value:)

@@ -38,11 +41,12 @@ module OpenAI
 #
 # @param key [String] The key to compare against the value.
 #
-# @param type [Symbol, OpenAI::Models::ComparisonFilter::Type] Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# @param type [Symbol, OpenAI::Models::ComparisonFilter::Type] Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `
 #
-# @param value [String, Float, Boolean] The value to compare against the attribute key; supports string, number, or bool
+# @param value [String, Float, Boolean, Array<String, Float>] The value to compare against the attribute key; supports string, number, or bool
 
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal

@@ -50,6 +54,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 #
 # @see OpenAI::Models::ComparisonFilter#type
 module Type

@@ -79,8 +85,25 @@ module OpenAI
 
 variant OpenAI::Internal::Type::Boolean
 
+variant -> { OpenAI::Models::ComparisonFilter::Value::UnionMember3Array }
+
+module UnionMember3
+extend OpenAI::Internal::Type::Union
+
+variant String
+
+variant Float
+
+# @!method self.variants
+# @return [Array(String, Float)]
+end
+
 # @!method self.variants
-# @return [Array(String, Float, Boolean)]
+# @return [Array(String, Float, Boolean, Array<String, Float>)]
+
+# @type [OpenAI::Internal::Type::Converter]
+UnionMember3Array =
+OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::ComparisonFilter::Value::UnionMember3 }]
 end
 end
 end
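The comparison_filter.rb hunks above add the `in`/`nin` operators and allow `value` to be an array of strings or numbers. A minimal, hypothetical sketch of building such a filter against 0.31.0 follows; the attribute name and values are illustrative, and only the `key:`/`type:`/`value:` interface and the new `:in` operator come from the diff.

    require "openai"

    # Match records whose "region" attribute is one of the listed values.
    # The :in operator and the array-valued `value` are what 0.31.0 adds;
    # with :nin the filter would instead exclude these values.
    filter = OpenAI::Models::ComparisonFilter.new(
      key: "region",
      type: :in,
      value: ["us-east", "eu-west"]
    )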
@@ -466,6 +466,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -661,6 +664,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -232,6 +232,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -589,6 +592,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -661,6 +664,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -661,6 +664,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -665,6 +668,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -226,6 +226,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -10,6 +10,9 @@ module OpenAI
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
 #
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
+#
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -7,6 +7,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 module ReasoningEffort
 extend OpenAI::Internal::Type::Enum
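The reasoning-effort hunks repeated above all add the same documentation note: `gpt-5-pro` defaults to, and only supports, `high` reasoning effort. A hedged sketch of passing the parameter through this gem's chat completions resource; the model name and prompt are illustrative.

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # `reasoning_effort` is the parameter these hunks document; per the new
    # note, `gpt-5-pro` accepts only :high.
    completion = client.chat.completions.create(
      model: "gpt-5-pro",
      reasoning_effort: :high,
      messages: [{role: "user", content: "Summarize this changelog."}]
    )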
@@ -101,7 +101,7 @@ module OpenAI
 # @see OpenAI::Models::VectorStores::VectorStoreFile#last_error
 class LastError < OpenAI::Internal::Type::BaseModel
 # @!attribute code
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 #
 # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code]
 required :code, enum: -> { OpenAI::VectorStores::VectorStoreFile::LastError::Code }

@@ -116,11 +116,11 @@ module OpenAI
 # The last error associated with this vector store file. Will be `null` if there
 # are no errors.
 #
-# @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error` or `
+# @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error`, `unsupported_file`, or `invalid_file`.
 #
 # @param message [String] A human-readable description of the error.
 
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 #
 # @see OpenAI::Models::VectorStores::VectorStoreFile::LastError#code
 module Code
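The vector_store_file.rb hunks above complete a previously truncated doc string: `last_error.code` is one of `server_error`, `unsupported_file`, or `invalid_file`. A small, hypothetical sketch of branching on those codes; it assumes `file` is a `VectorStoreFile` already retrieved through the gem.

    # `last_error` is nil when the file was ingested cleanly; otherwise its
    # `code` is one of the three values documented above.
    case file.last_error&.code
    when :server_error     then warn "transient failure; retry the upload"
    when :unsupported_file then warn "this file type cannot be indexed"
    when :invalid_file     then warn "the file is corrupt or otherwise invalid"
    end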
data/lib/openai/version.rb
CHANGED
@@ -50,6 +50,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -212,6 +215,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -70,6 +70,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -236,6 +239,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -116,6 +116,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -334,6 +337,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -230,6 +230,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -667,6 +670,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # An object specifying the format that the model must output.
 #
@@ -12,7 +12,8 @@ module OpenAI
 sig { returns(String) }
 attr_accessor :key
 
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal

@@ -20,6 +21,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 sig { returns(OpenAI::ComparisonFilter::Type::OrSymbol) }
 attr_accessor :type

@@ -40,7 +43,8 @@ module OpenAI
 def self.new(
 # The key to compare against the value.
 key:,
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal

@@ -48,6 +52,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 type:,
 # The value to compare against the attribute key; supports string, number, or
 # boolean types.

@@ -67,7 +73,8 @@ module OpenAI
 def to_hash
 end
 
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal

@@ -75,6 +82,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 module Type
 extend OpenAI::Internal::Type::Enum

@@ -103,13 +112,43 @@ module OpenAI
 module Value
 extend OpenAI::Internal::Type::Union
 
-Variants =
+Variants =
+T.type_alias do
+T.any(
+String,
+Float,
+T::Boolean,
+T::Array[OpenAI::ComparisonFilter::Value::UnionMember3::Variants]
+)
+end
+
+module UnionMember3
+extend OpenAI::Internal::Type::Union
+
+Variants = T.type_alias { T.any(String, Float) }
+
+sig do
+override.returns(
+T::Array[OpenAI::ComparisonFilter::Value::UnionMember3::Variants]
+)
+end
+def self.variants
+end
+end
 
 sig do
 override.returns(T::Array[OpenAI::ComparisonFilter::Value::Variants])
 end
 def self.variants
 end
+
+UnionMember3Array =
+T.let(
+OpenAI::Internal::Type::ArrayOf[
+union: OpenAI::ComparisonFilter::Value::UnionMember3
+],
+OpenAI::Internal::Type::Converter
+)
 end
 end
 end
@@ -70,8 +70,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel::OrHash

@@ -223,8 +223,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel

@@ -68,8 +68,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel::OrHash

@@ -221,8 +221,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel

@@ -72,8 +72,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel::OrHash

@@ -225,8 +225,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel

@@ -70,8 +70,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel::OrHash

@@ -223,8 +223,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel
@@ -892,6 +892,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -992,6 +995,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # An object specifying the format that the model must output.
 #

@@ -515,6 +515,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end

@@ -574,6 +577,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,

@@ -1120,6 +1126,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1241,6 +1250,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,

@@ -425,6 +425,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -482,6 +485,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,

@@ -1078,6 +1084,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -1216,6 +1225,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,

@@ -515,6 +515,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end

@@ -574,6 +577,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,

@@ -1120,6 +1126,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1241,6 +1250,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,

@@ -511,6 +511,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end

@@ -570,6 +573,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,

@@ -1116,6 +1122,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1237,6 +1246,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,

@@ -517,6 +517,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end

@@ -576,6 +579,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,

@@ -1122,6 +1128,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1243,6 +1252,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,

@@ -399,6 +399,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -432,6 +435,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -11,6 +11,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :effort

@@ -46,6 +49,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 effort: nil,
 # **Deprecated:** use `summary` instead.
 #

@@ -7,6 +7,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 module ReasoningEffort
 extend OpenAI::Internal::Type::Enum
@@ -190,7 +190,7 @@ module OpenAI
 )
 end
 
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 sig do
 returns(
 OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol

@@ -212,7 +212,7 @@ module OpenAI
 ).returns(T.attached_class)
 end
 def self.new(
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 code:,
 # A human-readable description of the error.
 message:

@@ -231,7 +231,7 @@ module OpenAI
 def to_hash
 end
 
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 module Code
 extend OpenAI::Internal::Type::Enum
@@ -65,6 +65,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -192,6 +195,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -128,6 +128,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Body param: Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -307,6 +310,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Body param: Specifies the format that the model must output. Compatible with
 # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),

@@ -222,6 +222,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # An object specifying the format that the model must output.
 #

@@ -539,6 +542,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # An object specifying the format that the model must output.
 #
@@ -41,12 +41,26 @@ module OpenAI
 def self?.values: -> ::Array[OpenAI::Models::ComparisonFilter::type_]
 end
 
-type value =
+type value =
+String
+| Float
+| bool
+| ::Array[OpenAI::Models::ComparisonFilter::Value::union_member3]
 
 module Value
 extend OpenAI::Internal::Type::Union
 
+type union_member3 = String | Float
+
+module UnionMember3
+extend OpenAI::Internal::Type::Union
+
+def self?.variants: -> ::Array[OpenAI::Models::ComparisonFilter::Value::union_member3]
+end
+
 def self?.variants: -> ::Array[OpenAI::Models::ComparisonFilter::value]
+
+UnionMember3Array: OpenAI::Internal::Type::Converter
 end
 end
 end
@@ -85,8 +85,8 @@ module OpenAI
 end
 
 type testing_criterion =
-OpenAI::
-| OpenAI::
+OpenAI::Graders::LabelModelGrader
+| OpenAI::Graders::StringCheckGrader
 | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity
 | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython
 | OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel

@@ -85,8 +85,8 @@ module OpenAI
 end
 
 type testing_criterion =
-OpenAI::
-| OpenAI::
+OpenAI::Graders::LabelModelGrader
+| OpenAI::Graders::StringCheckGrader
 | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity
 | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython
 | OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel

@@ -85,8 +85,8 @@ module OpenAI
 end
 
 type testing_criterion =
-OpenAI::
-| OpenAI::
+OpenAI::Graders::LabelModelGrader
+| OpenAI::Graders::StringCheckGrader
 | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity
 | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython
 | OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel

@@ -85,8 +85,8 @@ module OpenAI
 end
 
 type testing_criterion =
-OpenAI::
-| OpenAI::
+OpenAI::Graders::LabelModelGrader
+| OpenAI::Graders::StringCheckGrader
 | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity
 | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython
 | OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: openai
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.31.0
 platform: ruby
 authors:
 - OpenAI
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2025-10-
+date: 2025-10-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: connection_pool