openai 0.30.0 → 0.32.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +28 -0
- data/README.md +1 -1
- data/lib/openai/internal/util.rb +5 -5
- data/lib/openai/models/audio/transcription_create_params.rb +42 -11
- data/lib/openai/models/audio/transcription_create_response.rb +4 -1
- data/lib/openai/models/audio/transcription_diarized.rb +160 -0
- data/lib/openai/models/audio/transcription_diarized_segment.rb +65 -0
- data/lib/openai/models/audio/transcription_stream_event.rb +7 -4
- data/lib/openai/models/audio/transcription_text_delta_event.rb +10 -1
- data/lib/openai/models/audio/transcription_text_segment_event.rb +63 -0
- data/lib/openai/models/audio_model.rb +1 -0
- data/lib/openai/models/audio_response_format.rb +5 -2
- data/lib/openai/models/beta/assistant_create_params.rb +3 -0
- data/lib/openai/models/beta/assistant_update_params.rb +3 -0
- data/lib/openai/models/beta/threads/run_create_params.rb +3 -0
- data/lib/openai/models/chat/completion_create_params.rb +3 -0
- data/lib/openai/models/comparison_filter.rb +29 -6
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -0
- data/lib/openai/models/evals/run_cancel_response.rb +6 -0
- data/lib/openai/models/evals/run_create_params.rb +6 -0
- data/lib/openai/models/evals/run_create_response.rb +6 -0
- data/lib/openai/models/evals/run_list_response.rb +6 -0
- data/lib/openai/models/evals/run_retrieve_response.rb +6 -0
- data/lib/openai/models/graders/score_model_grader.rb +3 -0
- data/lib/openai/models/realtime/audio_transcription.rb +8 -6
- data/lib/openai/models/reasoning.rb +3 -0
- data/lib/openai/models/reasoning_effort.rb +3 -0
- data/lib/openai/models/vector_store_create_params.rb +10 -1
- data/lib/openai/models/vector_stores/vector_store_file.rb +3 -3
- data/lib/openai/resources/audio/transcriptions.rb +12 -4
- data/lib/openai/resources/files.rb +1 -1
- data/lib/openai/resources/vector_stores.rb +3 -1
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +3 -0
- data/rbi/openai/models/audio/transcription_create_params.rbi +66 -16
- data/rbi/openai/models/audio/transcription_create_response.rbi +1 -0
- data/rbi/openai/models/audio/transcription_diarized.rbi +281 -0
- data/rbi/openai/models/audio/transcription_diarized_segment.rbi +87 -0
- data/rbi/openai/models/audio/transcription_stream_event.rbi +4 -3
- data/rbi/openai/models/audio/transcription_text_delta_event.rbi +14 -1
- data/rbi/openai/models/audio/transcription_text_segment_event.rbi +86 -0
- data/rbi/openai/models/audio_model.rbi +2 -0
- data/rbi/openai/models/audio_response_format.rbi +6 -2
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -0
- data/rbi/openai/models/beta/assistant_update_params.rbi +6 -0
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -0
- data/rbi/openai/models/chat/completion_create_params.rbi +6 -0
- data/rbi/openai/models/comparison_filter.rbi +43 -4
- data/rbi/openai/models/eval_create_response.rbi +4 -4
- data/rbi/openai/models/eval_list_response.rbi +4 -4
- data/rbi/openai/models/eval_retrieve_response.rbi +4 -4
- data/rbi/openai/models/eval_update_response.rbi +4 -4
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +6 -0
- data/rbi/openai/models/evals/run_cancel_response.rbi +12 -0
- data/rbi/openai/models/evals/run_create_params.rbi +12 -0
- data/rbi/openai/models/evals/run_create_response.rbi +12 -0
- data/rbi/openai/models/evals/run_list_response.rbi +12 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +12 -0
- data/rbi/openai/models/graders/score_model_grader.rbi +6 -0
- data/rbi/openai/models/realtime/audio_transcription.rbi +15 -12
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/reasoning_effort.rbi +3 -0
- data/rbi/openai/models/vector_store_create_params.rbi +13 -0
- data/rbi/openai/models/vector_stores/vector_store_file.rbi +3 -3
- data/rbi/openai/resources/audio/transcriptions.rbi +52 -14
- data/rbi/openai/resources/beta/assistants.rbi +6 -0
- data/rbi/openai/resources/beta/threads/runs.rbi +6 -0
- data/rbi/openai/resources/chat/completions.rbi +6 -0
- data/rbi/openai/resources/files.rbi +1 -1
- data/rbi/openai/resources/vector_stores.rbi +4 -0
- data/sig/openai/models/audio/transcription_create_params.rbs +14 -0
- data/sig/openai/models/audio/transcription_create_response.rbs +3 -1
- data/sig/openai/models/audio/transcription_diarized.rbs +129 -0
- data/sig/openai/models/audio/transcription_diarized_segment.rbs +47 -0
- data/sig/openai/models/audio/transcription_stream_event.rbs +2 -1
- data/sig/openai/models/audio/transcription_text_delta_event.rbs +9 -2
- data/sig/openai/models/audio/transcription_text_segment_event.rbs +47 -0
- data/sig/openai/models/audio_model.rbs +5 -1
- data/sig/openai/models/audio_response_format.rbs +3 -1
- data/sig/openai/models/comparison_filter.rbs +15 -1
- data/sig/openai/models/eval_create_response.rbs +2 -2
- data/sig/openai/models/eval_list_response.rbs +2 -2
- data/sig/openai/models/eval_retrieve_response.rbs +2 -2
- data/sig/openai/models/eval_update_response.rbs +2 -2
- data/sig/openai/models/realtime/audio_transcription.rbs +2 -2
- data/sig/openai/models/vector_store_create_params.rbs +7 -0
- data/sig/openai/resources/audio/transcriptions.rbs +4 -0
- data/sig/openai/resources/vector_stores.rbs +1 -0
- metadata +11 -2

```diff
@@ -12,7 +12,8 @@ module OpenAI
 sig { returns(String) }
 attr_accessor :key

-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal
@@ -20,6 +21,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 sig { returns(OpenAI::ComparisonFilter::Type::OrSymbol) }
 attr_accessor :type

@@ -40,7 +43,8 @@ module OpenAI
 def self.new(
 # The key to compare against the value.
 key:,
-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal
@@ -48,6 +52,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 type:,
 # The value to compare against the attribute key; supports string, number, or
 # boolean types.
@@ -67,7 +73,8 @@ module OpenAI
 def to_hash
 end

-# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte
+# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
+# `nin`.
 #
 # - `eq`: equals
 # - `ne`: not equal
@@ -75,6 +82,8 @@ module OpenAI
 # - `gte`: greater than or equal
 # - `lt`: less than
 # - `lte`: less than or equal
+# - `in`: in
+# - `nin`: not in
 module Type
 extend OpenAI::Internal::Type::Enum

@@ -103,13 +112,43 @@ module OpenAI
 module Value
 extend OpenAI::Internal::Type::Union

-Variants =
+Variants =
+T.type_alias do
+T.any(
+String,
+Float,
+T::Boolean,
+T::Array[OpenAI::ComparisonFilter::Value::UnionMember3::Variants]
+)
+end
+
+module UnionMember3
+extend OpenAI::Internal::Type::Union
+
+Variants = T.type_alias { T.any(String, Float) }
+
+sig do
+override.returns(
+T::Array[OpenAI::ComparisonFilter::Value::UnionMember3::Variants]
+)
+end
+def self.variants
+end
+end

 sig do
 override.returns(T::Array[OpenAI::ComparisonFilter::Value::Variants])
 end
 def self.variants
 end
+
+UnionMember3Array =
+T.let(
+OpenAI::Internal::Type::ArrayOf[
+union: OpenAI::ComparisonFilter::Value::UnionMember3
+],
+OpenAI::Internal::Type::Converter
+)
 end
 end
 end
```
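
The change above widens `ComparisonFilter#type` to include `in` and `nin` and lets `value` be an array of strings or numbers. A minimal sketch of building such a filter with the gem's model class; the attribute key, the values, and the commented-out search call are illustrative assumptions rather than something this diff documents:

```ruby
require "openai"

# Match records whose "region" attribute equals any element of the array.
# :in is one of the two operators added in this release; :nin is its negation.
filter = OpenAI::ComparisonFilter.new(
  key: "region",          # assumed metadata attribute name
  type: :in,              # new operator
  value: ["us", "eu"]     # array value, allowed by the new Value union
)

# The filter object (or an equivalent hash) can then be passed wherever the API
# accepts attribute filters, e.g. a vector store search (call shape assumed):
#   client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
#   client.vector_stores.search("vs_123", query: "quarterly report", filters: filter)
```
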
```diff
@@ -70,8 +70,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel::OrHash
@@ -223,8 +223,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel
@@ -68,8 +68,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel::OrHash
@@ -221,8 +221,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel
@@ -72,8 +72,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel::OrHash
@@ -225,8 +225,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel
@@ -70,8 +70,8 @@ module OpenAI
 testing_criteria:
 T::Array[
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader::OrHash,
+OpenAI::Graders::StringCheckGrader::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython::OrHash,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel::OrHash
@@ -223,8 +223,8 @@ module OpenAI
 Variants =
 T.type_alias do
 T.any(
-OpenAI::
-OpenAI::
+OpenAI::Graders::LabelModelGrader,
+OpenAI::Graders::StringCheckGrader,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython,
 OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel
```
```diff
@@ -892,6 +892,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -992,6 +995,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # An object specifying the format that the model must output.
 #
@@ -515,6 +515,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end
@@ -574,6 +577,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,
@@ -1120,6 +1126,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1241,6 +1250,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -425,6 +425,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -482,6 +485,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,
@@ -1078,6 +1084,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -1216,6 +1225,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -515,6 +515,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end
@@ -574,6 +577,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,
@@ -1120,6 +1126,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1241,6 +1250,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -511,6 +511,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end
@@ -570,6 +573,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,
@@ -1116,6 +1122,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1237,6 +1246,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -517,6 +517,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig do
 returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
 end
@@ -576,6 +579,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # Sampling temperature. This is a query parameter used to select responses.
 temperature: nil,
@@ -1122,6 +1128,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
 attr_accessor :reasoning_effort

@@ -1243,6 +1252,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
@@ -399,6 +399,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :reasoning_effort

@@ -432,6 +435,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 reasoning_effort: nil,
 # A seed value to initialize the randomness, during sampling.
 seed: nil,
```
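
Every `reasoning_effort` field in the hunks above gains the same note about `gpt-5-pro`. A minimal sketch of what that means for a caller, using the chat completions resource; the model name comes from the note itself, but the rest of the call (the message, and whether `gpt-5-pro` is reachable via this endpoint) is assumed for illustration:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Per the added note, gpt-5-pro defaults to (and only supports) high reasoning
# effort, so :high is the only value worth passing for that model.
completion = client.chat.completions.create(
  model: "gpt-5-pro",
  reasoning_effort: :high,
  messages: [{role: "user", content: "Outline a migration plan."}]
)

puts completion.choices.first.message.content
```
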
```diff
@@ -22,7 +22,8 @@ module OpenAI
 attr_writer :language

 # The model to use for transcription. Current options are `whisper-1`,
-# `gpt-4o-transcribe
+# `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
+# Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
 sig do
 returns(
 T.nilable(OpenAI::Realtime::AudioTranscription::Model::OrSymbol)
@@ -40,8 +41,8 @@ module OpenAI
 # An optional text to guide the model's style or continue a previous audio
 # segment. For `whisper-1`, the
 # [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
-# For `gpt-4o-transcribe` models
-# "expect words related to technology".
+# For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
+# prompt is a free text string, for example "expect words related to technology".
 sig { returns(T.nilable(String)) }
 attr_reader :prompt

@@ -61,13 +62,14 @@ module OpenAI
 # format will improve accuracy and latency.
 language: nil,
 # The model to use for transcription. Current options are `whisper-1`,
-# `gpt-4o-transcribe
+# `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
+# Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
 model: nil,
 # An optional text to guide the model's style or continue a previous audio
 # segment. For `whisper-1`, the
 # [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
-# For `gpt-4o-transcribe` models
-# "expect words related to technology".
+# For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
+# prompt is a free text string, for example "expect words related to technology".
 prompt: nil
 )
 end
@@ -85,7 +87,8 @@ module OpenAI
 end

 # The model to use for transcription. Current options are `whisper-1`,
-# `gpt-4o-transcribe
+# `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`.
+# Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
 module Model
 extend OpenAI::Internal::Type::Enum

@@ -100,11 +103,6 @@ module OpenAI
 :"whisper-1",
 OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
 )
-GPT_4O_TRANSCRIBE_LATEST =
-T.let(
-:"gpt-4o-transcribe-latest",
-OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
-)
 GPT_4O_MINI_TRANSCRIBE =
 T.let(
 :"gpt-4o-mini-transcribe",
@@ -115,6 +113,11 @@ module OpenAI
 :"gpt-4o-transcribe",
 OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
 )
+GPT_4O_TRANSCRIBE_DIARIZE =
+T.let(
+:"gpt-4o-transcribe-diarize",
+OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol
+)

 sig do
 override.returns(
```
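
With `gpt-4o-transcribe-latest` dropped from the enum and `gpt-4o-transcribe-diarize` added, a realtime transcription config would be built roughly as below. This is a sketch only: the language hint is an assumption, and attaching the object to a realtime session is not shown:

```ruby
require "openai"

# Transcription settings for realtime audio input; the model enum now accepts
# :"gpt-4o-transcribe-diarize" for speaker-labelled diarization.
transcription = OpenAI::Realtime::AudioTranscription.new(
  model: :"gpt-4o-transcribe-diarize",
  language: "en"   # assumed ISO-639-1 hint; per the docs it improves accuracy and latency
)

# prompt: is left out: per the updated comment, free-text prompts apply to the
# gpt-4o-transcribe models excluding gpt-4o-transcribe-diarize.
```
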
```diff
@@ -11,6 +11,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
 attr_accessor :effort

@@ -46,6 +49,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 effort: nil,
 # **Deprecated:** use `summary` instead.
 #
@@ -7,6 +7,9 @@ module OpenAI
 # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
 # effort can result in faster responses and fewer tokens used on reasoning in a
 # response.
+#
+# Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+# effort.
 module ReasoningEffort
 extend OpenAI::Internal::Type::Enum

```
```diff
@@ -36,6 +36,14 @@ module OpenAI
 end
 attr_writer :chunking_strategy

+# A description for the vector store. Can be used to describe the vector store's
+# purpose.
+sig { returns(T.nilable(String)) }
+attr_reader :description
+
+sig { params(description: String).void }
+attr_writer :description
+
 # The expiration policy for a vector store.
 sig { returns(T.nilable(OpenAI::VectorStoreCreateParams::ExpiresAfter)) }
 attr_reader :expires_after
@@ -79,6 +87,7 @@ module OpenAI
 OpenAI::AutoFileChunkingStrategyParam::OrHash,
 OpenAI::StaticFileChunkingStrategyObjectParam::OrHash
 ),
+description: String,
 expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter::OrHash,
 file_ids: T::Array[String],
 metadata: T.nilable(T::Hash[Symbol, String]),
@@ -90,6 +99,9 @@ module OpenAI
 # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
 # strategy. Only applicable if `file_ids` is non-empty.
 chunking_strategy: nil,
+# A description for the vector store. Can be used to describe the vector store's
+# purpose.
+description: nil,
 # The expiration policy for a vector store.
 expires_after: nil,
 # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
@@ -117,6 +129,7 @@ module OpenAI
 OpenAI::AutoFileChunkingStrategyParam,
 OpenAI::StaticFileChunkingStrategyObjectParam
 ),
+description: String,
 expires_after: OpenAI::VectorStoreCreateParams::ExpiresAfter,
 file_ids: T::Array[String],
 metadata: T.nilable(T::Hash[Symbol, String]),
```
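
The new `description` field is also threaded through the vector stores resource (`data/lib/openai/resources/vector_stores.rb` and its `rbi`/`sig` counterparts in the file list above). A minimal sketch of using it on create; the store name and description text are placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# description is a new optional string describing the vector store's purpose.
vector_store = client.vector_stores.create(
  name: "support-articles",
  description: "Published help-center articles used for file search"
)

puts vector_store.id
```
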
```diff
@@ -190,7 +190,7 @@ module OpenAI
 )
 end

-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 sig do
 returns(
 OpenAI::VectorStores::VectorStoreFile::LastError::Code::TaggedSymbol
@@ -212,7 +212,7 @@ module OpenAI
 ).returns(T.attached_class)
 end
 def self.new(
-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 code:,
 # A human-readable description of the error.
 message:
@@ -231,7 +231,7 @@ module OpenAI
 def to_hash
 end

-# One of `server_error` or `
+# One of `server_error`, `unsupported_file`, or `invalid_file`.
 module Code
 extend OpenAI::Internal::Type::Enum

```