openai 0.23.3 → 0.25.0
This diff shows the changes between two publicly released versions of this package, as they appear in its public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +17 -0
- data/README.md +1 -1
- data/lib/openai/errors.rb +25 -11
- data/lib/openai/internal/conversation_cursor_page.rb +1 -1
- data/lib/openai/internal/cursor_page.rb +1 -1
- data/lib/openai/internal/page.rb +1 -1
- data/lib/openai/internal/stream.rb +1 -0
- data/lib/openai/internal/transport/base_client.rb +11 -7
- data/lib/openai/internal/type/base_page.rb +1 -1
- data/lib/openai/internal/type/base_stream.rb +9 -1
- data/lib/openai/internal/util.rb +1 -1
- data/lib/openai/models/conversations/computer_screenshot_content.rb +2 -0
- data/lib/openai/models/conversations/conversation_item.rb +1 -0
- data/lib/openai/models/conversations/input_file_content.rb +1 -34
- data/lib/openai/models/conversations/input_image_content.rb +1 -54
- data/lib/openai/models/conversations/input_text_content.rb +1 -18
- data/lib/openai/models/conversations/message.rb +43 -8
- data/lib/openai/models/conversations/output_text_content.rb +1 -49
- data/lib/openai/models/conversations/refusal_content.rb +1 -18
- data/lib/openai/models/conversations/summary_text_content.rb +7 -2
- data/lib/openai/models/conversations/text_content.rb +2 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +13 -1
- data/lib/openai/models/evals/run_cancel_response.rb +13 -1
- data/lib/openai/models/evals/run_create_params.rb +13 -1
- data/lib/openai/models/evals/run_create_response.rb +13 -1
- data/lib/openai/models/evals/run_list_response.rb +13 -1
- data/lib/openai/models/evals/run_retrieve_response.rb +13 -1
- data/lib/openai/models/evals/runs/output_item_list_response.rb +49 -4
- data/lib/openai/models/evals/runs/output_item_retrieve_response.rb +49 -4
- data/lib/openai/models/graders/score_model_grader.rb +56 -3
- data/lib/openai/models/responses/response_content.rb +25 -1
- data/lib/openai/models/responses/response_content_part_added_event.rb +27 -3
- data/lib/openai/models/responses/response_content_part_done_event.rb +27 -3
- data/lib/openai/models/responses/response_reasoning_item.rb +6 -8
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +5 -10
- data/rbi/openai/errors.rbi +29 -2
- data/rbi/openai/internal/transport/base_client.rbi +4 -5
- data/rbi/openai/internal/type/base_page.rbi +1 -1
- data/rbi/openai/internal/type/base_stream.rbi +16 -1
- data/rbi/openai/internal/util.rbi +1 -1
- data/rbi/openai/models/conversations/computer_screenshot_content.rbi +1 -0
- data/rbi/openai/models/conversations/input_file_content.rbi +1 -64
- data/rbi/openai/models/conversations/input_image_content.rbi +1 -105
- data/rbi/openai/models/conversations/input_text_content.rbi +1 -30
- data/rbi/openai/models/conversations/message.rbi +46 -10
- data/rbi/openai/models/conversations/output_text_content.rbi +1 -102
- data/rbi/openai/models/conversations/refusal_content.rbi +1 -30
- data/rbi/openai/models/conversations/summary_text_content.rbi +9 -1
- data/rbi/openai/models/conversations/text_content.rbi +1 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +16 -0
- data/rbi/openai/models/evals/run_cancel_response.rbi +18 -0
- data/rbi/openai/models/evals/run_create_params.rbi +18 -0
- data/rbi/openai/models/evals/run_create_response.rbi +18 -0
- data/rbi/openai/models/evals/run_list_response.rbi +18 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +18 -0
- data/rbi/openai/models/evals/runs/output_item_list_response.rbi +88 -5
- data/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi +88 -5
- data/rbi/openai/models/graders/score_model_grader.rbi +88 -4
- data/rbi/openai/models/responses/response_content.rbi +34 -1
- data/rbi/openai/models/responses/response_content_part_added_event.rbi +36 -2
- data/rbi/openai/models/responses/response_content_part_done_event.rbi +36 -2
- data/rbi/openai/models/responses/response_reasoning_item.rbi +6 -4
- data/sig/openai/errors.rbs +7 -0
- data/sig/openai/internal/type/base_stream.rbs +5 -0
- data/sig/openai/models/conversations/input_file_content.rbs +1 -35
- data/sig/openai/models/conversations/input_image_content.rbs +1 -43
- data/sig/openai/models/conversations/input_text_content.rbs +1 -11
- data/sig/openai/models/conversations/message.rbs +18 -5
- data/sig/openai/models/conversations/output_text_content.rbs +1 -46
- data/sig/openai/models/conversations/refusal_content.rbs +1 -11
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +5 -0
- data/sig/openai/models/evals/run_cancel_response.rbs +5 -0
- data/sig/openai/models/evals/run_create_params.rbs +5 -0
- data/sig/openai/models/evals/run_create_response.rbs +5 -0
- data/sig/openai/models/evals/run_list_response.rbs +5 -0
- data/sig/openai/models/evals/run_retrieve_response.rbs +5 -0
- data/sig/openai/models/evals/runs/output_item_list_response.rbs +43 -4
- data/sig/openai/models/evals/runs/output_item_retrieve_response.rbs +43 -4
- data/sig/openai/models/graders/score_model_grader.rbs +44 -5
- data/sig/openai/models/responses/response_content.rbs +13 -0
- data/sig/openai/models/responses/response_content_part_added_event.rbs +13 -0
- data/sig/openai/models/responses/response_content_part_done_event.rbs +13 -0
- metadata +2 -17
- data/lib/openai/models/conversations/container_file_citation_body.rb +0 -58
- data/lib/openai/models/conversations/file_citation_body.rb +0 -42
- data/lib/openai/models/conversations/lob_prob.rb +0 -35
- data/lib/openai/models/conversations/top_log_prob.rb +0 -29
- data/lib/openai/models/conversations/url_citation_body.rb +0 -50
- data/rbi/openai/models/conversations/container_file_citation_body.rbi +0 -82
- data/rbi/openai/models/conversations/file_citation_body.rbi +0 -61
- data/rbi/openai/models/conversations/lob_prob.rbi +0 -50
- data/rbi/openai/models/conversations/top_log_prob.rbi +0 -41
- data/rbi/openai/models/conversations/url_citation_body.rbi +0 -74
- data/sig/openai/models/conversations/container_file_citation_body.rbs +0 -47
- data/sig/openai/models/conversations/file_citation_body.rbs +0 -37
- data/sig/openai/models/conversations/lob_prob.rbs +0 -37
- data/sig/openai/models/conversations/top_log_prob.rbs +0 -28
- data/sig/openai/models/conversations/url_citation_body.rbs +0 -42
data/rbi/openai/models/conversations/refusal_content.rbi

```diff
@@ -3,36 +3,7 @@
 module OpenAI
   module Models
     module Conversations
-      class RefusalContent < OpenAI::Internal::Type::BaseModel
-        OrHash =
-          T.type_alias do
-            T.any(
-              OpenAI::Conversations::RefusalContent,
-              OpenAI::Internal::AnyHash
-            )
-          end
-
-        # The refusal explanation from the model.
-        sig { returns(String) }
-        attr_accessor :refusal
-
-        # The type of the refusal. Always `refusal`.
-        sig { returns(Symbol) }
-        attr_accessor :type
-
-        sig { params(refusal: String, type: Symbol).returns(T.attached_class) }
-        def self.new(
-          # The refusal explanation from the model.
-          refusal:,
-          # The type of the refusal. Always `refusal`.
-          type: :refusal
-        )
-        end
-
-        sig { override.returns({ refusal: String, type: Symbol }) }
-        def to_hash
-        end
-      end
+      RefusalContent = OpenAI::Models::Responses::ResponseOutputRefusal
     end
   end
 end
```
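The standalone `Conversations::RefusalContent` class becomes a plain constant alias for the equivalent Responses model, so existing references keep resolving. A minimal sketch of what that means for calling code; the refusal string is an invented example, not a value from this diff:

```ruby
require "openai"

# The old constant still resolves; it now points at the shared Responses model.
alias_target = OpenAI::Models::Conversations::RefusalContent
puts alias_target.equal?(OpenAI::Models::Responses::ResponseOutputRefusal) # => true

# Construction keeps the shape of the removed class: a required `refusal`
# string plus a `type` that defaults to :refusal.
content = alias_target.new(refusal: "I can't help with that.")
puts content.type # => :refusal
```

The same aliasing pattern covers the other removed `data/lib/openai/models/conversations/*_content` files in the list above.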
data/rbi/openai/models/conversations/summary_text_content.rbi

```diff
@@ -12,14 +12,22 @@ module OpenAI
           )
         end
 
+        # A summary of the reasoning output from the model so far.
         sig { returns(String) }
         attr_accessor :text
 
+        # The type of the object. Always `summary_text`.
         sig { returns(Symbol) }
         attr_accessor :type
 
+        # A summary text from the model.
         sig { params(text: String, type: Symbol).returns(T.attached_class) }
-        def self.new(text:, type: :summary_text)
+        def self.new(
+          # A summary of the reasoning output from the model so far.
+          text:,
+          # The type of the object. Always `summary_text`.
+          type: :summary_text
+        )
        end
 
         sig { override.returns({ text: String, type: Symbol }) }
```
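This change is documentation only; the constructor's behavior is unchanged. A quick sketch of the shape the new comments pin down, with an invented summary string:

```ruby
# `text` is required; `type` defaults to :summary_text.
summary = OpenAI::Models::Conversations::SummaryTextContent.new(
  text: "The model checked edge cases before answering."
)
puts summary.type # => :summary_text
```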
data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi

```diff
@@ -887,6 +887,14 @@ module OpenAI
           sig { params(max_completion_tokens: Integer).void }
           attr_writer :max_completion_tokens
 
+          # Constrains effort on reasoning for
+          # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+          # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+          # effort can result in faster responses and fewer tokens used on reasoning in a
+          # response.
+          sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
+          attr_accessor :reasoning_effort
+
           # An object specifying the format that the model must output.
           #
           # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -963,6 +971,7 @@ module OpenAI
           sig do
             params(
               max_completion_tokens: Integer,
+              reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
               response_format:
                 T.any(
                   OpenAI::ResponseFormatText::OrHash,
@@ -978,6 +987,12 @@ module OpenAI
           def self.new(
             # The maximum number of tokens in the generated output.
             max_completion_tokens: nil,
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            reasoning_effort: nil,
             # An object specifying the format that the model must output.
             #
             # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1006,6 +1021,7 @@ module OpenAI
             override.returns(
               {
                 max_completion_tokens: Integer,
+                reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 response_format:
                   T.any(
                     OpenAI::ResponseFormatText,
```
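This data source's sampling params, and the sampling params of the five run models that follow, all gain the same nilable `reasoning_effort` member. A hedged sketch of passing it when creating an eval run; the client call shape follows the gem's resource layout, and the eval id, run name, and data-source contents are placeholders rather than values from this diff:

```ruby
require "openai"

# Reads OPENAI_API_KEY from the environment.
client = OpenAI::Client.new

run = client.evals.runs.create(
  "eval_123", # placeholder eval id
  name: "nightly-regression",
  data_source: {
    type: :completions,
    source: {type: :stored_completions, limit: 10},
    sampling_params: {
      max_completion_tokens: 512,
      # New in this release range: nilable, one of
      # :minimal, :low, :medium, or :high.
      reasoning_effort: :low
    }
  }
)
puts run.id
```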
data/rbi/openai/models/evals/run_cancel_response.rbi

```diff
@@ -1115,6 +1115,14 @@ module OpenAI
             sig { params(max_completion_tokens: Integer).void }
             attr_writer :max_completion_tokens
 
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
+            attr_accessor :reasoning_effort
+
             # A seed value to initialize the randomness, during sampling.
             sig { returns(T.nilable(Integer)) }
             attr_reader :seed
@@ -1201,6 +1209,8 @@ module OpenAI
             sig do
               params(
                 max_completion_tokens: Integer,
+                reasoning_effort:
+                  T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 seed: Integer,
                 temperature: Float,
                 text:
@@ -1226,6 +1236,12 @@ module OpenAI
             def self.new(
               # The maximum number of tokens in the generated output.
               max_completion_tokens: nil,
+              # Constrains effort on reasoning for
+              # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              # effort can result in faster responses and fewer tokens used on reasoning in a
+              # response.
+              reasoning_effort: nil,
               # A seed value to initialize the randomness, during sampling.
               seed: nil,
               # A higher temperature increases randomness in the outputs.
@@ -1260,6 +1276,8 @@ module OpenAI
               override.returns(
                 {
                   max_completion_tokens: Integer,
+                  reasoning_effort:
+                    T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
                   seed: Integer,
                   temperature: Float,
                   text:
```
data/rbi/openai/models/evals/run_create_params.rbi

```diff
@@ -1073,6 +1073,14 @@ module OpenAI
             sig { params(max_completion_tokens: Integer).void }
             attr_writer :max_completion_tokens
 
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
+            attr_accessor :reasoning_effort
+
             # A seed value to initialize the randomness, during sampling.
             sig { returns(T.nilable(Integer)) }
             attr_reader :seed
@@ -1176,6 +1184,8 @@ module OpenAI
             sig do
               params(
                 max_completion_tokens: Integer,
+                reasoning_effort:
+                  T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 seed: Integer,
                 temperature: Float,
                 text:
@@ -1201,6 +1211,12 @@ module OpenAI
             def self.new(
               # The maximum number of tokens in the generated output.
               max_completion_tokens: nil,
+              # Constrains effort on reasoning for
+              # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              # effort can result in faster responses and fewer tokens used on reasoning in a
+              # response.
+              reasoning_effort: nil,
               # A seed value to initialize the randomness, during sampling.
               seed: nil,
               # A higher temperature increases randomness in the outputs.
@@ -1235,6 +1251,8 @@ module OpenAI
               override.returns(
                 {
                   max_completion_tokens: Integer,
+                  reasoning_effort:
+                    T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                   seed: Integer,
                   temperature: Float,
                   text:
```
data/rbi/openai/models/evals/run_create_response.rbi

```diff
@@ -1115,6 +1115,14 @@ module OpenAI
             sig { params(max_completion_tokens: Integer).void }
             attr_writer :max_completion_tokens
 
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
+            attr_accessor :reasoning_effort
+
             # A seed value to initialize the randomness, during sampling.
             sig { returns(T.nilable(Integer)) }
             attr_reader :seed
@@ -1201,6 +1209,8 @@ module OpenAI
             sig do
               params(
                 max_completion_tokens: Integer,
+                reasoning_effort:
+                  T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 seed: Integer,
                 temperature: Float,
                 text:
@@ -1226,6 +1236,12 @@ module OpenAI
             def self.new(
               # The maximum number of tokens in the generated output.
               max_completion_tokens: nil,
+              # Constrains effort on reasoning for
+              # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              # effort can result in faster responses and fewer tokens used on reasoning in a
+              # response.
+              reasoning_effort: nil,
               # A seed value to initialize the randomness, during sampling.
               seed: nil,
               # A higher temperature increases randomness in the outputs.
@@ -1260,6 +1276,8 @@ module OpenAI
               override.returns(
                 {
                   max_completion_tokens: Integer,
+                  reasoning_effort:
+                    T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
                   seed: Integer,
                   temperature: Float,
                   text:
```
data/rbi/openai/models/evals/run_list_response.rbi

```diff
@@ -1111,6 +1111,14 @@ module OpenAI
             sig { params(max_completion_tokens: Integer).void }
             attr_writer :max_completion_tokens
 
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
+            attr_accessor :reasoning_effort
+
             # A seed value to initialize the randomness, during sampling.
             sig { returns(T.nilable(Integer)) }
             attr_reader :seed
@@ -1197,6 +1205,8 @@ module OpenAI
             sig do
               params(
                 max_completion_tokens: Integer,
+                reasoning_effort:
+                  T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 seed: Integer,
                 temperature: Float,
                 text:
@@ -1222,6 +1232,12 @@ module OpenAI
             def self.new(
               # The maximum number of tokens in the generated output.
               max_completion_tokens: nil,
+              # Constrains effort on reasoning for
+              # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              # effort can result in faster responses and fewer tokens used on reasoning in a
+              # response.
+              reasoning_effort: nil,
               # A seed value to initialize the randomness, during sampling.
               seed: nil,
               # A higher temperature increases randomness in the outputs.
@@ -1256,6 +1272,8 @@ module OpenAI
               override.returns(
                 {
                   max_completion_tokens: Integer,
+                  reasoning_effort:
+                    T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
                   seed: Integer,
                   temperature: Float,
                   text:
```
data/rbi/openai/models/evals/run_retrieve_response.rbi

```diff
@@ -1117,6 +1117,14 @@ module OpenAI
             sig { params(max_completion_tokens: Integer).void }
             attr_writer :max_completion_tokens
 
+            # Constrains effort on reasoning for
+            # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+            # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+            # effort can result in faster responses and fewer tokens used on reasoning in a
+            # response.
+            sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
+            attr_accessor :reasoning_effort
+
             # A seed value to initialize the randomness, during sampling.
             sig { returns(T.nilable(Integer)) }
             attr_reader :seed
@@ -1203,6 +1211,8 @@ module OpenAI
             sig do
               params(
                 max_completion_tokens: Integer,
+                reasoning_effort:
+                  T.nilable(OpenAI::ReasoningEffort::OrSymbol),
                 seed: Integer,
                 temperature: Float,
                 text:
@@ -1228,6 +1238,12 @@ module OpenAI
             def self.new(
               # The maximum number of tokens in the generated output.
               max_completion_tokens: nil,
+              # Constrains effort on reasoning for
+              # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+              # effort can result in faster responses and fewer tokens used on reasoning in a
+              # response.
+              reasoning_effort: nil,
               # A seed value to initialize the randomness, during sampling.
               seed: nil,
               # A higher temperature increases randomness in the outputs.
@@ -1262,6 +1278,8 @@ module OpenAI
               override.returns(
                 {
                   max_completion_tokens: Integer,
+                  reasoning_effort:
+                    T.nilable(OpenAI::ReasoningEffort::TaggedSymbol),
                   seed: Integer,
                   temperature: Float,
                   text:
```
data/rbi/openai/models/evals/runs/output_item_list_response.rbi

```diff
@@ -37,8 +37,14 @@ module OpenAI
           sig { returns(Symbol) }
           attr_accessor :object
 
-          # A list of results
-          sig { returns(T::Array[T::Hash[Symbol, T.anything]]) }
+          # A list of grader results for this output item.
+          sig do
+            returns(
+              T::Array[
+                OpenAI::Models::Evals::Runs::OutputItemListResponse::Result
+              ]
+            )
+          end
           attr_accessor :results
 
           # The identifier of the evaluation run associated with this output item.
@@ -71,7 +77,10 @@ module OpenAI
              datasource_item: T::Hash[Symbol, T.anything],
              datasource_item_id: Integer,
              eval_id: String,
-             results: T::Array[T::Hash[Symbol, T.anything]],
+             results:
+               T::Array[
+                 OpenAI::Models::Evals::Runs::OutputItemListResponse::Result::OrHash
+               ],
              run_id: String,
              sample:
               OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::OrHash,
@@ -90,7 +99,7 @@ module OpenAI
            datasource_item_id:,
            # The identifier of the evaluation group.
            eval_id:,
-           # A list of results
+           # A list of grader results for this output item.
            results:,
            # The identifier of the evaluation run associated with this output item.
            run_id:,
@@ -112,7 +121,10 @@ module OpenAI
              datasource_item_id: Integer,
              eval_id: String,
              object: Symbol,
-             results: T::Array[T::Hash[Symbol, T.anything]],
+             results:
+               T::Array[
+                 OpenAI::Models::Evals::Runs::OutputItemListResponse::Result
+               ],
              run_id: String,
              sample:
               OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample,
@@ -123,6 +135,77 @@ module OpenAI
           def to_hash
           end
 
+          class Result < OpenAI::Internal::Type::BaseModel
+            OrHash =
+              T.type_alias do
+                T.any(
+                  OpenAI::Models::Evals::Runs::OutputItemListResponse::Result,
+                  OpenAI::Internal::AnyHash
+                )
+              end
+
+            # The name of the grader.
+            sig { returns(String) }
+            attr_accessor :name
+
+            # Whether the grader considered the output a pass.
+            sig { returns(T::Boolean) }
+            attr_accessor :passed
+
+            # The numeric score produced by the grader.
+            sig { returns(Float) }
+            attr_accessor :score
+
+            # Optional sample or intermediate data produced by the grader.
+            sig { returns(T.nilable(T::Hash[Symbol, T.anything])) }
+            attr_accessor :sample
+
+            # The grader type (for example, "string-check-grader").
+            sig { returns(T.nilable(String)) }
+            attr_reader :type
+
+            sig { params(type: String).void }
+            attr_writer :type
+
+            # A single grader result for an evaluation run output item.
+            sig do
+              params(
+                name: String,
+                passed: T::Boolean,
+                score: Float,
+                sample: T.nilable(T::Hash[Symbol, T.anything]),
+                type: String
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # The name of the grader.
+              name:,
+              # Whether the grader considered the output a pass.
+              passed:,
+              # The numeric score produced by the grader.
+              score:,
+              # Optional sample or intermediate data produced by the grader.
+              sample: nil,
+              # The grader type (for example, "string-check-grader").
+              type: nil
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  name: String,
+                  passed: T::Boolean,
+                  score: Float,
+                  sample: T.nilable(T::Hash[Symbol, T.anything]),
+                  type: String
+                }
+              )
+            end
+            def to_hash
+            end
+          end
+
           class Sample < OpenAI::Internal::Type::BaseModel
             OrHash =
               T.type_alias do
```
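With this change an output item's `results` is typed as an array of `Result` models (`name`, `passed`, `score`, an optional `sample` hash, and an optional `type` string) instead of untyped hashes. A hedged sketch of consuming them; the ids are placeholders, and the `output_items.list` call shape is assumed from the gem's usual resource layout:

```ruby
require "openai"

client = OpenAI::Client.new

# Output items hang off an eval run; both ids here are placeholders.
items = client.evals.runs.output_items.list("run_123", eval_id: "eval_123")
items.each do |item|
  item.results.each do |result|
    # Each entry is now a typed model rather than a raw Hash.
    status = result.passed ? "PASS" : "FAIL"
    puts "#{status} #{result.name}: score=#{result.score} type=#{result.type || "n/a"}"
  end
end
```

The retrieve response below gets an identical `Result` model of its own.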
data/rbi/openai/models/evals/runs/output_item_retrieve_response.rbi

```diff
@@ -37,8 +37,14 @@ module OpenAI
           sig { returns(Symbol) }
           attr_accessor :object
 
-          # A list of results
-          sig { returns(T::Array[T::Hash[Symbol, T.anything]]) }
+          # A list of grader results for this output item.
+          sig do
+            returns(
+              T::Array[
+                OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result
+              ]
+            )
+          end
           attr_accessor :results
 
           # The identifier of the evaluation run associated with this output item.
@@ -73,7 +79,10 @@ module OpenAI
              datasource_item: T::Hash[Symbol, T.anything],
              datasource_item_id: Integer,
              eval_id: String,
-             results: T::Array[T::Hash[Symbol, T.anything]],
+             results:
+               T::Array[
+                 OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result::OrHash
+               ],
              run_id: String,
              sample:
               OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::OrHash,
@@ -92,7 +101,7 @@ module OpenAI
            datasource_item_id:,
            # The identifier of the evaluation group.
            eval_id:,
-           # A list of results
+           # A list of grader results for this output item.
            results:,
            # The identifier of the evaluation run associated with this output item.
            run_id:,
@@ -114,7 +123,10 @@ module OpenAI
              datasource_item_id: Integer,
              eval_id: String,
              object: Symbol,
-             results: T::Array[T::Hash[Symbol, T.anything]],
+             results:
+               T::Array[
+                 OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result
+               ],
              run_id: String,
              sample:
               OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample,
@@ -125,6 +137,77 @@ module OpenAI
           def to_hash
           end
 
+          class Result < OpenAI::Internal::Type::BaseModel
+            OrHash =
+              T.type_alias do
+                T.any(
+                  OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Result,
+                  OpenAI::Internal::AnyHash
+                )
+              end
+
+            # The name of the grader.
+            sig { returns(String) }
+            attr_accessor :name
+
+            # Whether the grader considered the output a pass.
+            sig { returns(T::Boolean) }
+            attr_accessor :passed
+
+            # The numeric score produced by the grader.
+            sig { returns(Float) }
+            attr_accessor :score
+
+            # Optional sample or intermediate data produced by the grader.
+            sig { returns(T.nilable(T::Hash[Symbol, T.anything])) }
+            attr_accessor :sample
+
+            # The grader type (for example, "string-check-grader").
+            sig { returns(T.nilable(String)) }
+            attr_reader :type
+
+            sig { params(type: String).void }
+            attr_writer :type
+
+            # A single grader result for an evaluation run output item.
+            sig do
+              params(
+                name: String,
+                passed: T::Boolean,
+                score: Float,
+                sample: T.nilable(T::Hash[Symbol, T.anything]),
+                type: String
+              ).returns(T.attached_class)
+            end
+            def self.new(
+              # The name of the grader.
+              name:,
+              # Whether the grader considered the output a pass.
+              passed:,
+              # The numeric score produced by the grader.
+              score:,
+              # Optional sample or intermediate data produced by the grader.
+              sample: nil,
+              # The grader type (for example, "string-check-grader").
+              type: nil
+            )
+            end
+
+            sig do
+              override.returns(
+                {
+                  name: String,
+                  passed: T::Boolean,
+                  score: Float,
+                  sample: T.nilable(T::Hash[Symbol, T.anything]),
+                  type: String
+                }
+              )
+            end
+            def to_hash
+            end
+          end
+
           class Sample < OpenAI::Internal::Type::BaseModel
             OrHash =
               T.type_alias do
```