openai 0.6.0 → 0.7.0

This diff shows the content changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only. The hunks below come from the gem's Sorbet RBI signatures: version 0.7.0 adds optional `text` and `tools` sampling parameters (plus a nested `Text` output-format model) to the eval run data sources, on both the request side (`OpenAI::Evals::RunCreateParams`) and the response side (`OpenAI::Models::Evals::RunCreateResponse`).
@@ -1008,6 +1008,81 @@ module OpenAI
  sig { params(temperature: Float).void }
  attr_writer :temperature

+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ sig do
+   returns(
+     T.nilable(
+       OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text
+     )
+   )
+ end
+ attr_reader :text
+
+ sig do
+   params(
+     text:
+       OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text::OrHash
+   ).void
+ end
+ attr_writer :text
+
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ #   capabilities, like
+ #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ #   Learn more about
+ #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ #   the model to call your own code. Learn more about
+ #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+ sig do
+   returns(
+     T.nilable(
+       T::Array[
+         T.any(
+           OpenAI::Responses::FunctionTool,
+           OpenAI::Responses::FileSearchTool,
+           OpenAI::Responses::ComputerTool,
+           OpenAI::Responses::Tool::Mcp,
+           OpenAI::Responses::Tool::CodeInterpreter,
+           OpenAI::Responses::Tool::ImageGeneration,
+           OpenAI::Responses::Tool::LocalShell,
+           OpenAI::Responses::WebSearchTool
+         )
+       ]
+     )
+   )
+ end
+ attr_reader :tools
+
+ sig do
+   params(
+     tools:
+       T::Array[
+         T.any(
+           OpenAI::Responses::FunctionTool::OrHash,
+           OpenAI::Responses::FileSearchTool::OrHash,
+           OpenAI::Responses::ComputerTool::OrHash,
+           OpenAI::Responses::Tool::Mcp::OrHash,
+           OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+           OpenAI::Responses::Tool::ImageGeneration::OrHash,
+           OpenAI::Responses::Tool::LocalShell::OrHash,
+           OpenAI::Responses::WebSearchTool::OrHash
+         )
+       ]
+   ).void
+ end
+ attr_writer :tools
+
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  sig { returns(T.nilable(Float)) }
  attr_reader :top_p
@@ -1020,6 +1095,21 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+   OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text::OrHash,
+ tools:
+   T::Array[
+     T.any(
+       OpenAI::Responses::FunctionTool::OrHash,
+       OpenAI::Responses::FileSearchTool::OrHash,
+       OpenAI::Responses::ComputerTool::OrHash,
+       OpenAI::Responses::Tool::Mcp::OrHash,
+       OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+       OpenAI::Responses::Tool::ImageGeneration::OrHash,
+       OpenAI::Responses::Tool::LocalShell::OrHash,
+       OpenAI::Responses::WebSearchTool::OrHash
+     )
+   ],
  top_p: Float
  ).returns(T.attached_class)
  end
@@ -1030,6 +1120,27 @@ module OpenAI
  seed: nil,
  # A higher temperature increases randomness in the outputs.
  temperature: nil,
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ text: nil,
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ #   capabilities, like
+ #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ #   Learn more about
+ #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ #   the model to call your own code. Learn more about
+ #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+ tools: nil,
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  top_p: nil
  )
@@ -1041,12 +1152,123 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+   OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text,
+ tools:
+   T::Array[
+     T.any(
+       OpenAI::Responses::FunctionTool,
+       OpenAI::Responses::FileSearchTool,
+       OpenAI::Responses::ComputerTool,
+       OpenAI::Responses::Tool::Mcp,
+       OpenAI::Responses::Tool::CodeInterpreter,
+       OpenAI::Responses::Tool::ImageGeneration,
+       OpenAI::Responses::Tool::LocalShell,
+       OpenAI::Responses::WebSearchTool
+     )
+   ],
  top_p: Float
  }
  )
  end
  def to_hash
  end
+
+ class Text < OpenAI::Internal::Type::BaseModel
+   OrHash =
+     T.type_alias do
+       T.any(
+         OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text,
+         OpenAI::Internal::AnyHash
+       )
+     end
+
+   # An object specifying the format that the model must output.
+   #
+   # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+   # ensures the model will match your supplied JSON schema. Learn more in the
+   # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+   #
+   # The default format is `{ "type": "text" }` with no additional options.
+   #
+   # **Not recommended for gpt-4o and newer models:**
+   #
+   # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+   # ensures the message the model generates is valid JSON. Using `json_schema` is
+   # preferred for models that support it.
+   sig do
+     returns(
+       T.nilable(
+         T.any(
+           OpenAI::ResponseFormatText,
+           OpenAI::Responses::ResponseFormatTextJSONSchemaConfig,
+           OpenAI::ResponseFormatJSONObject
+         )
+       )
+     )
+   end
+   attr_reader :format_
+
+   sig do
+     params(
+       format_:
+         T.any(
+           OpenAI::ResponseFormatText::OrHash,
+           OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+           OpenAI::ResponseFormatJSONObject::OrHash
+         )
+     ).void
+   end
+   attr_writer :format_
+
+   # Configuration options for a text response from the model. Can be plain text or
+   # structured JSON data. Learn more:
+   #
+   # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+   sig do
+     params(
+       format_:
+         T.any(
+           OpenAI::ResponseFormatText::OrHash,
+           OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+           OpenAI::ResponseFormatJSONObject::OrHash
+         )
+     ).returns(T.attached_class)
+   end
+   def self.new(
+     # An object specifying the format that the model must output.
+     #
+     # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+     # ensures the model will match your supplied JSON schema. Learn more in the
+     # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+     #
+     # The default format is `{ "type": "text" }` with no additional options.
+     #
+     # **Not recommended for gpt-4o and newer models:**
+     #
+     # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+     # ensures the message the model generates is valid JSON. Using `json_schema` is
+     # preferred for models that support it.
+     format_: nil
+   )
+   end
+
+   sig do
+     override.returns(
+       {
+         format_:
+           T.any(
+             OpenAI::ResponseFormatText,
+             OpenAI::Responses::ResponseFormatTextJSONSchemaConfig,
+             OpenAI::ResponseFormatJSONObject
+           )
+       }
+     )
+   end
+   def to_hash
+   end
+ end
  end
  end

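For orientation, here is a minimal sketch of how the new `text` and `tools` sampling parameters might be populated on the request side, using only classes named in this diff. It is not taken from the gem's documentation: the function tool's name and schema are hypothetical, and the surrounding eval/run setup (client construction, the rest of the data source) is assumed and elided.

```ruby
# Sketch only, assuming the 0.7.0 signatures above; the tool definition and the
# chosen output format are illustrative placeholders.
require "openai"

sampling_params =
  OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams.new(
    temperature: 0.2,
    # New in 0.7.0: output configuration (plain text, JSON object, or JSON schema).
    text:
      OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text.new(
        format_: OpenAI::ResponseFormatText.new
      ),
    # New in 0.7.0: tools the model may call while generating a response.
    tools: [
      OpenAI::Responses::FunctionTool.new(
        name: "lookup_order", # hypothetical custom function tool
        parameters: { type: "object", properties: {} },
        strict: true
      )
    ]
  )
```

Because every `OrHash` alias in the diff also admits `OpenAI::Internal::AnyHash`, plain hashes with the same keys should be accepted wherever the typed models are shown above.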
@@ -1056,6 +1056,66 @@ module OpenAI
  sig { params(temperature: Float).void }
  attr_writer :temperature

+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ sig do
+   returns(
+     T.nilable(
+       OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text
+     )
+   )
+ end
+ attr_reader :text
+
+ sig do
+   params(
+     text:
+       OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text::OrHash
+   ).void
+ end
+ attr_writer :text
+
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ #   capabilities, like
+ #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ #   Learn more about
+ #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ #   the model to call your own code. Learn more about
+ #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+ sig do
+   returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants]))
+ end
+ attr_reader :tools
+
+ sig do
+   params(
+     tools:
+       T::Array[
+         T.any(
+           OpenAI::Responses::FunctionTool::OrHash,
+           OpenAI::Responses::FileSearchTool::OrHash,
+           OpenAI::Responses::ComputerTool::OrHash,
+           OpenAI::Responses::Tool::Mcp::OrHash,
+           OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+           OpenAI::Responses::Tool::ImageGeneration::OrHash,
+           OpenAI::Responses::Tool::LocalShell::OrHash,
+           OpenAI::Responses::WebSearchTool::OrHash
+         )
+       ]
+   ).void
+ end
+ attr_writer :tools
+
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  sig { returns(T.nilable(Float)) }
  attr_reader :top_p
@@ -1068,6 +1128,21 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+   OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text::OrHash,
+ tools:
+   T::Array[
+     T.any(
+       OpenAI::Responses::FunctionTool::OrHash,
+       OpenAI::Responses::FileSearchTool::OrHash,
+       OpenAI::Responses::ComputerTool::OrHash,
+       OpenAI::Responses::Tool::Mcp::OrHash,
+       OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+       OpenAI::Responses::Tool::ImageGeneration::OrHash,
+       OpenAI::Responses::Tool::LocalShell::OrHash,
+       OpenAI::Responses::WebSearchTool::OrHash
+     )
+   ],
  top_p: Float
  ).returns(T.attached_class)
  end
@@ -1078,6 +1153,27 @@ module OpenAI
  seed: nil,
  # A higher temperature increases randomness in the outputs.
  temperature: nil,
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ text: nil,
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ #   capabilities, like
+ #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ #   Learn more about
+ #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ #   the model to call your own code. Learn more about
+ #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+ tools: nil,
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  top_p: nil
  )
@@ -1089,12 +1185,103 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+   OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text,
+ tools: T::Array[OpenAI::Responses::Tool::Variants],
  top_p: Float
  }
  )
  end
  def to_hash
  end
+
+ class Text < OpenAI::Internal::Type::BaseModel
+   OrHash =
+     T.type_alias do
+       T.any(
+         OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text,
+         OpenAI::Internal::AnyHash
+       )
+     end
+
+   # An object specifying the format that the model must output.
+   #
+   # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+   # ensures the model will match your supplied JSON schema. Learn more in the
+   # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+   #
+   # The default format is `{ "type": "text" }` with no additional options.
+   #
+   # **Not recommended for gpt-4o and newer models:**
+   #
+   # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+   # ensures the message the model generates is valid JSON. Using `json_schema` is
+   # preferred for models that support it.
+   sig do
+     returns(
+       T.nilable(
+         OpenAI::Responses::ResponseFormatTextConfig::Variants
+       )
+     )
+   end
+   attr_reader :format_
+
+   sig do
+     params(
+       format_:
+         T.any(
+           OpenAI::ResponseFormatText::OrHash,
+           OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+           OpenAI::ResponseFormatJSONObject::OrHash
+         )
+     ).void
+   end
+   attr_writer :format_
+
+   # Configuration options for a text response from the model. Can be plain text or
+   # structured JSON data. Learn more:
+   #
+   # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+   sig do
+     params(
+       format_:
+         T.any(
+           OpenAI::ResponseFormatText::OrHash,
+           OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+           OpenAI::ResponseFormatJSONObject::OrHash
+         )
+     ).returns(T.attached_class)
+   end
+   def self.new(
+     # An object specifying the format that the model must output.
+     #
+     # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+     # ensures the model will match your supplied JSON schema. Learn more in the
+     # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+     #
+     # The default format is `{ "type": "text" }` with no additional options.
+     #
+     # **Not recommended for gpt-4o and newer models:**
+     #
+     # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+     # ensures the message the model generates is valid JSON. Using `json_schema` is
+     # preferred for models that support it.
+     format_: nil
+   )
+   end
+
+   sig do
+     override.returns(
+       {
+         format_:
+           OpenAI::Responses::ResponseFormatTextConfig::Variants
+       }
+     )
+   end
+   def to_hash
+   end
+ end
  end
  end
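The second file in the diff mirrors the same additions onto the response model, `OpenAI::Models::Evals::RunCreateResponse`. Below is a rough sketch of reading those fields back, assuming `run` is such a response obtained elsewhere and that the `Responses` data-source variant exposes a `sampling_params` reader (implied by the nesting above, but not itself shown in this diff).

```ruby
# Sketch only: `run` is assumed to be an OpenAI::Models::Evals::RunCreateResponse
# returned by an eval-run call made elsewhere in the application.
data_source = run.data_source

if data_source.is_a?(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses)
  params = data_source.sampling_params

  # New in 0.7.0: the configured output format, one of ResponseFormatText,
  # ResponseFormatTextJSONSchemaConfig, or ResponseFormatJSONObject (or nil).
  format = params&.text&.format_

  # New in 0.7.0: the tools the model was allowed to call (or nil).
  tools = params&.tools || []

  puts "output format: #{format&.class}, tools configured: #{tools.length}"
end
```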