openai 0.6.0 → 0.7.0

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in that public registry.
@@ -620,20 +620,96 @@ module OpenAI
  # @return [Float, nil]
  optional :temperature, Float
 
+ # @!attribute text
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, nil]
+ optional :text,
+ -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text }
+
+ # @!attribute tools
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
+ #
+ # @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
+ optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
+
  # @!attribute top_p
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  #
  # @return [Float, nil]
  optional :top_p, Float
 
- # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, top_p: nil)
+ # @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams}
+ # for more details.
+ #
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
  #
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
  #
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
  #
+ # @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
+ #
+ # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+ #
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
+
+ # @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams#text
+ class Text < OpenAI::Internal::Type::BaseModel
+ # @!attribute format_
+ # An object specifying the format that the model must output.
+ #
+ # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ # ensures the model will match your supplied JSON schema. Learn more in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # The default format is `{ "type": "text" }` with no additional options.
+ #
+ # **Not recommended for gpt-4o and newer models:**
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ #
+ # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
+ optional :format_,
+ union: -> {
+ OpenAI::Responses::ResponseFormatTextConfig
+ },
+ api_name: :format
+
+ # @!method initialize(format_: nil)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text}
+ # for more details.
+ #
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ #
+ # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
+ end
  end
  end
 
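The hunk above adds optional `text` and `tools` fields to the Responses-based sampling params model that is returned when retrieving an eval run. A minimal sketch of reading them follows; it is illustrative only — the IDs are placeholders and the exact `client.evals.runs.retrieve` call shape is an assumption, not something this diff shows.

    require "openai"

    client = OpenAI::Client.new # picks up OPENAI_API_KEY from the environment

    # Hypothetical IDs, for illustration only.
    run = client.evals.runs.retrieve("run_abc123", eval_id: "eval_abc123")

    data_source = run.data_source
    if data_source.respond_to?(:sampling_params) && (sampling = data_source.sampling_params)
      # New in 0.7.0: the text response configuration, when the run used one.
      # The Ruby attribute is format_; the wire key is "format" (see api_name: :format above).
      puts sampling.text&.format_
      # New in 0.7.0: the tools the model was allowed to call, when present.
      Array(sampling.tools).each { |tool| puts tool.class }
    end
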
@@ -140,7 +140,9 @@ module OpenAI
  name = func[:name] ||= params.name.split("::").last
  tool_models.store(name, params)
  func.update(parameters: params.to_json_schema)
+ tool
  else
+ tool
  end
  end
  tools.replace(mapped)
@@ -117,7 +117,9 @@ module OpenAI
  name = func[:name] ||= params.name.split("::").last
  tool_models.store(name, params)
  func.update(parameters: params.to_json_schema)
+ tool
  else
+ tool
  end
  end
  tools.replace(mapped)
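These two hunks apply the same fix in both tool-mapping helpers: each branch of the conditional now ends with `tool`, so the value collected for that element is the tool itself rather than whatever the branch happened to evaluate last (the `func.update(...)` hash in one branch, `nil` from the empty `else`). The surrounding code is not shown here, but `tools.replace(mapped)` suggests the block feeds an `Enumerable#map`. The standalone snippet below (not the gem's code) illustrates the underlying Ruby behaviour:

    # A Ruby block returns its last evaluated expression, so an if/else with an
    # empty else branch yields nil for the elements that fall through.
    items = [1, 2, 3]

    without_explicit_return = items.map { |i| i * 10 if i.odd? }    # => [10, nil, 30]
    with_explicit_return    = items.map { |i| i.odd? ? i * 10 : i } # => [10, 2, 30]

    p without_explicit_return
    p with_explicit_return
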
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module OpenAI
- VERSION = "0.6.0"
+ VERSION = "0.7.0"
  end
@@ -332,6 +332,8 @@ module OpenAI
  end
 
  # @api private
+ #
+ # https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.1.md#special-considerations-for-multipart-content
  sig do
  params(body: T.anything).returns([String, T::Enumerable[String]])
  end
@@ -814,6 +814,41 @@ module OpenAI
  sig { params(max_completion_tokens: Integer).void }
  attr_writer :max_completion_tokens
 
+ # An object specifying the format that the model must output.
+ #
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ sig do
+ returns(
+ T.nilable(
+ T.any(
+ OpenAI::ResponseFormatText,
+ OpenAI::ResponseFormatJSONSchema,
+ OpenAI::ResponseFormatJSONObject
+ )
+ )
+ )
+ end
+ attr_reader :response_format
+
+ sig do
+ params(
+ response_format:
+ T.any(
+ OpenAI::ResponseFormatText::OrHash,
+ OpenAI::ResponseFormatJSONSchema::OrHash,
+ OpenAI::ResponseFormatJSONObject::OrHash
+ )
+ ).void
+ end
+ attr_writer :response_format
+
  # A seed value to initialize the randomness, during sampling.
  sig { returns(T.nilable(Integer)) }
  attr_reader :seed
@@ -828,6 +863,19 @@ module OpenAI
  sig { params(temperature: Float).void }
  attr_writer :temperature
 
+ # A list of tools the model may call. Currently, only functions are supported as a
+ # tool. Use this to provide a list of functions the model may generate JSON inputs
+ # for. A max of 128 functions are supported.
+ sig { returns(T.nilable(T::Array[OpenAI::Chat::ChatCompletionTool])) }
+ attr_reader :tools
+
+ sig do
+ params(
+ tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash]
+ ).void
+ end
+ attr_writer :tools
+
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  sig { returns(T.nilable(Float)) }
  attr_reader :top_p
@@ -838,18 +886,40 @@ module OpenAI
  sig do
  params(
  max_completion_tokens: Integer,
+ response_format:
+ T.any(
+ OpenAI::ResponseFormatText::OrHash,
+ OpenAI::ResponseFormatJSONSchema::OrHash,
+ OpenAI::ResponseFormatJSONObject::OrHash
+ ),
  seed: Integer,
  temperature: Float,
+ tools: T::Array[OpenAI::Chat::ChatCompletionTool::OrHash],
  top_p: Float
  ).returns(T.attached_class)
  end
  def self.new(
  # The maximum number of tokens in the generated output.
  max_completion_tokens: nil,
+ # An object specifying the format that the model must output.
+ #
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ response_format: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
  # A higher temperature increases randomness in the outputs.
  temperature: nil,
+ # A list of tools the model may call. Currently, only functions are supported as a
+ # tool. Use this to provide a list of functions the model may generate JSON inputs
+ # for. A max of 128 functions are supported.
+ tools: nil,
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  top_p: nil
  )
@@ -859,14 +929,54 @@ module OpenAI
  override.returns(
  {
  max_completion_tokens: Integer,
+ response_format:
+ T.any(
+ OpenAI::ResponseFormatText,
+ OpenAI::ResponseFormatJSONSchema,
+ OpenAI::ResponseFormatJSONObject
+ ),
  seed: Integer,
  temperature: Float,
+ tools: T::Array[OpenAI::Chat::ChatCompletionTool],
  top_p: Float
  }
  )
  end
  def to_hash
  end
+
+ # An object specifying the format that the model must output.
+ #
+ # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ # Outputs which ensures the model will match your supplied JSON schema. Learn more
+ # in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ module ResponseFormat
+ extend OpenAI::Internal::Type::Union
+
+ Variants =
+ T.type_alias do
+ T.any(
+ OpenAI::ResponseFormatText,
+ OpenAI::ResponseFormatJSONSchema,
+ OpenAI::ResponseFormatJSONObject
+ )
+ end
+
+ sig do
+ override.returns(
+ T::Array[
+ OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat::Variants
+ ]
+ )
+ end
+ def self.variants
+ end
+ end
  end
  end
  end
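The three hunks above extend the Sorbet interface for `OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams` with `response_format` and `tools`. A minimal construction sketch follows; the hash shapes used for the response format and the function tool follow the usual Chat Completions conventions and are assumptions for illustration, not taken from this diff.

    require "openai"

    sampling_params =
      OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams.new(
        max_completion_tokens: 256,
        temperature: 0.2,
        # New in 0.7.0: force JSON output (an OrHash stand-in for ResponseFormatJSONObject).
        response_format: { type: :json_object },
        # New in 0.7.0: chat-completions function tools (the docs above note a max of 128).
        tools: [
          {
            type: :function,
            function: {
              name: "lookup_order", # hypothetical function, for illustration
              parameters: {
                type: "object",
                properties: { order_id: { type: "string" } },
                required: ["order_id"]
              }
            }
          }
        ]
      )
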
@@ -1056,6 +1056,66 @@ module OpenAI
  sig { params(temperature: Float).void }
  attr_writer :temperature
 
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ sig do
+ returns(
+ T.nilable(
+ OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text
+ )
+ )
+ end
+ attr_reader :text
+
+ sig do
+ params(
+ text:
+ OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text::OrHash
+ ).void
+ end
+ attr_writer :text
+
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
+ sig do
+ returns(T.nilable(T::Array[OpenAI::Responses::Tool::Variants]))
+ end
+ attr_reader :tools
+
+ sig do
+ params(
+ tools:
+ T::Array[
+ T.any(
+ OpenAI::Responses::FunctionTool::OrHash,
+ OpenAI::Responses::FileSearchTool::OrHash,
+ OpenAI::Responses::ComputerTool::OrHash,
+ OpenAI::Responses::Tool::Mcp::OrHash,
+ OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+ OpenAI::Responses::Tool::ImageGeneration::OrHash,
+ OpenAI::Responses::Tool::LocalShell::OrHash,
+ OpenAI::Responses::WebSearchTool::OrHash
+ )
+ ]
+ ).void
+ end
+ attr_writer :tools
+
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  sig { returns(T.nilable(Float)) }
  attr_reader :top_p
@@ -1068,6 +1128,21 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+ OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text::OrHash,
+ tools:
+ T::Array[
+ T.any(
+ OpenAI::Responses::FunctionTool::OrHash,
+ OpenAI::Responses::FileSearchTool::OrHash,
+ OpenAI::Responses::ComputerTool::OrHash,
+ OpenAI::Responses::Tool::Mcp::OrHash,
+ OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+ OpenAI::Responses::Tool::ImageGeneration::OrHash,
+ OpenAI::Responses::Tool::LocalShell::OrHash,
+ OpenAI::Responses::WebSearchTool::OrHash
+ )
+ ],
  top_p: Float
  ).returns(T.attached_class)
  end
@@ -1078,6 +1153,27 @@ module OpenAI
  seed: nil,
  # A higher temperature increases randomness in the outputs.
  temperature: nil,
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ text: nil,
+ # An array of tools the model may call while generating a response. You can
+ # specify which tool to use by setting the `tool_choice` parameter.
+ #
+ # The two categories of tools you can provide the model are:
+ #
+ # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+ # capabilities, like
+ # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+ # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+ # Learn more about
+ # [built-in tools](https://platform.openai.com/docs/guides/tools).
+ # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+ # the model to call your own code. Learn more about
+ # [function calling](https://platform.openai.com/docs/guides/function-calling).
+ tools: nil,
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  top_p: nil
  )
@@ -1089,12 +1185,103 @@ module OpenAI
  max_completion_tokens: Integer,
  seed: Integer,
  temperature: Float,
+ text:
+ OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text,
+ tools: T::Array[OpenAI::Responses::Tool::Variants],
  top_p: Float
  }
  )
  end
  def to_hash
  end
+
+ class Text < OpenAI::Internal::Type::BaseModel
+ OrHash =
+ T.type_alias do
+ T.any(
+ OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text,
+ OpenAI::Internal::AnyHash
+ )
+ end
+
+ # An object specifying the format that the model must output.
+ #
+ # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ # ensures the model will match your supplied JSON schema. Learn more in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # The default format is `{ "type": "text" }` with no additional options.
+ #
+ # **Not recommended for gpt-4o and newer models:**
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ sig do
+ returns(
+ T.nilable(
+ OpenAI::Responses::ResponseFormatTextConfig::Variants
+ )
+ )
+ end
+ attr_reader :format_
+
+ sig do
+ params(
+ format_:
+ T.any(
+ OpenAI::ResponseFormatText::OrHash,
+ OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+ OpenAI::ResponseFormatJSONObject::OrHash
+ )
+ ).void
+ end
+ attr_writer :format_
+
+ # Configuration options for a text response from the model. Can be plain text or
+ # structured JSON data. Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+ sig do
+ params(
+ format_:
+ T.any(
+ OpenAI::ResponseFormatText::OrHash,
+ OpenAI::Responses::ResponseFormatTextJSONSchemaConfig::OrHash,
+ OpenAI::ResponseFormatJSONObject::OrHash
+ )
+ ).returns(T.attached_class)
+ end
+ def self.new(
+ # An object specifying the format that the model must output.
+ #
+ # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ # ensures the model will match your supplied JSON schema. Learn more in the
+ # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ #
+ # The default format is `{ "type": "text" }` with no additional options.
+ #
+ # **Not recommended for gpt-4o and newer models:**
+ #
+ # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ # ensures the message the model generates is valid JSON. Using `json_schema` is
+ # preferred for models that support it.
+ format_: nil
+ )
+ end
+
+ sig do
+ override.returns(
+ {
+ format_:
+ OpenAI::Responses::ResponseFormatTextConfig::Variants
+ }
+ )
+ end
+ def to_hash
+ end
+ end
  end
  end
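As in the earlier Ruby hunk, the new `Text` model exposes its single field as `format_` — the trailing underscore keeps it from shadowing Ruby's built-in `format` — while the `api_name: :format` declaration maps it to the `format` key on the wire. A brief sketch follows, with an illustrative JSON-schema config whose exact hash shape is an assumption rather than something this diff specifies.

    require "openai"

    text_config =
      OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text.new(
        # Structured Outputs via a text JSON-schema format; the schema itself is illustrative.
        format_: {
          type: :json_schema,
          name: "verdict",
          strict: true,
          schema: {
            type: "object",
            properties: { passed: { type: "boolean" } },
            required: ["passed"]
          }
        }
      )

    p text_config.format_ # read back through the Ruby-side attribute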