openai 0.8.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +41 -0
  3. data/README.md +115 -4
  4. data/lib/openai/errors.rb +22 -0
  5. data/lib/openai/internal/type/array_of.rb +6 -1
  6. data/lib/openai/internal/type/base_model.rb +76 -24
  7. data/lib/openai/internal/type/boolean.rb +7 -1
  8. data/lib/openai/internal/type/converter.rb +42 -34
  9. data/lib/openai/internal/type/enum.rb +10 -2
  10. data/lib/openai/internal/type/file_input.rb +6 -1
  11. data/lib/openai/internal/type/hash_of.rb +6 -1
  12. data/lib/openai/internal/type/union.rb +12 -7
  13. data/lib/openai/internal/type/unknown.rb +7 -1
  14. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  15. data/lib/openai/models/audio/transcription.rb +118 -1
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  17. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  18. data/lib/openai/models/chat/chat_completion.rb +1 -0
  19. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -0
  20. data/lib/openai/models/chat/completion_create_params.rb +1 -0
  21. data/lib/openai/models/fine_tuning/job_create_params.rb +4 -2
  22. data/lib/openai/models/image_edit_params.rb +35 -1
  23. data/lib/openai/models/responses/response.rb +41 -6
  24. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  25. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  26. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  27. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  30. data/lib/openai/models/responses/response_create_params.rb +41 -32
  31. data/lib/openai/models/responses/response_output_text.rb +18 -2
  32. data/lib/openai/models/responses/response_prompt.rb +63 -0
  33. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  34. data/lib/openai/resources/audio/speech.rb +3 -1
  35. data/lib/openai/resources/chat/completions.rb +8 -0
  36. data/lib/openai/resources/fine_tuning/jobs.rb +2 -2
  37. data/lib/openai/resources/images.rb +5 -1
  38. data/lib/openai/resources/responses.rb +18 -14
  39. data/lib/openai/version.rb +1 -1
  40. data/lib/openai.rb +1 -0
  41. data/rbi/openai/errors.rbi +16 -0
  42. data/rbi/openai/internal/type/boolean.rbi +2 -0
  43. data/rbi/openai/internal/type/converter.rbi +15 -15
  44. data/rbi/openai/internal/type/union.rbi +5 -0
  45. data/rbi/openai/internal/type/unknown.rbi +2 -0
  46. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  47. data/rbi/openai/models/audio/transcription.rbi +213 -3
  48. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  49. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  50. data/rbi/openai/models/chat/chat_completion.rbi +5 -0
  51. data/rbi/openai/models/chat/chat_completion_chunk.rbi +5 -0
  52. data/rbi/openai/models/chat/completion_create_params.rbi +5 -0
  53. data/rbi/openai/models/fine_tuning/job_create_params.rbi +8 -4
  54. data/rbi/openai/models/image_edit_params.rbi +51 -0
  55. data/rbi/openai/models/responses/response.rbi +66 -7
  56. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  57. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  58. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  59. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  60. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  61. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  62. data/rbi/openai/models/responses/response_create_params.rbi +107 -64
  63. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  64. data/rbi/openai/models/responses/response_prompt.rbi +120 -0
  65. data/rbi/openai/resources/audio/speech.rbi +6 -1
  66. data/rbi/openai/resources/fine_tuning/jobs.rbi +6 -4
  67. data/rbi/openai/resources/images.rbi +11 -0
  68. data/rbi/openai/resources/responses.rbi +56 -50
  69. data/sig/openai/errors.rbs +9 -0
  70. data/sig/openai/internal/type/converter.rbs +7 -1
  71. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  72. data/sig/openai/models/audio/transcription.rbs +95 -3
  73. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  74. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/image_edit_params.rbs +22 -0
  79. data/sig/openai/models/responses/response.rbs +22 -5
  80. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  81. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  82. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  83. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  86. data/sig/openai/models/responses/response_create_params.rbs +25 -11
  87. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  88. data/sig/openai/models/responses/response_prompt.rbs +44 -0
  89. data/sig/openai/resources/audio/speech.rbs +1 -0
  90. data/sig/openai/resources/images.rbs +2 -0
  91. data/sig/openai/resources/responses.rbs +6 -4
  92. metadata +5 -2
@@ -76,6 +76,12 @@ module OpenAI
76
76
  # @return [String]
77
77
  required :file_id, String
78
78
 
79
+ # @!attribute filename
80
+ # The filename of the file cited.
81
+ #
82
+ # @return [String]
83
+ required :filename, String
84
+
79
85
  # @!attribute index
80
86
  # The index of the file in the list of files.
81
87
  #
@@ -88,11 +94,13 @@ module OpenAI
88
94
  # @return [Symbol, :file_citation]
89
95
  required :type, const: :file_citation
90
96
 
91
- # @!method initialize(file_id:, index:, type: :file_citation)
97
+ # @!method initialize(file_id:, filename:, index:, type: :file_citation)
92
98
  # A citation to a file.
93
99
  #
94
100
  # @param file_id [String] The ID of the file.
95
101
  #
102
+ # @param filename [String] The filename of the file cited.
103
+ #
96
104
  # @param index [Integer] The index of the file in the list of files.
97
105
  #
98
106
  # @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`.
@@ -162,6 +170,12 @@ module OpenAI
162
170
  # @return [String]
163
171
  required :file_id, String
164
172
 
173
+ # @!attribute filename
174
+ # The filename of the container file cited.
175
+ #
176
+ # @return [String]
177
+ required :filename, String
178
+
165
179
  # @!attribute start_index
166
180
  # The index of the first character of the container file citation in the message.
167
181
  #
@@ -174,7 +188,7 @@ module OpenAI
174
188
  # @return [Symbol, :container_file_citation]
175
189
  required :type, const: :container_file_citation
176
190
 
177
- # @!method initialize(container_id:, end_index:, file_id:, start_index:, type: :container_file_citation)
191
+ # @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation)
178
192
  # A citation for a container file used to generate a model response.
179
193
  #
180
194
  # @param container_id [String] The ID of the container file.
@@ -183,6 +197,8 @@ module OpenAI
183
197
  #
184
198
  # @param file_id [String] The ID of the file.
185
199
  #
200
+ # @param filename [String] The filename of the container file cited.
201
+ #
186
202
  # @param start_index [Integer] The index of the first character of the container file citation in the message.
187
203
  #
188
204
  # @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`.
@@ -0,0 +1,63 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ResponsePrompt < OpenAI::Internal::Type::BaseModel
7
+ # @!attribute id
8
+ # The unique identifier of the prompt template to use.
9
+ #
10
+ # @return [String]
11
+ required :id, String
12
+
13
+ # @!attribute variables
14
+ # Optional map of values to substitute in for variables in your prompt. The
15
+ # substitution values can either be strings, or other Response input types like
16
+ # images or files.
17
+ #
18
+ # @return [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil]
19
+ optional :variables,
20
+ -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponsePrompt::Variable] },
21
+ nil?: true
22
+
23
+ # @!attribute version
24
+ # Optional version of the prompt template.
25
+ #
26
+ # @return [String, nil]
27
+ optional :version, String, nil?: true
28
+
29
+ # @!method initialize(id:, variables: nil, version: nil)
30
+ # Some parameter documentations has been truncated, see
31
+ # {OpenAI::Models::Responses::ResponsePrompt} for more details.
32
+ #
33
+ # Reference to a prompt template and its variables.
34
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
35
+ #
36
+ # @param id [String] The unique identifier of the prompt template to use.
37
+ #
38
+ # @param variables [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil] Optional map of values to substitute in for variables in your
39
+ #
40
+ # @param version [String, nil] Optional version of the prompt template.
41
+
42
+ # A text input to the model.
43
+ module Variable
44
+ extend OpenAI::Internal::Type::Union
45
+
46
+ variant String
47
+
48
+ # A text input to the model.
49
+ variant -> { OpenAI::Responses::ResponseInputText }
50
+
51
+ # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
52
+ variant -> { OpenAI::Responses::ResponseInputImage }
53
+
54
+ # A file input to the model.
55
+ variant -> { OpenAI::Responses::ResponseInputFile }
56
+
57
+ # @!method self.variants
58
+ # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
59
+ end
60
+ end
61
+ end
62
+ end
63
+ end
@@ -24,11 +24,11 @@ module OpenAI
24
24
  # Emitted when the full audio transcript is completed.
25
25
  variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
26
26
 
27
- # Emitted when a partial code snippet is added by the code interpreter.
27
+ # Emitted when a partial code snippet is streamed by the code interpreter.
28
28
  variant :"response.code_interpreter_call_code.delta",
29
29
  -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent }
30
30
 
31
- # Emitted when code snippet output is finalized by the code interpreter.
31
+ # Emitted when the code snippet is finalized by the code interpreter.
32
32
  variant :"response.code_interpreter_call_code.done",
33
33
  -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent }
34
34
 
@@ -9,7 +9,7 @@ module OpenAI
9
9
  #
10
10
  # Generates audio from the input text.
11
11
  #
12
- # @overload create(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, request_options: {})
12
+ # @overload create(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {})
13
13
  #
14
14
  # @param input [String] The text to generate audio for. The maximum length is 4096 characters.
15
15
  #
@@ -23,6 +23,8 @@ module OpenAI
23
23
  #
24
24
  # @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
25
25
  #
26
+ # @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse
27
+ #
26
28
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
27
29
  #
28
30
  # @return [StringIO]
@@ -119,6 +119,14 @@ module OpenAI
119
119
  }
120
120
  }
121
121
  )
122
+ in {response_format: {type: :json_schema, json_schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
123
+ parsed.fetch(:response_format).update(
124
+ json_schema: {
125
+ strict: true,
126
+ name: model.name.split("::").last,
127
+ schema: model.to_json_schema
128
+ }
129
+ )
122
130
  in {response_format: {type: :json_schema, json_schema: {schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
123
131
  parsed.dig(:response_format, :json_schema).store(:schema, model.to_json_schema)
124
132
  in {tools: Array => tools}
@@ -16,7 +16,7 @@ module OpenAI
16
16
  # Response includes details of the enqueued job including job status and the name
17
17
  # of the fine-tuned models once complete.
18
18
  #
19
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
19
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
20
20
  #
21
21
  # @overload create(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {})
22
22
  #
@@ -59,7 +59,7 @@ module OpenAI
59
59
  #
60
60
  # Get info about a fine-tuning job.
61
61
  #
62
- # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
62
+ # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
63
63
  #
64
64
  # @overload retrieve(fine_tuning_job_id, request_options: {})
65
65
  #
@@ -45,7 +45,7 @@ module OpenAI
45
45
  # Creates an edited or extended image given one or more source images and a
46
46
  # prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
47
47
  #
48
- # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
48
+ # @overload edit(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
49
49
  #
50
50
  # @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>] The image(s) to edit. Must be a supported image file or an array of images.
51
51
  #
@@ -59,6 +59,10 @@ module OpenAI
59
59
  #
60
60
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
61
61
  #
62
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
63
+ #
64
+ # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
65
+ #
62
66
  # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
63
67
  #
64
68
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -23,26 +23,28 @@ module OpenAI
23
23
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
24
24
  # your own data as input for the model's response.
25
25
  #
26
- # @overload create(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
27
- #
28
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
29
- #
30
- # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
26
+ # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
31
27
  #
32
28
  # @param background [Boolean, nil] Whether to run the model response in the background.
33
29
  #
34
30
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
35
31
  #
36
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
32
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
33
+ #
34
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
37
35
  #
38
36
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
39
37
  #
40
38
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
41
39
  #
40
+ # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
41
+ #
42
42
  # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
43
43
  #
44
44
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
45
45
  #
46
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
47
+ #
46
48
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
47
49
  #
48
50
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -68,7 +70,7 @@ module OpenAI
68
70
  # @return [OpenAI::Models::Responses::Response]
69
71
  #
70
72
  # @see OpenAI::Models::Responses::ResponseCreateParams
71
- def create(params)
73
+ def create(params = {})
72
74
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
73
75
  if parsed[:stream]
74
76
  message = "Please use `#stream_raw` for the streaming use case."
@@ -181,26 +183,28 @@ module OpenAI
181
183
  # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
182
184
  # your own data as input for the model's response.
183
185
  #
184
- # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
185
- #
186
- # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
187
- #
188
- # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
186
+ # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
189
187
  #
190
188
  # @param background [Boolean, nil] Whether to run the model response in the background.
191
189
  #
192
190
  # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
193
191
  #
194
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
192
+ # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
193
+ #
194
+ # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
195
195
  #
196
196
  # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
197
197
  #
198
198
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
199
199
  #
200
+ # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
201
+ #
200
202
  # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
201
203
  #
202
204
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
203
205
  #
206
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
207
+ #
204
208
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
205
209
  #
206
210
  # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -226,7 +230,7 @@ module OpenAI
226
230
  # @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
227
231
  #
228
232
  # @see OpenAI::Models::Responses::ResponseCreateParams
229
- def stream_raw(params)
233
+ def stream_raw(params = {})
230
234
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
231
235
  unless parsed.fetch(:stream, true)
232
236
  message = "Please use `#create` for the non-streaming use case."
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.8.0"
4
+ VERSION = "0.10.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -416,6 +416,7 @@ require_relative "openai/models/responses/response_output_message"
416
416
  require_relative "openai/models/responses/response_output_refusal"
417
417
  require_relative "openai/models/responses/response_output_text"
418
418
  require_relative "openai/models/responses/response_output_text_annotation_added_event"
419
+ require_relative "openai/models/responses/response_prompt"
419
420
  require_relative "openai/models/responses/response_queued_event"
420
421
  require_relative "openai/models/responses/response_reasoning_delta_event"
421
422
  require_relative "openai/models/responses/response_reasoning_done_event"
@@ -8,6 +8,22 @@ module OpenAI
8
8
  end
9
9
 
10
10
  class ConversionError < OpenAI::Errors::Error
11
+ sig { returns(T.nilable(StandardError)) }
12
+ def cause
13
+ end
14
+
15
+ # @api private
16
+ sig do
17
+ params(
18
+ on: T::Class[StandardError],
19
+ method: Symbol,
20
+ target: T.anything,
21
+ value: T.anything,
22
+ cause: T.nilable(StandardError)
23
+ ).returns(T.attached_class)
24
+ end
25
+ def self.new(on:, method:, target:, value:, cause: nil)
26
+ end
11
27
  end
12
28
 
13
29
  class APIError < OpenAI::Errors::Error
@@ -22,6 +22,8 @@ module OpenAI
22
22
 
23
23
  class << self
24
24
  # @api private
25
+ #
26
+ # Coerce value to Boolean if possible, otherwise return the original value.
25
27
  sig do
26
28
  override
27
29
  .params(
@@ -15,12 +15,14 @@ module OpenAI
15
15
  CoerceState =
16
16
  T.type_alias do
17
17
  {
18
- strictness: T.any(T::Boolean, Symbol),
18
+ translate_names: T::Boolean,
19
+ strictness: T::Boolean,
19
20
  exactness: {
20
21
  yes: Integer,
21
22
  no: Integer,
22
23
  maybe: Integer
23
24
  },
25
+ error: T::Class[StandardError],
24
26
  branched: Integer
25
27
  }
26
28
  end
@@ -84,6 +86,15 @@ module OpenAI
84
86
  def self.type_info(spec)
85
87
  end
86
88
 
89
+ # @api private
90
+ sig do
91
+ params(translate_names: T::Boolean).returns(
92
+ OpenAI::Internal::Type::Converter::CoerceState
93
+ )
94
+ end
95
+ def self.new_coerce_state(translate_names: true)
96
+ end
97
+
87
98
  # @api private
88
99
  #
89
100
  # Based on `target`, transform `value` into `target`, to the extent possible:
@@ -105,14 +116,11 @@ module OpenAI
105
116
  def self.coerce(
106
117
  target,
107
118
  value,
108
- # The `strictness` is one of `true`, `false`, or `:strong`. This informs the
109
- # coercion strategy when we have to decide between multiple possible conversion
110
- # targets:
119
+ # The `strictness` is one of `true`, `false`. This informs the coercion strategy
120
+ # when we have to decide between multiple possible conversion targets:
111
121
  #
112
122
  # - `true`: the conversion must be exact, with minimum coercion.
113
123
  # - `false`: the conversion can be approximate, with some coercion.
114
- # - `:strong`: the conversion must be exact, with no coercion, and raise an error
115
- # if not possible.
116
124
  #
117
125
  # The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For
118
126
  # any given conversion attempt, the exactness will be updated based on how closely
@@ -124,15 +132,7 @@ module OpenAI
124
132
  # - `no`: the value cannot be converted to the target type.
125
133
  #
126
134
  # See implementation below for more details.
127
- state: {
128
- strictness: true,
129
- exactness: {
130
- yes: 0,
131
- no: 0,
132
- maybe: 0
133
- },
134
- branched: 0
135
- }
135
+ state: OpenAI::Internal::Type::Converter.new_coerce_state
136
136
  )
137
137
  end
138
138
 
@@ -78,6 +78,11 @@ module OpenAI
78
78
  end
79
79
 
80
80
  # @api private
81
+ #
82
+ # Tries to efficiently coerce the given value to one of the known variants.
83
+ #
84
+ # If the value cannot match any of the known variants, the coercion is considered
85
+ # non-viable and returns the original value.
81
86
  sig do
82
87
  override
83
88
  .params(
@@ -22,6 +22,8 @@ module OpenAI
22
22
 
23
23
  class << self
24
24
  # @api private
25
+ #
26
+ # No coercion needed for Unknown type.
25
27
  sig do
26
28
  override
27
29
  .params(
@@ -60,13 +60,30 @@ module OpenAI
60
60
  attr_writer :response_format
61
61
 
62
62
  # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
63
- # the default. Does not work with `gpt-4o-mini-tts`.
63
+ # the default.
64
64
  sig { returns(T.nilable(Float)) }
65
65
  attr_reader :speed
66
66
 
67
67
  sig { params(speed: Float).void }
68
68
  attr_writer :speed
69
69
 
70
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
71
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
72
+ sig do
73
+ returns(
74
+ T.nilable(OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol)
75
+ )
76
+ end
77
+ attr_reader :stream_format
78
+
79
+ sig do
80
+ params(
81
+ stream_format:
82
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol
83
+ ).void
84
+ end
85
+ attr_writer :stream_format
86
+
70
87
  sig do
71
88
  params(
72
89
  input: String,
@@ -77,6 +94,8 @@ module OpenAI
77
94
  response_format:
78
95
  OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
79
96
  speed: Float,
97
+ stream_format:
98
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
80
99
  request_options: OpenAI::RequestOptions::OrHash
81
100
  ).returns(T.attached_class)
82
101
  end
@@ -98,8 +117,11 @@ module OpenAI
98
117
  # `wav`, and `pcm`.
99
118
  response_format: nil,
100
119
  # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
101
- # the default. Does not work with `gpt-4o-mini-tts`.
120
+ # the default.
102
121
  speed: nil,
122
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
123
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
124
+ stream_format: nil,
103
125
  request_options: {}
104
126
  )
105
127
  end
@@ -118,6 +140,8 @@ module OpenAI
118
140
  response_format:
119
141
  OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
120
142
  speed: Float,
143
+ stream_format:
144
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
121
145
  request_options: OpenAI::RequestOptions
122
146
  }
123
147
  )
@@ -267,6 +291,39 @@ module OpenAI
267
291
  def self.values
268
292
  end
269
293
  end
294
+
295
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
296
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
297
+ module StreamFormat
298
+ extend OpenAI::Internal::Type::Enum
299
+
300
+ TaggedSymbol =
301
+ T.type_alias do
302
+ T.all(Symbol, OpenAI::Audio::SpeechCreateParams::StreamFormat)
303
+ end
304
+ OrSymbol = T.type_alias { T.any(Symbol, String) }
305
+
306
+ SSE =
307
+ T.let(
308
+ :sse,
309
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
310
+ )
311
+ AUDIO =
312
+ T.let(
313
+ :audio,
314
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
315
+ )
316
+
317
+ sig do
318
+ override.returns(
319
+ T::Array[
320
+ OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
321
+ ]
322
+ )
323
+ end
324
+ def self.values
325
+ end
326
+ end
270
327
  end
271
328
  end
272
329
  end