openai 0.10.0 → 0.12.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +36 -0
- data/README.md +83 -7
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +3 -0
- data/lib/openai/helpers/streaming/events.rb +23 -0
- data/lib/openai/helpers/streaming/response_stream.rb +232 -0
- data/lib/openai/helpers/structured_output/parsed_json.rb +39 -0
- data/lib/openai/internal/stream.rb +2 -1
- data/lib/openai/internal/transport/base_client.rb +10 -2
- data/lib/openai/internal/type/base_stream.rb +3 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/chat_completion_message.rb +1 -1
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +1 -1
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_create_params.rb +64 -39
- data/lib/openai/models/responses/response_function_tool_call.rb +1 -1
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +1 -1
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/chat/completions.rb +14 -6
- data/lib/openai/resources/responses.rb +262 -81
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/streaming.rb +5 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +22 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/helpers/streaming/events.rbi +31 -0
- data/rbi/openai/helpers/streaming/response_stream.rbi +104 -0
- data/rbi/openai/internal/type/base_stream.rbi +8 -1
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_create_params.rbi +91 -55
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/responses.rbi +188 -39
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/rbi/openai/streaming.rbi +5 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/internal/type/base_stream.rbs +4 -0
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_create_params.rbs +13 -1
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/responses.rbs +4 -0
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +63 -2
data/lib/openai/resources/responses.rb
CHANGED
@@ -23,7 +23,7 @@ module OpenAI
       # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
       # your own data as input for the model's response.
       #
-      # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+      # @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
       #
       # @param background [Boolean, nil] Whether to run the model response in the background.
       #
@@ -35,6 +35,8 @@ module OpenAI
       #
       # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
       #
+      # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+      #
       # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
       #
       # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
@@ -47,7 +49,7 @@ module OpenAI
       #
       # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
       #
-      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the
+      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
       #
       # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
       #
@@ -55,10 +57,12 @@ module OpenAI
       #
       # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
       #
-      # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+      # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
       #
       # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
       #
+      # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+      #
       # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
       #
       # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
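For orientation, the two newly documented parameters thread straight through to `create`. A hypothetical call might look like this (`client`, the model name, and the values are illustrative, not taken from this diff):

```ruby
# Hypothetical sketch: exercising the newly documented parameters.
response = client.responses.create(
  model: "gpt-4o",
  input: "Summarize the three most recent support tickets.",
  max_tool_calls: 3, # cap on total built-in tool invocations for this response
  top_logprobs: 5    # request the 5 most likely tokens at each position
)
puts(response.id)
```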
@@ -77,81 +81,12 @@ module OpenAI
           raise ArgumentError.new(message)
         end
 
-        model =
-        tool_models = {}
-        case parsed
-        in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
-          parsed.update(
-            text: {
-              format: {
-                type: :json_schema,
-                strict: true,
-                name: model.name.split("::").last,
-                schema: model.to_json_schema
-              }
-            }
-          )
-        in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
-          parsed.fetch(:text).update(
-            format: {
-              type: :json_schema,
-              strict: true,
-              name: model.name.split("::").last,
-              schema: model.to_json_schema
-            }
-          )
-        in {text: {format: {type: :json_schema, schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
-          parsed.dig(:text, :format).store(:schema, model.to_json_schema)
-        in {tools: Array => tools}
-          mapped = tools.map do |tool|
-            case tool
-            in OpenAI::StructuredOutput::JsonSchemaConverter
-              name = tool.name.split("::").last
-              tool_models.store(name, tool)
-              {
-                type: :function,
-                strict: true,
-                name: name,
-                parameters: tool.to_json_schema
-              }
-            in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
-              func = tool.fetch(:function)
-              name = func[:name] ||= params.name.split("::").last
-              tool_models.store(name, params)
-              func.update(parameters: params.to_json_schema)
-              tool
-            else
-              tool
-            end
-          end
-          tools.replace(mapped)
-        else
-        end
+        model, tool_models = get_structured_output_models(parsed)
 
         unwrap = ->(raw) do
-
-          raw[:output]
-            &.flat_map do |output|
-              next [] unless output[:type] == "message"
-              output[:content].to_a
-            end
-            &.each do |content|
-              next unless content[:type] == "output_text"
-              parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
-              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-              content.store(:parsed, coerced)
-            end
-          raw[:output]&.each do |output|
-            next unless output[:type] == "function_call"
-            next if (model = tool_models[output.fetch(:name)]).nil?
-            parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
-            coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-            output.store(:parsed, coerced)
-          end
-
-          raw
+          parse_structured_outputs!(raw, model, tool_models)
         end
+
         @client.request(
           method: :post,
           path: "responses",
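The inline pattern matching removed above now lives in two private helpers, shown in the `@@ -370,6 +414,143 @@` hunk further down. A hedged sketch of the behavior they preserve, assuming `Weather` is a hypothetical `OpenAI::BaseModel` subclass and `client` is an `OpenAI::Client`:

```ruby
# Hedged sketch: structured text output through the refactored code path.
class Weather < OpenAI::BaseModel
  required :city, String
  required :temperature_c, Float
end

response = client.responses.create(
  model: "gpt-4o",
  input: "What's the weather in Paris?",
  text: Weather # matches the {text: JsonSchemaConverter} branch
)
# Each output_text content item now carries a coerced Weather in :parsed.
```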
@@ -162,8 +97,112 @@ module OpenAI
         )
       end
 
-
-
+      # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
+      #
+      # Some parameter documentations has been truncated, see
+      # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
+      #
+      # Creates a model response. Provide
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [image](https://platform.openai.com/docs/guides/images) inputs to generate
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+      # the model call your own
+      # [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+      # built-in [tools](https://platform.openai.com/docs/guides/tools) like
+      # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+      # your own data as input for the model's response.
+      #
+      # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+      #
+      # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
+      #
+      # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
+      #
+      # @param background [Boolean, nil] Whether to run the model response in the background.
+      #
+      # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
+      #
+      # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
+      #
+      # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
+      #
+      # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+      #
+      # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
+      #
+      # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
+      #
+      # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+      #
+      # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
+      #
+      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+      #
+      # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
+      #
+      # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
+      #
+      # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
+      #
+      # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+      #
+      # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+      #
+      # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
+      #
+      # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
+      #
+      # @param user [String] A stable identifier for your end-users.
+      #
+      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+      #
+      # @return [OpenAI::Helpers::Streaming::ResponseStream]
+      #
+      # @see OpenAI::Models::Responses::ResponseCreateParams
+      def stream(params)
+        parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
+        starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)
+
+        if starting_after && !previous_response_id
+          raise ArgumentError, "starting_after can only be used with previous_response_id"
+        end
+        model, tool_models = get_structured_output_models(parsed)
+
+        if previous_response_id
+          retrieve_params = {}
+          retrieve_params[:include] = params[:include] if params[:include]
+          retrieve_params[:request_options] = params[:request_options] if params[:request_options]
+
+          raw_stream = retrieve_streaming(previous_response_id, retrieve_params)
+        else
+          unwrap = ->(raw) do
+            if raw[:type] == "response.completed" && raw[:response]
+              parse_structured_outputs!(raw[:response], model, tool_models)
+            end
+            raw
+          end
+
+          parsed[:stream] = true
+
+          raw_stream = @client.request(
+            method: :post,
+            path: "responses",
+            headers: {"accept" => "text/event-stream"},
+            body: parsed,
+            stream: OpenAI::Internal::Stream,
+            model: OpenAI::Models::Responses::ResponseStreamEvent,
+            unwrap: unwrap,
+            options: options
+          )
+        end
+
+        OpenAI::Streaming::ResponseStream.new(
+          raw_stream: raw_stream,
+          text_format: model,
+          starting_after: starting_after
+        )
       end
 
       # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
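The new `stream` method is the headline change here: it wraps the SSE stream in `OpenAI::Streaming::ResponseStream` and can resume a stream by response ID. A hedged usage sketch (model, prompt, and IDs are illustrative, and the stream is assumed to be enumerable):

```ruby
# Hedged sketch: consuming the new streaming helper.
stream = client.responses.stream(
  model: "gpt-4o",
  input: "Write a haiku about Ruby."
)
stream.each do |event|
  # Delta events expose incremental text; see the events rbi at the end.
  print(event.delta) if event.respond_to?(:delta)
end

# Resuming: starting_after is only valid together with previous_response_id,
# matching the ArgumentError guard above. Both values are hypothetical.
client.responses.stream(
  previous_response_id: "resp_abc123",
  starting_after: 42
)
```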
@@ -183,7 +222,7 @@ module OpenAI
       # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
       # your own data as input for the model's response.
       #
-      # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+      # @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
       #
       # @param background [Boolean, nil] Whether to run the model response in the background.
       #
@@ -195,19 +234,21 @@ module OpenAI
       #
       # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
       #
+      # @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
+      #
       # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
       #
       # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
       #
       # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
       #
-      # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
+      # @param previous_response_id [String, nil] The unique ID of the previous response to the response to the model. Use this to resume streams from a given response.
       #
       # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
       #
       # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
       #
-      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the
+      # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
       #
       # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
       #
@@ -215,10 +256,12 @@ module OpenAI
       #
       # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
       #
-      # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+      # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
       #
       # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
       #
+      # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
+      #
       # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
       #
       # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
@@ -237,6 +280,7 @@ module OpenAI
           raise ArgumentError.new(message)
         end
         parsed.store(:stream, true)
+
         @client.request(
           method: :post,
           path: "responses",
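By contrast with the `stream` helper above, `stream_raw` keeps yielding the untyped `ResponseStreamEvent` objects. A brief hedged sketch (model and prompt are illustrative):

```ruby
# Hedged sketch: raw server-sent events, without the helper wrapping.
client.responses.stream_raw(
  model: "gpt-4o",
  input: "Count to five."
).each do |event|
  puts(event.type)
end
```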
@@ -370,6 +414,143 @@ module OpenAI
         @client = client
         @input_items = OpenAI::Resources::Responses::InputItems.new(client: client)
       end
+
+      private
+
+      # Post-processes raw API responses to parse and coerce structured outputs into typed Ruby objects.
+      #
+      # This method enhances the raw API response by parsing JSON content in structured outputs
+      # (both text outputs and function/tool calls) and converting them to their corresponding
+      # Ruby types using the JsonSchemaConverter models identified during request preparation.
+      #
+      # @param raw [Hash] The raw API response hash that will be mutated with parsed data
+      # @param model [JsonSchemaConverter|nil] The converter for structured text output, if specified
+      # @param tool_models [Hash<String, JsonSchemaConverter>] Hash mapping tool names to their converters
+      # @return [Hash] The mutated raw response with added :parsed fields containing typed Ruby objects
+      #
+      # The method performs two main transformations:
+      # 1. For structured text outputs: Finds output_text content, parses the JSON, and coerces it
+      #    to the model type, adding the result as content[:parsed]
+      # 2. For function/tool calls: Looks up the tool's converter by name, parses the arguments JSON,
+      #    and coerces it to the appropriate type, adding the result as output[:parsed]
+      def parse_structured_outputs!(raw, model, tool_models)
+        if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
+          raw[:output]
+            &.flat_map do |output|
+              next [] unless output[:type] == "message"
+              output[:content].to_a
+            end
+            &.each do |content|
+              next unless content[:type] == "output_text"
+              begin
+                parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
+              rescue JSON::ParserError => e
+                parsed = e
+              end
+              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+              content.store(:parsed, coerced)
+            end
+        end
+        raw[:output]&.each do |output|
+          next unless output[:type] == "function_call"
+          next if (model = tool_models[output.fetch(:name)]).nil?
+          begin
+            parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
+          rescue JSON::ParserError => e
+            parsed = e
+          end
+          coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+          output.store(:parsed, coerced)
+        end
+
+        raw
+      end
+
+      # Extracts structured output models from request parameters and converts them to JSON Schema format.
+      #
+      # This method processes the parsed request parameters to identify any JsonSchemaConverter instances
+      # that define expected output schemas. It transforms these Ruby schema definitions into the JSON
+      # Schema format required by the OpenAI API, enabling type-safe structured outputs.
+      #
+      # @param parsed [Hash] The parsed request parameters that may contain structured output definitions
+      # @return [Array<(JsonSchemaConverter|nil, Hash)>] A tuple containing:
+      #   - model: The JsonSchemaConverter for structured text output (or nil if not specified)
+      #   - tool_models: Hash mapping tool names to their JsonSchemaConverter models
+      #
+      # The method handles multiple ways structured outputs can be specified:
+      #   - Direct text format: { text: JsonSchemaConverter }
+      #   - Nested text format: { text: { format: JsonSchemaConverter } }
+      #   - Deep nested format: { text: { format: { type: :json_schema, schema: JsonSchemaConverter } } }
+      #   - Tool parameters: { tools: [JsonSchemaConverter, ...] } or tools with parameters as converters
+      def get_structured_output_models(parsed)
+        model = nil
+        tool_models = {}
+
+        case parsed
+        in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
+          parsed.update(
+            text: {
+              format: {
+                type: :json_schema,
+                strict: true,
+                name: model.name.split("::").last,
+                schema: model.to_json_schema
+              }
+            }
+          )
+        in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
+          parsed.fetch(:text).update(
+            format: {
+              type: :json_schema,
+              strict: true,
+              name: model.name.split("::").last,
+              schema: model.to_json_schema
+            }
+          )
+        in {text: {format: {type: :json_schema,
+                            schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
+          parsed.dig(:text, :format).store(:schema, model.to_json_schema)
+        in {tools: Array => tools}
+          # rubocop:disable Metrics/BlockLength
+          mapped = tools.map do |tool|
+            case tool
+            in OpenAI::StructuredOutput::JsonSchemaConverter
+              name = tool.name.split("::").last
+              tool_models.store(name, tool)
+              {
+                type: :function,
+                strict: true,
+                name: name,
+                parameters: tool.to_json_schema
+              }
+            in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
+              func = tool.fetch(:function)
+              name = func[:name] ||= params.name.split("::").last
+              tool_models.store(name, params)
+              func.update(parameters: params.to_json_schema)
+              tool
+            in {type: _, function: {parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params, **}}
+              name = tool[:function][:name] || params.name.split("::").last
+              tool_models.store(name, params)
+              tool[:function][:parameters] = params.to_json_schema
+              tool
+            in {type: _, function: Hash => func} if func[:parameters].is_a?(Class) && func[:parameters] < OpenAI::Internal::Type::BaseModel
+              params = func[:parameters]
+              name = func[:name] || params.name.split("::").last
+              tool_models.store(name, params)
+              func[:parameters] = params.to_json_schema
+              tool
+            else
+              tool
+            end
+          end
+          # rubocop:enable Metrics/BlockLength
+          tools.replace(mapped)
+        else
+        end
+
+        [model, tool_models]
+      end
     end
   end
 end
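One input shape worth illustrating: `get_structured_output_models` accepts a converter class passed directly in `tools`. A hedged sketch with a hypothetical `DraftEmail` model:

```ruby
# Hedged sketch: a converter passed as a tool is rewritten into a strict
# function tool (first branch of the tools case above).
class DraftEmail < OpenAI::BaseModel
  required :subject, String
  required :body, String
end

client.responses.create(
  model: "gpt-4o",
  input: "Draft a short email inviting the team to lunch.",
  tools: [DraftEmail]
)
# Matching function_call outputs then carry a coerced DraftEmail in :parsed.
```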
data/lib/openai/resources/webhooks.rb
ADDED
@@ -0,0 +1,124 @@
+# frozen_string_literal: true
+
+require "openssl"
+require "base64"
+
+module OpenAI
+  module Resources
+    class Webhooks
+      # Validates that the given payload was sent by OpenAI and parses the payload.
+      #
+      # @param payload [String] The raw webhook payload as a string
+      # @param headers [Hash] The webhook headers
+      # @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+      #
+      # @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseCreatedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent]
+      #
+      # @raise [ArgumentError] if signature verification fails
+      def unwrap(
+        payload,
+        headers = {},
+        webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"]
+      )
+        verify_signature(payload, headers, webhook_secret)
+
+        parsed = JSON.parse(payload, symbolize_names: true)
+        OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::Webhooks::UnwrapWebhookEvent, parsed)
+      end
+
+      # Validates whether or not the webhook payload was sent by OpenAI.
+      #
+      # @param payload [String] The webhook payload as a string
+      # @param headers [Hash] The webhook headers
+      # @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+      # @param tolerance [Integer] Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+      #
+      # @raise [ArgumentError] if the signature is invalid
+      def verify_signature(
+        payload,
+        headers,
+        webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"],
+        tolerance = 300
+      )
+        if webhook_secret.nil?
+          raise ArgumentError,
+                "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, " \
+                "or passed to this function"
+        end
+
+        # Extract required headers
+        signature_header = headers["webhook-signature"] || headers[:webhook_signature]
+        timestamp_header = headers["webhook-timestamp"] || headers[:webhook_timestamp]
+        webhook_id = headers["webhook-id"] || headers[:webhook_id]
+
+        if signature_header.nil?
+          raise ArgumentError, "Missing required webhook-signature header"
+        end
+
+        if timestamp_header.nil?
+          raise ArgumentError, "Missing required webhook-timestamp header"
+        end
+
+        if webhook_id.nil?
+          raise ArgumentError, "Missing required webhook-id header"
+        end
+
+        # Validate timestamp to prevent replay attacks
+        begin
+          timestamp_seconds = timestamp_header.to_i
+        rescue ArgumentError
+          raise ArgumentError, "Invalid webhook timestamp format"
+        end
+
+        now = Time.now.to_i
+
+        if now - timestamp_seconds > tolerance
+          raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too old"
+        end
+
+        if timestamp_seconds > now + tolerance
+          raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too new"
+        end
+
+        # Extract signatures from v1,<base64> format
+        # The signature header can have multiple values, separated by spaces.
+        # Each value is in the format v1,<base64>. We should accept if any match.
+        signatures = signature_header.split.map do |part|
+          if part.start_with?("v1,")
+            part[3..]
+          else
+            part
+          end
+        end
+
+        # Decode the secret if it starts with whsec_
+        decoded_secret = if webhook_secret.start_with?("whsec_")
+          Base64.decode64(webhook_secret[6..])
+        else
+          webhook_secret
+        end
+
+        # Create the signed payload: {webhook_id}.{timestamp}.{payload}
+        signed_payload = "#{webhook_id}.#{timestamp_header}.#{payload}"
+
+        # Compute HMAC-SHA256 signature
+        expected_signature = Base64.encode64(
+          OpenSSL::HMAC.digest("sha256", decoded_secret, signed_payload)
+        ).strip
+
+        # Accept if any signature matches using timing-safe comparison
+        return if signatures.any? { |signature| OpenSSL.secure_compare(expected_signature, signature) }
+
+        raise OpenAI::Errors::InvalidWebhookSignatureError,
+              "The given webhook signature does not match the expected signature"
+      end
+
+      # @api private
+      #
+      # @param client [OpenAI::Client]
+      def initialize(client:)
+        @client = client
+      end
+    end
+  end
+end
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -56,6 +56,7 @@ require_relative "openai/helpers/structured_output/enum_of"
 require_relative "openai/helpers/structured_output/union_of"
 require_relative "openai/helpers/structured_output/array_of"
 require_relative "openai/helpers/structured_output/base_model"
+require_relative "openai/helpers/structured_output/parsed_json"
 require_relative "openai/helpers/structured_output"
 require_relative "openai/structured_output"
 require_relative "openai/models/reasoning_effort"
@@ -441,6 +442,7 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_e
 require_relative "openai/models/responses/response_web_search_call_searching_event"
 require_relative "openai/models/responses/tool"
 require_relative "openai/models/responses/tool_choice_function"
+require_relative "openai/models/responses/tool_choice_mcp"
 require_relative "openai/models/responses/tool_choice_options"
 require_relative "openai/models/responses/tool_choice_types"
 require_relative "openai/models/responses/web_search_tool"
@@ -477,6 +479,22 @@ require_relative "openai/models/vector_stores/vector_store_file_deleted"
 require_relative "openai/models/vector_store_search_params"
 require_relative "openai/models/vector_store_search_response"
 require_relative "openai/models/vector_store_update_params"
+require_relative "openai/models/webhooks/batch_cancelled_webhook_event"
+require_relative "openai/models/webhooks/batch_completed_webhook_event"
+require_relative "openai/models/webhooks/batch_expired_webhook_event"
+require_relative "openai/models/webhooks/batch_failed_webhook_event"
+require_relative "openai/models/webhooks/eval_run_canceled_webhook_event"
+require_relative "openai/models/webhooks/eval_run_failed_webhook_event"
+require_relative "openai/models/webhooks/eval_run_succeeded_webhook_event"
+require_relative "openai/models/webhooks/fine_tuning_job_cancelled_webhook_event"
+require_relative "openai/models/webhooks/fine_tuning_job_failed_webhook_event"
+require_relative "openai/models/webhooks/fine_tuning_job_succeeded_webhook_event"
+require_relative "openai/models/webhooks/response_cancelled_webhook_event"
+require_relative "openai/models/webhooks/response_completed_webhook_event"
+require_relative "openai/models/webhooks/response_failed_webhook_event"
+require_relative "openai/models/webhooks/response_incomplete_webhook_event"
+require_relative "openai/models/webhooks/unwrap_webhook_event"
+require_relative "openai/models/webhooks/webhook_unwrap_params"
 require_relative "openai/models"
 require_relative "openai/resources/audio"
 require_relative "openai/resources/audio/speech"
@@ -521,3 +539,7 @@ require_relative "openai/resources/uploads/parts"
 require_relative "openai/resources/vector_stores"
 require_relative "openai/resources/vector_stores/file_batches"
 require_relative "openai/resources/vector_stores/files"
+require_relative "openai/resources/webhooks"
+require_relative "openai/helpers/streaming/events"
+require_relative "openai/helpers/streaming/response_stream"
+require_relative "openai/streaming"
data/rbi/openai/client.rbi
CHANGED
data/rbi/openai/helpers/streaming/events.rbi
ADDED
@@ -0,0 +1,31 @@
+# typed: strong
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class ResponseTextDeltaEvent < OpenAI::Models::Responses::ResponseTextDeltaEvent
+        sig { returns(String) }
+        def snapshot
+        end
+      end
+
+      class ResponseTextDoneEvent < OpenAI::Models::Responses::ResponseTextDoneEvent
+        sig { returns(T.untyped) }
+        def parsed
+        end
+      end
+
+      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
+        sig { returns(String) }
+        def snapshot
+        end
+      end
+
+      class ResponseCompletedEvent < OpenAI::Models::Responses::ResponseCompletedEvent
+        sig { returns(OpenAI::Models::Responses::Response) }
+        def response
+        end
+      end
+    end
+  end
+end
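These typed helper events pair with `Responses#stream`; a hedged sketch of dispatching on them (model and prompt are illustrative):

```ruby
# Hedged sketch: pattern-matching the typed streaming events.
client.responses.stream(model: "gpt-4o", input: "Hello!").each do |event|
  case event
  in OpenAI::Helpers::Streaming::ResponseTextDeltaEvent
    print(event.delta) # event.snapshot holds the accumulated text so far
  in OpenAI::Helpers::Streaming::ResponseCompletedEvent
    puts("\nfinished: #{event.response.id}")
  else
    # other events: text done (with .parsed), function call argument deltas, ...
  end
end
```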