openai 0.11.0 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/README.md +5 -7
  4. data/lib/openai/helpers/streaming/events.rb +23 -0
  5. data/lib/openai/helpers/streaming/response_stream.rb +232 -0
  6. data/lib/openai/helpers/structured_output/parsed_json.rb +39 -0
  7. data/lib/openai/internal/stream.rb +2 -1
  8. data/lib/openai/internal/transport/base_client.rb +10 -2
  9. data/lib/openai/internal/type/base_stream.rb +3 -1
  10. data/lib/openai/models/audio/transcription.rb +4 -4
  11. data/lib/openai/models/audio/transcription_verbose.rb +4 -4
  12. data/lib/openai/models/chat/chat_completion_message.rb +1 -1
  13. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +1 -1
  14. data/lib/openai/models/file_object.rb +5 -4
  15. data/lib/openai/models/responses/response_function_tool_call.rb +1 -1
  16. data/lib/openai/models/responses/response_input_file.rb +9 -1
  17. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +5 -5
  18. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +5 -5
  19. data/lib/openai/models/responses/response_output_text.rb +1 -1
  20. data/lib/openai/models/responses/response_output_text_annotation_added_event.rb +5 -5
  21. data/lib/openai/models/responses/response_stream_event.rb +3 -3
  22. data/lib/openai/models/responses/tool.rb +9 -1
  23. data/lib/openai/resources/chat/completions.rb +12 -4
  24. data/lib/openai/resources/responses.rb +248 -75
  25. data/lib/openai/streaming.rb +5 -0
  26. data/lib/openai/version.rb +1 -1
  27. data/lib/openai.rb +4 -0
  28. data/rbi/openai/helpers/streaming/events.rbi +31 -0
  29. data/rbi/openai/helpers/streaming/response_stream.rbi +104 -0
  30. data/rbi/openai/internal/type/base_stream.rbi +8 -1
  31. data/rbi/openai/models/audio/transcription.rbi +4 -4
  32. data/rbi/openai/models/audio/transcription_verbose.rbi +4 -6
  33. data/rbi/openai/models/file_object.rbi +7 -6
  34. data/rbi/openai/models/responses/response_input_file.rbi +11 -0
  35. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +3 -3
  36. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +3 -3
  37. data/rbi/openai/models/responses/response_output_text_annotation_added_event.rbi +3 -3
  38. data/rbi/openai/models/responses/tool.rbi +12 -1
  39. data/rbi/openai/resources/responses.rbi +126 -1
  40. data/rbi/openai/streaming.rbi +5 -0
  41. data/sig/openai/internal/type/base_stream.rbs +4 -0
  42. data/sig/openai/models/audio/transcription.rbs +4 -4
  43. data/sig/openai/models/audio/transcription_verbose.rbs +4 -4
  44. data/sig/openai/models/file_object.rbs +2 -0
  45. data/sig/openai/models/responses/response_input_file.rbs +7 -0
  46. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  47. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  48. data/sig/openai/models/responses/response_output_text_annotation_added_event.rbs +4 -4
  49. data/sig/openai/models/responses/tool.rbs +9 -2
  50. metadata +9 -2
data/lib/openai/models/responses/response_stream_event.rb CHANGED
@@ -151,13 +151,13 @@ module OpenAI
               -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent }
 
       # Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
-      variant :"response.mcp_call.arguments_delta",
+      variant :"response.mcp_call_arguments.delta",
              -> {
                OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent
              }
 
       # Emitted when the arguments for an MCP tool call are finalized.
-      variant :"response.mcp_call.arguments_done",
+      variant :"response.mcp_call_arguments.done",
              -> {
                OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent
              }
@@ -185,7 +185,7 @@ module OpenAI
              -> { OpenAI::Responses::ResponseMcpListToolsInProgressEvent }
 
       # Emitted when an annotation is added to output text content.
-      variant :"response.output_text_annotation.added",
+      variant :"response.output_text.annotation.added",
              -> { OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent }
 
       # Emitted when a response is queued and waiting to be processed.
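These three event-type renames are breaking for consumers that match on raw `type` symbols. A minimal sketch of updated handling against `stream_raw` (the `handle_*` methods are hypothetical placeholders; only the type symbols come from the diff above):

```ruby
client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

client.responses.stream_raw(model: "gpt-4o", input: "hi").each do |event|
  case event.type
  in :"response.mcp_call_arguments.delta"     # was :"response.mcp_call.arguments_delta"
    handle_arguments_delta(event)
  in :"response.mcp_call_arguments.done"      # was :"response.mcp_call.arguments_done"
    handle_arguments_done(event)
  in :"response.output_text.annotation.added" # was :"response.output_text_annotation.added"
    handle_annotation(event)
  else
    # all other event types are unchanged in this release
  end
end
```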
data/lib/openai/models/responses/tool.rb CHANGED
@@ -74,7 +74,13 @@ module OpenAI
        #   @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil]
        optional :require_approval, union: -> { OpenAI::Responses::Tool::Mcp::RequireApproval }, nil?: true
 
-       # @!method initialize(server_label:, server_url:, allowed_tools: nil, headers: nil, require_approval: nil, type: :mcp)
+       # @!attribute server_description
+       #   Optional description of the MCP server, used to provide more context.
+       #
+       #   @return [String, nil]
+       optional :server_description, String
+
+       # @!method initialize(server_label:, server_url:, allowed_tools: nil, headers: nil, require_approval: nil, server_description: nil, type: :mcp)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::Tool::Mcp} for more details.
        #
@@ -92,6 +98,8 @@ module OpenAI
        #
        #   @param require_approval [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil] Specify which of the MCP server's tools require approval.
        #
+       #   @param server_description [String] Optional description of the MCP server, used to provide more context.
+       #
        #   @param type [Symbol, :mcp] The type of the MCP tool. Always `mcp`.
 
        # List of allowed tool names or a filter object.
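The new optional `server_description` field also flows through `#initialize`, so an MCP tool definition can carry extra context for the model. A brief sketch (label, URL, and description are placeholder values; only the keyword names come from the diff above):

```ruby
# Placeholder values throughout.
mcp_tool = OpenAI::Responses::Tool::Mcp.new(
  server_label: "docs",
  server_url: "https://mcp.example.com/sse",
  server_description: "Read-only search over the product documentation."
)
```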
data/lib/openai/resources/chat/completions.rb CHANGED
@@ -104,7 +104,6 @@ module OpenAI
          raise ArgumentError.new(message)
        end
 
-       # rubocop:disable Layout/LineLength
        model = nil
        tool_models = {}
        case parsed
@@ -157,11 +156,16 @@ module OpenAI
        else
        end
 
+       # rubocop:disable Metrics/BlockLength
        unwrap = ->(raw) do
          if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
            raw[:choices]&.each do |choice|
              message = choice.fetch(:message)
-             parsed = JSON.parse(message.fetch(:content), symbolize_names: true)
+             begin
+               parsed = JSON.parse(message.fetch(:content), symbolize_names: true)
+             rescue JSON::ParserError => e
+               parsed = e
+             end
              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
              message.store(:parsed, coerced)
            end
@@ -171,7 +175,11 @@ module OpenAI
            func = tool_call.fetch(:function)
            next if (model = tool_models[func.fetch(:name)]).nil?
 
-           parsed = JSON.parse(func.fetch(:arguments), symbolize_names: true)
+           begin
+             parsed = JSON.parse(func.fetch(:arguments), symbolize_names: true)
+           rescue JSON::ParserError => e
+             parsed = e
+           end
            coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
            func.store(:parsed, coerced)
          end
@@ -179,7 +187,7 @@ module OpenAI
 
          raw
        end
-       # rubocop:enable Layout/LineLength
+       # rubocop:enable Metrics/BlockLength
 
        @client.request(
          method: :post,
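The net effect of the `begin`/`rescue` blocks: a model reply whose JSON is malformed (for example, truncated by a token limit) no longer raises out of the SDK's unwrap hook; the `JSON::ParserError` is handed to the coercion step instead, which pairs with the new `parsed_json.rb` helper in this release. A hedged caller-side sketch (the `CalendarEvent` model and prompt are illustrative):

```ruby
class CalendarEvent < OpenAI::BaseModel
  required :name, String
  required :date, String
end

client = OpenAI::Client.new

completion = client.chat.completions.create(
  model: "gpt-4o",
  messages: [{role: :user, content: "Alice and Bob meet on Friday."}],
  response_format: CalendarEvent
)

message = completion.choices.first.message
# Before this change, invalid JSON in `content` raised JSON::ParserError from
# inside the request; now the error flows into coercion and the call returns.
puts(message.parsed)
```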
data/lib/openai/resources/responses.rb CHANGED
@@ -81,81 +81,12 @@ module OpenAI
          raise ArgumentError.new(message)
        end
 
-       model = nil
-       tool_models = {}
-       case parsed
-       in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
-         parsed.update(
-           text: {
-             format: {
-               type: :json_schema,
-               strict: true,
-               name: model.name.split("::").last,
-               schema: model.to_json_schema
-             }
-           }
-         )
-       in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
-         parsed.fetch(:text).update(
-           format: {
-             type: :json_schema,
-             strict: true,
-             name: model.name.split("::").last,
-             schema: model.to_json_schema
-           }
-         )
-       in {text: {format: {type: :json_schema, schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
-         parsed.dig(:text, :format).store(:schema, model.to_json_schema)
-       in {tools: Array => tools}
-         mapped = tools.map do |tool|
-           case tool
-           in OpenAI::StructuredOutput::JsonSchemaConverter
-             name = tool.name.split("::").last
-             tool_models.store(name, tool)
-             {
-               type: :function,
-               strict: true,
-               name: name,
-               parameters: tool.to_json_schema
-             }
-           in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
-             func = tool.fetch(:function)
-             name = func[:name] ||= params.name.split("::").last
-             tool_models.store(name, params)
-             func.update(parameters: params.to_json_schema)
-             tool
-           else
-             tool
-           end
-         end
-         tools.replace(mapped)
-       else
-       end
+       model, tool_models = get_structured_output_models(parsed)
 
        unwrap = ->(raw) do
-         if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
-           raw[:output]
-             &.flat_map do |output|
-               next [] unless output[:type] == "message"
-               output[:content].to_a
-             end
-             &.each do |content|
-               next unless content[:type] == "output_text"
-               parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
-               coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-               content.store(:parsed, coerced)
-             end
-         end
-         raw[:output]&.each do |output|
-           next unless output[:type] == "function_call"
-           next if (model = tool_models[output.fetch(:name)]).nil?
-           parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
-           coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-           output.store(:parsed, coerced)
-         end
-
-         raw
+         parse_structured_outputs!(raw, model, tool_models)
        end
+
        @client.request(
          method: :post,
          path: "responses",
@@ -166,8 +97,112 @@ module OpenAI
        )
      end
 
-     def stream
-       raise NotImplementedError.new("higher level helpers are coming soon!")
+     # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
+     #
+     # Some parameter documentations has been truncated, see
+     # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
+     #
+     # Creates a model response. Provide
+     # [text](https://platform.openai.com/docs/guides/text) or
+     # [image](https://platform.openai.com/docs/guides/images) inputs to generate
+     # [text](https://platform.openai.com/docs/guides/text) or
+     # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+     # the model call your own
+     # [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+     # built-in [tools](https://platform.openai.com/docs/guides/tools) like
+     # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+     # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+     # your own data as input for the model's response.
+     #
+     # @overload stream_raw(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+     #
+     # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
+     #
+     # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
+     #
+     # @param background [Boolean, nil] Whether to run the model response in the background.
+     #
+     # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
+     #
+     # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
+     #
+     # @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
+     #
+     # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
+     #
+     # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
+     #
+     # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
+     #
+     # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
+     #
+     # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
+     #
+     # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+     #
+     # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
+     #
+     # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
+     #
+     # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
+     #
+     # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
+     #
+     # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+     #
+     # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
+     #
+     # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
+     #
+     # @param user [String] A stable identifier for your end-users.
+     #
+     # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
+     #
+     # @return [OpenAI::Helpers::Streaming::ResponseStream]
+     #
+     # @see OpenAI::Models::Responses::ResponseCreateParams
+     def stream(params)
+       parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
+       starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)
+
+       if starting_after && !previous_response_id
+         raise ArgumentError, "starting_after can only be used with previous_response_id"
+       end
+       model, tool_models = get_structured_output_models(parsed)
+
+       if previous_response_id
+         retrieve_params = {}
+         retrieve_params[:include] = params[:include] if params[:include]
+         retrieve_params[:request_options] = params[:request_options] if params[:request_options]
+
+         raw_stream = retrieve_streaming(previous_response_id, retrieve_params)
+       else
+         unwrap = ->(raw) do
+           if raw[:type] == "response.completed" && raw[:response]
+             parse_structured_outputs!(raw[:response], model, tool_models)
+           end
+           raw
+         end
+
+         parsed[:stream] = true
+
+         raw_stream = @client.request(
+           method: :post,
+           path: "responses",
+           headers: {"accept" => "text/event-stream"},
+           body: parsed,
+           stream: OpenAI::Internal::Stream,
+           model: OpenAI::Models::Responses::ResponseStreamEvent,
+           unwrap: unwrap,
+           options: options
+         )
+       end
+
+       OpenAI::Streaming::ResponseStream.new(
+         raw_stream: raw_stream,
+         text_format: model,
+         starting_after: starting_after
+       )
      end
 
      # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
@@ -207,7 +242,7 @@ module OpenAI
      #
      # @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
      #
-     # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
+     # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
      #
      # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
      #
@@ -245,6 +280,7 @@ module OpenAI
        raise ArgumentError.new(message)
      end
      parsed.store(:stream, true)
+
      @client.request(
        method: :post,
        path: "responses",
@@ -378,6 +414,143 @@ module OpenAI
        @client = client
        @input_items = OpenAI::Resources::Responses::InputItems.new(client: client)
      end
+
+     private
+
+     # Post-processes raw API responses to parse and coerce structured outputs into typed Ruby objects.
+     #
+     # This method enhances the raw API response by parsing JSON content in structured outputs
+     # (both text outputs and function/tool calls) and converting them to their corresponding
+     # Ruby types using the JsonSchemaConverter models identified during request preparation.
+     #
+     # @param raw [Hash] The raw API response hash that will be mutated with parsed data
+     # @param model [JsonSchemaConverter|nil] The converter for structured text output, if specified
+     # @param tool_models [Hash<String, JsonSchemaConverter>] Hash mapping tool names to their converters
+     # @return [Hash] The mutated raw response with added :parsed fields containing typed Ruby objects
+     #
+     # The method performs two main transformations:
+     # 1. For structured text outputs: Finds output_text content, parses the JSON, and coerces it
+     #    to the model type, adding the result as content[:parsed]
+     # 2. For function/tool calls: Looks up the tool's converter by name, parses the arguments JSON,
+     #    and coerces it to the appropriate type, adding the result as output[:parsed]
+     def parse_structured_outputs!(raw, model, tool_models)
+       if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
+         raw[:output]
+           &.flat_map do |output|
+             next [] unless output[:type] == "message"
+             output[:content].to_a
+           end
+           &.each do |content|
+             next unless content[:type] == "output_text"
+             begin
+               parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
+             rescue JSON::ParserError => e
+               parsed = e
+             end
+             coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+             content.store(:parsed, coerced)
+           end
+       end
+       raw[:output]&.each do |output|
+         next unless output[:type] == "function_call"
+         next if (model = tool_models[output.fetch(:name)]).nil?
+         begin
+           parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
+         rescue JSON::ParserError => e
+           parsed = e
+         end
+         coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+         output.store(:parsed, coerced)
+       end
+
+       raw
+     end
+
+     # Extracts structured output models from request parameters and converts them to JSON Schema format.
+     #
+     # This method processes the parsed request parameters to identify any JsonSchemaConverter instances
+     # that define expected output schemas. It transforms these Ruby schema definitions into the JSON
+     # Schema format required by the OpenAI API, enabling type-safe structured outputs.
+     #
+     # @param parsed [Hash] The parsed request parameters that may contain structured output definitions
+     # @return [Array<(JsonSchemaConverter|nil, Hash)>] A tuple containing:
+     #   - model: The JsonSchemaConverter for structured text output (or nil if not specified)
+     #   - tool_models: Hash mapping tool names to their JsonSchemaConverter models
+     #
+     # The method handles multiple ways structured outputs can be specified:
+     # - Direct text format: { text: JsonSchemaConverter }
+     # - Nested text format: { text: { format: JsonSchemaConverter } }
+     # - Deep nested format: { text: { format: { type: :json_schema, schema: JsonSchemaConverter } } }
+     # - Tool parameters: { tools: [JsonSchemaConverter, ...] } or tools with parameters as converters
+     def get_structured_output_models(parsed)
+       model = nil
+       tool_models = {}
+
+       case parsed
+       in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
+         parsed.update(
+           text: {
+             format: {
+               type: :json_schema,
+               strict: true,
+               name: model.name.split("::").last,
+               schema: model.to_json_schema
+             }
+           }
+         )
+       in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
+         parsed.fetch(:text).update(
+           format: {
+             type: :json_schema,
+             strict: true,
+             name: model.name.split("::").last,
+             schema: model.to_json_schema
+           }
+         )
+       in {text: {format: {type: :json_schema,
+                           schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
+         parsed.dig(:text, :format).store(:schema, model.to_json_schema)
+       in {tools: Array => tools}
+         # rubocop:disable Metrics/BlockLength
+         mapped = tools.map do |tool|
+           case tool
+           in OpenAI::StructuredOutput::JsonSchemaConverter
+             name = tool.name.split("::").last
+             tool_models.store(name, tool)
+             {
+               type: :function,
+               strict: true,
+               name: name,
+               parameters: tool.to_json_schema
+             }
+           in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
+             func = tool.fetch(:function)
+             name = func[:name] ||= params.name.split("::").last
+             tool_models.store(name, params)
+             func.update(parameters: params.to_json_schema)
+             tool
+           in {type: _, function: {parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params, **}}
+             name = tool[:function][:name] || params.name.split("::").last
+             tool_models.store(name, params)
+             tool[:function][:parameters] = params.to_json_schema
+             tool
+           in {type: _, function: Hash => func} if func[:parameters].is_a?(Class) && func[:parameters] < OpenAI::Internal::Type::BaseModel
+             params = func[:parameters]
+             name = func[:name] || params.name.split("::").last
+             tool_models.store(name, params)
+             func[:parameters] = params.to_json_schema
+             tool
+           else
+             tool
+           end
+         end
+         # rubocop:enable Metrics/BlockLength
+         tools.replace(mapped)
+       else
+       end
+
+       [model, tool_models]
+     end
    end
  end
end
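Taken together: `stream` validates `starting_after`, resolves any structured-output converters, opens the SSE request (or resumes one via `retrieve_streaming` when `previous_response_id` is given), and wraps the result in `OpenAI::Streaming::ResponseStream`. A minimal usage sketch (model and prompt are placeholders):

```ruby
client = OpenAI::Client.new

stream = client.responses.stream(
  model: "gpt-4o",
  input: "Write a haiku about Ruby."
)

stream.each do |event|
  case event
  when OpenAI::Streaming::ResponseTextDeltaEvent
    print(event.delta)
  when OpenAI::Streaming::ResponseCompletedEvent
    puts("\ndone: #{event.response.id}")
  end
end

# Resuming is also supported: pass previous_response_id (optionally with
# starting_after), and the helper calls retrieve_streaming instead of POSTing.
```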
data/lib/openai/streaming.rb ADDED
@@ -0,0 +1,5 @@
+# frozen_string_literal: true
+
+module OpenAI
+  Streaming = Helpers::Streaming
+end
data/lib/openai/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module OpenAI
-  VERSION = "0.11.0"
+  VERSION = "0.13.0"
 end
data/lib/openai.rb CHANGED
@@ -56,6 +56,7 @@ require_relative "openai/helpers/structured_output/enum_of"
 require_relative "openai/helpers/structured_output/union_of"
 require_relative "openai/helpers/structured_output/array_of"
 require_relative "openai/helpers/structured_output/base_model"
+require_relative "openai/helpers/structured_output/parsed_json"
 require_relative "openai/helpers/structured_output"
 require_relative "openai/structured_output"
 require_relative "openai/models/reasoning_effort"
@@ -539,3 +540,6 @@ require_relative "openai/resources/vector_stores"
 require_relative "openai/resources/vector_stores/file_batches"
 require_relative "openai/resources/vector_stores/files"
 require_relative "openai/resources/webhooks"
+require_relative "openai/helpers/streaming/events"
+require_relative "openai/helpers/streaming/response_stream"
+require_relative "openai/streaming"
data/rbi/openai/helpers/streaming/events.rbi ADDED
@@ -0,0 +1,31 @@
+# typed: strong
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class ResponseTextDeltaEvent < OpenAI::Models::Responses::ResponseTextDeltaEvent
+        sig { returns(String) }
+        def snapshot
+        end
+      end
+
+      class ResponseTextDoneEvent < OpenAI::Models::Responses::ResponseTextDoneEvent
+        sig { returns(T.untyped) }
+        def parsed
+        end
+      end
+
+      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
+        sig { returns(String) }
+        def snapshot
+        end
+      end
+
+      class ResponseCompletedEvent < OpenAI::Models::Responses::ResponseCompletedEvent
+        sig { returns(OpenAI::Models::Responses::Response) }
+        def response
+        end
+      end
+    end
+  end
+end
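Per these signatures, the wrapper events add accumulated state on top of the raw API payloads: `snapshot` is the text (or argument JSON) aggregated so far, and `parsed` is the coerced structured output on the done event. A short sketch of `snapshot` in use, assuming a `stream` from `Responses#stream`:

```ruby
stream.each do |event|
  if event.is_a?(OpenAI::Streaming::ResponseFunctionCallArgumentsDeltaEvent)
    # event.delta is just the newest fragment; event.snapshot is everything so far.
    print("\rarguments so far: #{event.snapshot}")
  end
end
```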
data/rbi/openai/helpers/streaming/response_stream.rbi ADDED
@@ -0,0 +1,104 @@
+# typed: strong
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class ResponseStream
+        include OpenAI::Internal::Type::BaseStream
+
+        # Define the type union for streaming events that can be yielded
+        ResponseStreamEvent =
+          T.type_alias do
+            T.any(
+              OpenAI::Streaming::ResponseTextDeltaEvent,
+              OpenAI::Streaming::ResponseTextDoneEvent,
+              OpenAI::Streaming::ResponseCompletedEvent,
+              OpenAI::Streaming::ResponseFunctionCallArgumentsDeltaEvent,
+              # Pass through other raw events
+              OpenAI::Models::Responses::ResponseStreamEvent::Variants
+            )
+          end
+
+        Message = type_member { { fixed: ResponseStreamEvent } }
+        Elem = type_member { { fixed: ResponseStreamEvent } }
+
+        sig do
+          params(
+            raw_stream: T.untyped,
+            text_format: T.untyped,
+            starting_after: T.nilable(Integer)
+          ).void
+        end
+        def initialize(raw_stream:, text_format:, starting_after:)
+        end
+
+        sig { void }
+        def close
+        end
+
+        sig { returns(T.self_type) }
+        def until_done
+        end
+
+        sig { returns(OpenAI::Models::Responses::Response) }
+        def get_final_response
+        end
+
+        sig { returns(String) }
+        def get_output_text
+        end
+
+        sig { returns(T::Enumerator::Lazy[String]) }
+        def text
+        end
+
+        # Override the each method to properly type the yielded events
+        sig do
+          params(
+            block: T.nilable(T.proc.params(event: ResponseStreamEvent).void)
+          ).returns(T.any(T.self_type, T::Enumerator[ResponseStreamEvent]))
+        end
+        def each(&block)
+        end
+
+        private
+
+        sig { returns(T.untyped) }
+        def iterator
+        end
+      end
+
+      class ResponseStreamState
+        sig { returns(T.nilable(OpenAI::Models::Responses::Response)) }
+        attr_reader :completed_response
+
+        sig { params(text_format: T.untyped).void }
+        def initialize(text_format:)
+        end
+
+        sig { params(event: T.untyped).returns(T::Array[T.untyped]) }
+        def handle_event(event)
+        end
+
+        sig do
+          params(
+            event: T.untyped,
+            current_snapshot: T.nilable(OpenAI::Models::Responses::Response)
+          ).returns(OpenAI::Models::Responses::Response)
+        end
+        def accumulate_event(event:, current_snapshot:)
+        end
+
+        private
+
+        sig { params(text: T.nilable(String)).returns(T.untyped) }
+        def parse_structured_text(text)
+        end
+
+        sig { params(object: T.untyped, expected_type: Symbol).void }
+        def assert_type(object, expected_type)
+        end
+      end
+    end
+  end
+end
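For callers that only want text, the convenience accessors mean never touching events directly. A hedged sketch:

```ruby
stream = client.responses.stream(model: "gpt-4o", input: "Summarize RFC 2119.")

# `text` is a lazy enumerator over output-text fragments...
stream.text.each { |chunk| print(chunk) }

# ...and `get_final_response` waits for `response.completed`, returning the
# fully accumulated Response (with `parsed` fields when a text format is set).
response = stream.get_final_response
puts("\n#{response.id}")
```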
data/rbi/openai/internal/type/base_stream.rbi CHANGED
@@ -52,10 +52,17 @@ module OpenAI
           url: URI::Generic,
           status: Integer,
           response: Net::HTTPResponse,
+          unwrap:
+            T.any(
+              Symbol,
+              Integer,
+              T::Array[T.any(Symbol, Integer)],
+              T.proc.params(arg0: T.anything).returns(T.anything)
+            ),
           stream: T::Enumerable[Message]
         ).void
       end
-      def initialize(model:, url:, status:, response:, stream:)
+      def initialize(model:, url:, status:, response:, unwrap:, stream:)
       end
 
       # @api private
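Per the new signature, `unwrap` now travels with the stream itself and is either a key path into each raw message (a symbol or integer, or an array of them) or a proc applied to it; the proc form is what `Responses#stream` passes above. Reading the key-path variants as dig-style lookups is an assumption on my part:

```ruby
# Assumed semantics, inferred from the T.any signature above:
unwrap = :response            # roughly raw.dig(:response) per message
unwrap = [:response, :output] # roughly raw.dig(:response, :output)
unwrap = ->(raw) { raw }      # arbitrary post-processing (used by Responses#stream)
```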
data/rbi/openai/models/audio/transcription.rbi CHANGED
@@ -291,7 +291,7 @@ module OpenAI
 
        # Duration of the input audio in seconds.
        sig { returns(Float) }
-       attr_accessor :duration
+       attr_accessor :seconds
 
        # The type of the usage object. Always `duration` for this variant.
        sig { returns(Symbol) }
@@ -299,17 +299,17 @@ module OpenAI
 
        # Usage statistics for models billed by audio input duration.
        sig do
-         params(duration: Float, type: Symbol).returns(T.attached_class)
+         params(seconds: Float, type: Symbol).returns(T.attached_class)
        end
        def self.new(
          # Duration of the input audio in seconds.
-         duration:,
+         seconds:,
          # The type of the usage object. Always `duration` for this variant.
          type: :duration
        )
        end
 
-       sig { override.returns({ duration: Float, type: Symbol }) }
+       sig { override.returns({ seconds: Float, type: Symbol }) }
        def to_hash
        end
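This `duration` to `seconds` rename is breaking for callers that read duration-billed usage. A hedged caller-side sketch (the file path is a placeholder, and which models return the duration-type usage object may vary):

```ruby
transcription = client.audio.transcriptions.create(
  model: "whisper-1",
  file: Pathname("meeting.mp3")
)

usage = transcription.usage
# Previously usage.duration; as of this release the accessor is usage.seconds.
puts("billed seconds: #{usage.seconds}") if usage&.type == :duration
```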