openai 0.10.0 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +36 -0
  3. data/README.md +83 -7
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/helpers/streaming/events.rb +23 -0
  7. data/lib/openai/helpers/streaming/response_stream.rb +232 -0
  8. data/lib/openai/helpers/structured_output/parsed_json.rb +39 -0
  9. data/lib/openai/internal/stream.rb +2 -1
  10. data/lib/openai/internal/transport/base_client.rb +10 -2
  11. data/lib/openai/internal/type/base_stream.rb +3 -1
  12. data/lib/openai/models/all_models.rb +4 -0
  13. data/lib/openai/models/chat/chat_completion.rb +32 -31
  14. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  15. data/lib/openai/models/chat/chat_completion_message.rb +1 -1
  16. data/lib/openai/models/chat/chat_completion_message_tool_call.rb +1 -1
  17. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  18. data/lib/openai/models/images_response.rb +92 -1
  19. data/lib/openai/models/responses/response.rb +59 -35
  20. data/lib/openai/models/responses/response_create_params.rb +64 -39
  21. data/lib/openai/models/responses/response_function_tool_call.rb +1 -1
  22. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  23. data/lib/openai/models/responses/response_includable.rb +8 -6
  24. data/lib/openai/models/responses/response_output_text.rb +1 -1
  25. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  26. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  27. data/lib/openai/models/responses_model.rb +4 -0
  28. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  29. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  30. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  32. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  33. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  34. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  35. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  36. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  37. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  38. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  39. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  40. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  42. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  43. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  44. data/lib/openai/models.rb +2 -0
  45. data/lib/openai/resources/chat/completions.rb +14 -6
  46. data/lib/openai/resources/responses.rb +262 -81
  47. data/lib/openai/resources/webhooks.rb +124 -0
  48. data/lib/openai/streaming.rb +5 -0
  49. data/lib/openai/version.rb +1 -1
  50. data/lib/openai.rb +22 -0
  51. data/rbi/openai/client.rbi +3 -0
  52. data/rbi/openai/helpers/streaming/events.rbi +31 -0
  53. data/rbi/openai/helpers/streaming/response_stream.rbi +104 -0
  54. data/rbi/openai/internal/type/base_stream.rbi +8 -1
  55. data/rbi/openai/models/all_models.rbi +20 -0
  56. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  57. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  58. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  59. data/rbi/openai/models/images_response.rbi +146 -0
  60. data/rbi/openai/models/responses/response.rbi +75 -44
  61. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  62. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  63. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  64. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  65. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  66. data/rbi/openai/models/responses_model.rbi +20 -0
  67. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  68. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  69. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  70. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  71. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  72. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  73. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  74. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  75. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  76. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  77. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  78. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  79. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  80. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  81. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  82. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  83. data/rbi/openai/models.rbi +2 -0
  84. data/rbi/openai/resources/chat/completions.rbi +34 -30
  85. data/rbi/openai/resources/responses.rbi +188 -39
  86. data/rbi/openai/resources/webhooks.rbi +68 -0
  87. data/rbi/openai/streaming.rbi +5 -0
  88. data/sig/openai/client.rbs +2 -0
  89. data/sig/openai/internal/type/base_stream.rbs +4 -0
  90. data/sig/openai/models/all_models.rbs +8 -0
  91. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  92. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  93. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  94. data/sig/openai/models/images_response.rbs +83 -0
  95. data/sig/openai/models/responses/response.rbs +13 -1
  96. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  97. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  98. data/sig/openai/models/responses/response_includable.rbs +7 -5
  99. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  100. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  101. data/sig/openai/models/responses_model.rbs +8 -0
  102. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  103. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  104. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  105. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  106. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  107. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  108. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  109. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  110. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  111. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  112. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  113. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  114. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  115. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  116. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  117. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  118. data/sig/openai/models.rbs +2 -0
  119. data/sig/openai/resources/responses.rbs +4 -0
  120. data/sig/openai/resources/webhooks.rbs +33 -0
  121. metadata +63 -2
data/rbi/openai/resources/responses.rbi

@@ -29,6 +29,7 @@ module OpenAI
  input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
  instructions: T.nilable(String),
  max_output_tokens: T.nilable(Integer),
+ max_tool_calls: T.nilable(Integer),
  metadata: T.nilable(T::Hash[Symbol, String]),
  model:
  T.any(
@@ -55,7 +56,8 @@ module OpenAI
  T.any(
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
  OpenAI::Responses::ToolChoiceTypes::OrHash,
- OpenAI::Responses::ToolChoiceFunction::OrHash
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
+ OpenAI::Responses::ToolChoiceMcp::OrHash
  ),
  tools:
  T::Array[
@@ -70,6 +72,7 @@ module OpenAI
  OpenAI::Responses::WebSearchTool::OrHash
  )
  ],
+ top_logprobs: T.nilable(Integer),
  top_p: T.nilable(Float),
  truncation:
  T.nilable(
@@ -87,18 +90,19 @@ module OpenAI
  # Specify additional output data to include in the model response. Currently
  # supported values are:
  #
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
  # - `file_search_call.results`: Include the search results of the file search tool
  # call.
  # - `message.input_image.image_url`: Include image urls from the input message.
- # - `computer_call_output.output.image_url`: Include image urls from the computer
- # call output.
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
  # tokens in reasoning item outputs. This enables reasoning items to be used in
  # multi-turn conversations when using the Responses API statelessly (like when
  # the `store` parameter is set to `false`, or when an organization is enrolled
  # in the zero data retention program).
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
- # in code interpreter tool call items.
  include: nil,
  # Text, image, or file inputs to the model, used to generate a response.
  #
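As a usage sketch of these `include` options (assuming a configured `client = OpenAI::Client.new`; model and prompt are illustrative, not part of this diff):

    # Stateless use of the Responses API: opt into encrypted reasoning items
    # so they can be replayed in later turns even though store is false.
    response = client.responses.create(
      model: "o3",
      input: "Summarize the trade-offs of optimistic locking.",
      store: false,
      include: [:"reasoning.encrypted_content"]
    )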
@@ -120,6 +124,11 @@ module OpenAI
  # including visible output tokens and
  # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
  max_output_tokens: nil,
+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ max_tool_calls: nil,
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard.
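A sketch of the new `max_tool_calls` parameter; the web-search tool payload is an assumption, shown only for shape:

    # Cap built-in tool use at three calls total, across all built-in tools.
    response = client.responses.create(
      model: "gpt-4o",
      input: "Find three recent Ruby releases and compare them.",
      tools: [{type: :web_search_preview}],
      max_tool_calls: 3
    )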
@@ -147,23 +156,23 @@ module OpenAI
  # Configuration options for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
  reasoning: nil,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # Whether to store the generated model response for later retrieval via API.
  store: nil,
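A sketch of selecting a processing tier, including the new `priority` value; note the response echoes the tier actually used:

    response = client.responses.create(
      model: "gpt-4o",
      input: "Hello!",
      service_tier: :priority # or :auto, :default, :flex
    )
    puts response.service_tier # processing mode actually used; may differ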
@@ -197,6 +206,9 @@ module OpenAI
  # the model to call your own code. Learn more about
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
  tools: nil,
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ top_logprobs: nil,
  # An alternative to sampling with temperature, called nucleus sampling, where the
  # model considers the results of the tokens with top_p probability mass. So 0.1
  # means only the tokens comprising the top 10% probability mass are considered.
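A sketch pairing `top_logprobs` with its matching `include` value:

    # Return the five most likely tokens at each position, with log probabilities.
    response = client.responses.create(
      model: "gpt-4o",
      input: "Name a prime number.",
      top_logprobs: 5,
      include: [:"message.output_text.logprobs"]
    )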
@@ -245,6 +257,7 @@ module OpenAI
  input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
  instructions: T.nilable(String),
  max_output_tokens: T.nilable(Integer),
+ max_tool_calls: T.nilable(Integer),
  metadata: T.nilable(T::Hash[Symbol, String]),
  model:
  T.any(
@@ -262,12 +275,19 @@ module OpenAI
  ),
  store: T.nilable(T::Boolean),
  temperature: T.nilable(Float),
- text: OpenAI::Responses::ResponseTextConfig::OrHash,
+ text:
+ T.nilable(
+ T.any(
+ OpenAI::Responses::ResponseTextConfig::OrHash,
+ OpenAI::StructuredOutput::JsonSchemaConverter
+ )
+ ),
  tool_choice:
  T.any(
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
  OpenAI::Responses::ToolChoiceTypes::OrHash,
- OpenAI::Responses::ToolChoiceFunction::OrHash
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
+ OpenAI::Responses::ToolChoiceMcp::OrHash
  ),
  tools:
  T::Array[
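The widened `text` type accepts a `JsonSchemaConverter`, which enables structured outputs. A minimal sketch, assuming the gem's `OpenAI::BaseModel` schema DSL (class and field names are illustrative):

    class ContactInfo < OpenAI::BaseModel
      required :name, String
      required :email, String
    end

    # Passing the schema class as text: asks the model for JSON conforming
    # to ContactInfo, which the SDK can parse back into typed fields.
    response = client.responses.create(
      model: "gpt-4o",
      input: "Extract the contact from: 'Reach Ada at ada@example.com'.",
      text: ContactInfo
    )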
@@ -282,6 +302,7 @@ module OpenAI
  OpenAI::Responses::WebSearchTool::OrHash
  )
  ],
+ top_logprobs: T.nilable(Integer),
  top_p: T.nilable(Float),
  truncation:
  T.nilable(
@@ -303,18 +324,19 @@ module OpenAI
  # Specify additional output data to include in the model response. Currently
  # supported values are:
  #
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ # in code interpreter tool call items.
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
+ # call output.
  # - `file_search_call.results`: Include the search results of the file search tool
  # call.
  # - `message.input_image.image_url`: Include image urls from the input message.
- # - `computer_call_output.output.image_url`: Include image urls from the computer
- # call output.
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
  # tokens in reasoning item outputs. This enables reasoning items to be used in
  # multi-turn conversations when using the Responses API statelessly (like when
  # the `store` parameter is set to `false`, or when an organization is enrolled
  # in the zero data retention program).
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
- # in code interpreter tool call items.
  include: nil,
  # Text, image, or file inputs to the model, used to generate a response.
  #
@@ -336,6 +358,11 @@ module OpenAI
  # including visible output tokens and
  # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
  max_output_tokens: nil,
+ # The maximum number of total calls to built-in tools that can be processed in a
+ # response. This maximum number applies across all built-in tool calls, not per
+ # individual tool. Any further attempts to call a tool by the model will be
+ # ignored.
+ max_tool_calls: nil,
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard.
@@ -363,23 +390,23 @@ module OpenAI
  # Configuration options for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
  reasoning: nil,
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the request will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  service_tier: nil,
  # Whether to store the generated model response for later retrieval via API.
  store: nil,
@@ -413,6 +440,9 @@ module OpenAI
  # the model to call your own code. Learn more about
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
  tools: nil,
+ # An integer between 0 and 20 specifying the number of most likely tokens to
+ # return at each token position, each with an associated log probability.
+ top_logprobs: nil,
  # An alternative to sampling with temperature, called nucleus sampling, where the
  # model considers the results of the tokens with top_p probability mass. So 0.1
  # means only the tokens comprising the top 10% probability mass are considered.
@@ -438,6 +468,125 @@ module OpenAI
  )
  end

+ # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
+ #
+ # Creates a model response with a higher-level streaming interface that provides
+ # helper methods for processing events and aggregating stream outputs.
+ sig do
+ params(
+ input:
+ T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants),
+ model:
+ T.nilable(
+ T.any(
+ String,
+ OpenAI::ChatModel::OrSymbol,
+ OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+ )
+ ),
+ background: T.nilable(T::Boolean),
+ include:
+ T.nilable(
+ T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
+ ),
+ instructions: T.nilable(String),
+ max_output_tokens: T.nilable(Integer),
+ metadata: T.nilable(T::Hash[Symbol, String]),
+ parallel_tool_calls: T.nilable(T::Boolean),
+ previous_response_id: T.nilable(String),
+ prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+ reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+ service_tier:
+ T.nilable(
+ OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
+ ),
+ store: T.nilable(T::Boolean),
+ temperature: T.nilable(Float),
+ text:
+ T.any(
+ OpenAI::Responses::ResponseTextConfig::OrHash,
+ OpenAI::StructuredOutput::JsonSchemaConverter
+ ),
+ tool_choice:
+ T.any(
+ OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+ OpenAI::Responses::ToolChoiceTypes::OrHash,
+ OpenAI::Responses::ToolChoiceFunction::OrHash
+ ),
+ tools:
+ T.nilable(
+ T::Array[
+ T.any(
+ OpenAI::Responses::FunctionTool::OrHash,
+ OpenAI::Responses::FileSearchTool::OrHash,
+ OpenAI::Responses::ComputerTool::OrHash,
+ OpenAI::Responses::Tool::Mcp::OrHash,
+ OpenAI::Responses::Tool::CodeInterpreter::OrHash,
+ OpenAI::Responses::Tool::ImageGeneration::OrHash,
+ OpenAI::Responses::Tool::LocalShell::OrHash,
+ OpenAI::Responses::WebSearchTool::OrHash,
+ OpenAI::StructuredOutput::JsonSchemaConverter
+ )
+ ]
+ ),
+ top_p: T.nilable(Float),
+ truncation:
+ T.nilable(
+ OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol
+ ),
+ user: T.nilable(String),
+ starting_after: T.nilable(Integer),
+ request_options: T.nilable(OpenAI::RequestOptions::OrHash)
+ ).returns(OpenAI::Streaming::ResponseStream)
+ end
+ def stream(
+ # Text, image, or file inputs to the model, used to generate a response.
+ input: nil,
+ # Model ID used to generate the response, like `gpt-4o` or `o3`.
+ model: nil,
+ # Whether to run the model response in the background.
+ background: nil,
+ # Specify additional output data to include in the model response.
+ include: nil,
+ # A system (or developer) message inserted into the model's context.
+ instructions: nil,
+ # An upper bound for the number of tokens that can be generated for a response.
+ max_output_tokens: nil,
+ # Set of 16 key-value pairs that can be attached to an object.
+ metadata: nil,
+ # Whether to allow the model to run tool calls in parallel.
+ parallel_tool_calls: nil,
+ # The unique ID of the previous response to the model. Use this to create
+ # multi-turn conversations.
+ previous_response_id: nil,
+ # Reference to a prompt template and its variables.
+ prompt: nil,
+ # Configuration options for reasoning models.
+ reasoning: nil,
+ # Specifies the latency tier to use for processing the request.
+ service_tier: nil,
+ # Whether to store the generated model response for later retrieval via API.
+ store: nil,
+ # What sampling temperature to use, between 0 and 2.
+ temperature: nil,
+ # Configuration options for a text response from the model.
+ text: nil,
+ # How the model should select which tool (or tools) to use when generating a response.
+ tool_choice: nil,
+ # An array of tools the model may call while generating a response.
+ tools: nil,
+ # An alternative to sampling with temperature, called nucleus sampling.
+ top_p: nil,
+ # The truncation strategy to use for the model response.
+ truncation: nil,
+ # A stable identifier for your end-users.
+ user: nil,
+ # The sequence number of the event after which to start streaming (for resuming streams).
+ starting_after: nil,
+ request_options: {}
+ )
+ end
+
  # See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart.
  #
  # Retrieves a model response with the given ID.
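The `stream` method added above returns an `OpenAI::Streaming::ResponseStream` that yields typed events. A minimal usage sketch (the event class is declared in `data/lib/openai/helpers/streaming/events.rb`; model and prompt are illustrative):

    stream = client.responses.stream(
      model: "gpt-4o",
      input: "Write a haiku about Ruby."
    )

    # Print text deltas as they arrive instead of handling raw SSE chunks.
    stream.each do |event|
      case event
      when OpenAI::Streaming::ResponseTextDeltaEvent
        print(event.delta)
      end
    end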
data/rbi/openai/resources/webhooks.rbi

@@ -0,0 +1,68 @@
+ # typed: strong
+
+ module OpenAI
+ module Resources
+ class Webhooks
+ # Validates that the given payload was sent by OpenAI and parses the payload.
+ sig do
+ params(
+ payload: String,
+ headers: T.nilable(T::Hash[T.any(String, Symbol), String]),
+ webhook_secret: T.nilable(String)
+ ).returns(
+ T.any(
+ OpenAI::Webhooks::BatchCancelledWebhookEvent,
+ OpenAI::Webhooks::BatchCompletedWebhookEvent,
+ OpenAI::Webhooks::BatchExpiredWebhookEvent,
+ OpenAI::Webhooks::BatchFailedWebhookEvent,
+ OpenAI::Webhooks::EvalRunCanceledWebhookEvent,
+ OpenAI::Webhooks::EvalRunFailedWebhookEvent,
+ OpenAI::Webhooks::EvalRunSucceededWebhookEvent,
+ OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent,
+ OpenAI::Webhooks::FineTuningJobFailedWebhookEvent,
+ OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent,
+ OpenAI::Webhooks::ResponseCancelledWebhookEvent,
+ OpenAI::Webhooks::ResponseCompletedWebhookEvent,
+ OpenAI::Webhooks::ResponseFailedWebhookEvent,
+ OpenAI::Webhooks::ResponseIncompleteWebhookEvent
+ )
+ )
+ end
+ def unwrap(
+ # The raw webhook payload as a string
+ payload,
+ # The webhook headers
+ headers = {},
+ # The webhook secret (optional, will use ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+ webhook_secret = nil
+ )
+ end
+
+ # Validates whether or not the webhook payload was sent by OpenAI.
+ sig do
+ params(
+ payload: String,
+ headers: T::Hash[T.any(String, Symbol), String],
+ webhook_secret: T.nilable(String),
+ tolerance: Integer
+ ).void
+ end
+ def verify_signature(
+ # The webhook payload as a string
+ payload,
+ # The webhook headers
+ headers,
+ # The webhook secret (optional, will use ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
+ webhook_secret = nil,
+ # Maximum age of the webhook in seconds (default: 300 = 5 minutes)
+ tolerance = 300
+ )
+ end
+
+ # @api private
+ sig { params(client: OpenAI::Client).returns(T.attached_class) }
+ def self.new(client:)
+ end
+ end
+ end
+ end
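A minimal handling sketch for these webhook helpers (the Rack-style `request` object is illustrative; the secret falls back to `ENV["OPENAI_WEBHOOK_SECRET"]` as documented above):

    # Verifies the signature and parses the payload into a typed event;
    # verification fails if the payload is tampered with or too old.
    event = client.webhooks.unwrap(request.body.read, request.headers.to_h)

    case event
    when OpenAI::Webhooks::ResponseCompletedWebhookEvent
      puts "response completed: #{event.data.id}" # fields per the webhook event models
    end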
data/rbi/openai/streaming.rbi

@@ -0,0 +1,5 @@
+ # typed: strong
+
+ module OpenAI
+ Streaming = OpenAI::Helpers::Streaming
+ end

data/sig/openai/client.rbs

@@ -36,6 +36,8 @@ module OpenAI

  attr_reader vector_stores: OpenAI::Resources::VectorStores

+ attr_reader webhooks: OpenAI::Resources::Webhooks
+
  attr_reader beta: OpenAI::Resources::Beta

  attr_reader batches: OpenAI::Resources::Batches

data/sig/openai/internal/type/base_stream.rbs

@@ -23,6 +23,10 @@ module OpenAI
  url: URI::Generic,
  status: Integer,
  response: top,
+ unwrap: Symbol
+ | Integer
+ | ::Array[Symbol | Integer]
+ | ^(top arg0) -> top,
  stream: Enumerable[Message]
  ) -> void

data/sig/openai/models/all_models.rbs

@@ -13,6 +13,10 @@ module OpenAI
  | :"o1-pro-2025-03-19"
  | :"o3-pro"
  | :"o3-pro-2025-06-10"
+ | :"o3-deep-research"
+ | :"o3-deep-research-2025-06-26"
+ | :"o4-mini-deep-research"
+ | :"o4-mini-deep-research-2025-06-26"
  | :"computer-use-preview"
  | :"computer-use-preview-2025-03-11"

@@ -23,6 +27,10 @@ module OpenAI
  O1_PRO_2025_03_19: :"o1-pro-2025-03-19"
  O3_PRO: :"o3-pro"
  O3_PRO_2025_06_10: :"o3-pro-2025-06-10"
+ O3_DEEP_RESEARCH: :"o3-deep-research"
+ O3_DEEP_RESEARCH_2025_06_26: :"o3-deep-research-2025-06-26"
+ O4_MINI_DEEP_RESEARCH: :"o4-mini-deep-research"
+ O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
  COMPUTER_USE_PREVIEW: :"computer-use-preview"
  COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"

data/sig/openai/models/chat/chat_completion.rbs

@@ -127,7 +127,7 @@ module OpenAI
  end
  end

- type service_tier = :auto | :default | :flex | :scale
+ type service_tier = :auto | :default | :flex | :scale | :priority

  module ServiceTier
  extend OpenAI::Internal::Type::Enum
@@ -136,6 +136,7 @@ module OpenAI
  DEFAULT: :default
  FLEX: :flex
  SCALE: :scale
+ PRIORITY: :priority

  def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
  end

data/sig/openai/models/chat/chat_completion_chunk.rbs

@@ -272,7 +272,7 @@ module OpenAI
  end
  end

- type service_tier = :auto | :default | :flex | :scale
+ type service_tier = :auto | :default | :flex | :scale | :priority

  module ServiceTier
  extend OpenAI::Internal::Type::Enum
@@ -281,6 +281,7 @@ module OpenAI
  DEFAULT: :default
  FLEX: :flex
  SCALE: :scale
+ PRIORITY: :priority

  def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
  end

data/sig/openai/models/chat/completion_create_params.rbs

@@ -280,7 +280,7 @@ module OpenAI
  def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format]
  end

- type service_tier = :auto | :default | :flex | :scale
+ type service_tier = :auto | :default | :flex | :scale | :priority

  module ServiceTier
  extend OpenAI::Internal::Type::Enum
@@ -289,6 +289,7 @@ module OpenAI
  DEFAULT: :default
  FLEX: :flex
  SCALE: :scale
+ PRIORITY: :priority

  def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier]
  end
data/sig/openai/models/images_response.rbs

@@ -3,17 +3,45 @@ module OpenAI
  type images_response =
  {
  created: Integer,
+ background: OpenAI::Models::ImagesResponse::background,
  data: ::Array[OpenAI::Image],
+ output_format: OpenAI::Models::ImagesResponse::output_format,
+ quality: OpenAI::Models::ImagesResponse::quality,
+ size: OpenAI::Models::ImagesResponse::size,
  usage: OpenAI::ImagesResponse::Usage
  }

  class ImagesResponse < OpenAI::Internal::Type::BaseModel
  attr_accessor created: Integer

+ attr_reader background: OpenAI::Models::ImagesResponse::background?
+
+ def background=: (
+ OpenAI::Models::ImagesResponse::background
+ ) -> OpenAI::Models::ImagesResponse::background
+
  attr_reader data: ::Array[OpenAI::Image]?

  def data=: (::Array[OpenAI::Image]) -> ::Array[OpenAI::Image]

+ attr_reader output_format: OpenAI::Models::ImagesResponse::output_format?
+
+ def output_format=: (
+ OpenAI::Models::ImagesResponse::output_format
+ ) -> OpenAI::Models::ImagesResponse::output_format
+
+ attr_reader quality: OpenAI::Models::ImagesResponse::quality?
+
+ def quality=: (
+ OpenAI::Models::ImagesResponse::quality
+ ) -> OpenAI::Models::ImagesResponse::quality
+
+ attr_reader size: OpenAI::Models::ImagesResponse::size?
+
+ def size=: (
+ OpenAI::Models::ImagesResponse::size
+ ) -> OpenAI::Models::ImagesResponse::size
+
  attr_reader usage: OpenAI::ImagesResponse::Usage?

  def usage=: (
@@ -22,16 +50,71 @@ module OpenAI

  def initialize: (
  created: Integer,
+ ?background: OpenAI::Models::ImagesResponse::background,
  ?data: ::Array[OpenAI::Image],
+ ?output_format: OpenAI::Models::ImagesResponse::output_format,
+ ?quality: OpenAI::Models::ImagesResponse::quality,
+ ?size: OpenAI::Models::ImagesResponse::size,
  ?usage: OpenAI::ImagesResponse::Usage
  ) -> void

  def to_hash: -> {
  created: Integer,
+ background: OpenAI::Models::ImagesResponse::background,
  data: ::Array[OpenAI::Image],
+ output_format: OpenAI::Models::ImagesResponse::output_format,
+ quality: OpenAI::Models::ImagesResponse::quality,
+ size: OpenAI::Models::ImagesResponse::size,
  usage: OpenAI::ImagesResponse::Usage
  }

+ type background = :transparent | :opaque
+
+ module Background
+ extend OpenAI::Internal::Type::Enum
+
+ TRANSPARENT: :transparent
+ OPAQUE: :opaque
+
+ def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::background]
+ end
+
+ type output_format = :png | :webp | :jpeg
+
+ module OutputFormat
+ extend OpenAI::Internal::Type::Enum
+
+ PNG: :png
+ WEBP: :webp
+ JPEG: :jpeg
+
+ def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::output_format]
+ end
+
+ type quality = :low | :medium | :high
+
+ module Quality
+ extend OpenAI::Internal::Type::Enum
+
+ LOW: :low
+ MEDIUM: :medium
+ HIGH: :high
+
+ def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::quality]
+ end
+
+ type size = :"1024x1024" | :"1024x1536" | :"1536x1024"
+
+ module Size
+ extend OpenAI::Internal::Type::Enum
+
+ SIZE_1024X1024: :"1024x1024"
+ SIZE_1024X1536: :"1024x1536"
+ SIZE_1536X1024: :"1536x1024"
+
+ def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::size]
+ end
+
  type usage =
  {
  input_tokens: Integer,