openai 0.10.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +3 -0
  6. data/lib/openai/models/all_models.rb +4 -0
  7. data/lib/openai/models/chat/chat_completion.rb +32 -31
  8. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  9. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  10. data/lib/openai/models/images_response.rb +92 -1
  11. data/lib/openai/models/responses/response.rb +59 -35
  12. data/lib/openai/models/responses/response_create_params.rb +64 -39
  13. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  14. data/lib/openai/models/responses/response_includable.rb +8 -6
  15. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  16. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  17. data/lib/openai/models/responses_model.rb +4 -0
  18. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  19. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  20. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  21. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  22. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  23. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  24. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  25. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  26. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  27. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  28. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  29. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  30. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  31. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  32. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  33. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  34. data/lib/openai/models.rb +2 -0
  35. data/lib/openai/resources/chat/completions.rb +2 -2
  36. data/lib/openai/resources/responses.rb +14 -6
  37. data/lib/openai/resources/webhooks.rb +124 -0
  38. data/lib/openai/version.rb +1 -1
  39. data/lib/openai.rb +18 -0
  40. data/rbi/openai/client.rbi +3 -0
  41. data/rbi/openai/models/all_models.rbi +20 -0
  42. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  43. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  44. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  45. data/rbi/openai/models/images_response.rbi +146 -0
  46. data/rbi/openai/models/responses/response.rbi +75 -44
  47. data/rbi/openai/models/responses/response_create_params.rbi +91 -55
  48. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  49. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  50. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  51. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  52. data/rbi/openai/models/responses_model.rbi +20 -0
  53. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  54. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  55. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  56. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  57. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  58. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  59. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  60. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  61. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  62. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  63. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  64. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  65. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  66. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  67. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  68. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  69. data/rbi/openai/models.rbi +2 -0
  70. data/rbi/openai/resources/chat/completions.rbi +34 -30
  71. data/rbi/openai/resources/responses.rbi +62 -38
  72. data/rbi/openai/resources/webhooks.rbi +68 -0
  73. data/sig/openai/client.rbs +2 -0
  74. data/sig/openai/models/all_models.rbs +8 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/images_response.rbs +83 -0
  79. data/sig/openai/models/responses/response.rbs +13 -1
  80. data/sig/openai/models/responses/response_create_params.rbs +13 -1
  81. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  82. data/sig/openai/models/responses/response_includable.rbs +7 -5
  83. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  84. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  85. data/sig/openai/models/responses_model.rbs +8 -0
  86. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  87. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  88. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  89. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  90. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  91. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  92. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  93. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  94. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  95. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  96. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  97. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  98. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  99. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  100. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  101. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  102. data/sig/openai/models.rbs +2 -0
  103. data/sig/openai/resources/responses.rbs +4 -0
  104. data/sig/openai/resources/webhooks.rbs +33 -0
  105. metadata +56 -2
@@ -23,18 +23,19 @@ module OpenAI
23
23
  # Specify additional output data to include in the model response. Currently
24
24
  # supported values are:
25
25
  #
26
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
27
+ # in code interpreter tool call items.
28
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
29
+ # call output.
26
30
  # - `file_search_call.results`: Include the search results of the file search tool
27
31
  # call.
28
32
  # - `message.input_image.image_url`: Include image urls from the input message.
29
- # - `computer_call_output.output.image_url`: Include image urls from the computer
30
- # call output.
33
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
31
34
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
32
35
  # tokens in reasoning item outputs. This enables reasoning items to be used in
33
36
  # multi-turn conversations when using the Responses API statelessly (like when
34
37
  # the `store` parameter is set to `false`, or when an organization is enrolled
35
38
  # in the zero data retention program).
36
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
37
- # in code interpreter tool call items.
38
39
  sig do
39
40
  returns(
40
41
  T.nilable(T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol])
@@ -79,6 +80,13 @@ module OpenAI
79
80
  sig { returns(T.nilable(Integer)) }
80
81
  attr_accessor :max_output_tokens
81
82
 
83
+ # The maximum number of total calls to built-in tools that can be processed in a
84
+ # response. This maximum number applies across all built-in tool calls, not per
85
+ # individual tool. Any further attempts to call a tool by the model will be
86
+ # ignored.
87
+ sig { returns(T.nilable(Integer)) }
88
+ attr_accessor :max_tool_calls
89
+
82
90
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
83
91
  # for storing additional information about the object in a structured format, and
84
92
  # querying for objects via API or the dashboard.
@@ -150,23 +158,23 @@ module OpenAI
150
158
  sig { params(reasoning: T.nilable(OpenAI::Reasoning::OrHash)).void }
151
159
  attr_writer :reasoning
152
160
 
153
- # Specifies the latency tier to use for processing the request. This parameter is
154
- # relevant for customers subscribed to the scale tier service:
161
+ # Specifies the processing type used for serving the request.
155
162
  #
156
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
157
- # utilize scale tier credits until they are exhausted.
158
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
159
- # be processed using the default service tier with a lower uptime SLA and no
160
- # latency guarantee.
161
- # - If set to 'default', the request will be processed using the default service
162
- # tier with a lower uptime SLA and no latency guarantee.
163
- # - If set to 'flex', the request will be processed with the Flex Processing
164
- # service tier.
165
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
163
+ # - If set to 'auto', then the request will be processed with the service tier
164
+ # configured in the Project settings. Unless otherwise configured, the Project
165
+ # will use 'default'.
166
+ # - If set to 'default', then the request will be processed with the standard
167
+ # pricing and performance for the selected model.
168
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
169
+ # 'priority', then the request will be processed with the corresponding service
170
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
171
+ # Priority processing.
166
172
  # - When not set, the default behavior is 'auto'.
167
173
  #
168
- # When this parameter is set, the response body will include the `service_tier`
169
- # utilized.
174
+ # When the `service_tier` parameter is set, the response body will include the
175
+ # `service_tier` value based on the processing mode actually used to serve the
176
+ # request. This response value may be different from the value set in the
177
+ # parameter.
170
178
  sig do
171
179
  returns(
172
180
  T.nilable(
@@ -215,7 +223,8 @@ module OpenAI
215
223
  T.any(
216
224
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
217
225
  OpenAI::Responses::ToolChoiceTypes,
218
- OpenAI::Responses::ToolChoiceFunction
226
+ OpenAI::Responses::ToolChoiceFunction,
227
+ OpenAI::Responses::ToolChoiceMcp
219
228
  )
220
229
  )
221
230
  )
@@ -228,7 +237,8 @@ module OpenAI
228
237
  T.any(
229
238
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
230
239
  OpenAI::Responses::ToolChoiceTypes::OrHash,
231
- OpenAI::Responses::ToolChoiceFunction::OrHash
240
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
241
+ OpenAI::Responses::ToolChoiceMcp::OrHash
232
242
  )
233
243
  ).void
234
244
  end
@@ -287,6 +297,11 @@ module OpenAI
287
297
  end
288
298
  attr_writer :tools
289
299
 
300
+ # An integer between 0 and 20 specifying the number of most likely tokens to
301
+ # return at each token position, each with an associated log probability.
302
+ sig { returns(T.nilable(Integer)) }
303
+ attr_accessor :top_logprobs
304
+
290
305
  # An alternative to sampling with temperature, called nucleus sampling, where the
291
306
  # model considers the results of the tokens with top_p probability mass. So 0.1
292
307
  # means only the tokens comprising the top 10% probability mass are considered.
@@ -330,6 +345,7 @@ module OpenAI
330
345
  input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
331
346
  instructions: T.nilable(String),
332
347
  max_output_tokens: T.nilable(Integer),
348
+ max_tool_calls: T.nilable(Integer),
333
349
  metadata: T.nilable(T::Hash[Symbol, String]),
334
350
  model:
335
351
  T.any(
@@ -352,7 +368,8 @@ module OpenAI
352
368
  T.any(
353
369
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
354
370
  OpenAI::Responses::ToolChoiceTypes::OrHash,
355
- OpenAI::Responses::ToolChoiceFunction::OrHash
371
+ OpenAI::Responses::ToolChoiceFunction::OrHash,
372
+ OpenAI::Responses::ToolChoiceMcp::OrHash
356
373
  ),
357
374
  tools:
358
375
  T::Array[
@@ -367,6 +384,7 @@ module OpenAI
367
384
  OpenAI::Responses::WebSearchTool::OrHash
368
385
  )
369
386
  ],
387
+ top_logprobs: T.nilable(Integer),
370
388
  top_p: T.nilable(Float),
371
389
  truncation:
372
390
  T.nilable(
@@ -383,18 +401,19 @@ module OpenAI
383
401
  # Specify additional output data to include in the model response. Currently
384
402
  # supported values are:
385
403
  #
404
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
405
+ # in code interpreter tool call items.
406
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
407
+ # call output.
386
408
  # - `file_search_call.results`: Include the search results of the file search tool
387
409
  # call.
388
410
  # - `message.input_image.image_url`: Include image urls from the input message.
389
- # - `computer_call_output.output.image_url`: Include image urls from the computer
390
- # call output.
411
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
391
412
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
392
413
  # tokens in reasoning item outputs. This enables reasoning items to be used in
393
414
  # multi-turn conversations when using the Responses API statelessly (like when
394
415
  # the `store` parameter is set to `false`, or when an organization is enrolled
395
416
  # in the zero data retention program).
396
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
397
- # in code interpreter tool call items.
398
417
  include: nil,
399
418
  # Text, image, or file inputs to the model, used to generate a response.
400
419
  #
@@ -416,6 +435,11 @@ module OpenAI
416
435
  # including visible output tokens and
417
436
  # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
418
437
  max_output_tokens: nil,
438
+ # The maximum number of total calls to built-in tools that can be processed in a
439
+ # response. This maximum number applies across all built-in tool calls, not per
440
+ # individual tool. Any further attempts to call a tool by the model will be
441
+ # ignored.
442
+ max_tool_calls: nil,
419
443
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
420
444
  # for storing additional information about the object in a structured format, and
421
445
  # querying for objects via API or the dashboard.
@@ -443,23 +467,23 @@ module OpenAI
443
467
  # Configuration options for
444
468
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
445
469
  reasoning: nil,
446
- # Specifies the latency tier to use for processing the request. This parameter is
447
- # relevant for customers subscribed to the scale tier service:
470
+ # Specifies the processing type used for serving the request.
448
471
  #
449
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
450
- # utilize scale tier credits until they are exhausted.
451
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
452
- # be processed using the default service tier with a lower uptime SLA and no
453
- # latency guarantee.
454
- # - If set to 'default', the request will be processed using the default service
455
- # tier with a lower uptime SLA and no latency guarantee.
456
- # - If set to 'flex', the request will be processed with the Flex Processing
457
- # service tier.
458
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
472
+ # - If set to 'auto', then the request will be processed with the service tier
473
+ # configured in the Project settings. Unless otherwise configured, the Project
474
+ # will use 'default'.
475
+ # - If set to 'default', then the request will be processed with the standard
476
+ # pricing and performance for the selected model.
477
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
478
+ # 'priority', then the request will be processed with the corresponding service
479
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
480
+ # Priority processing.
459
481
  # - When not set, the default behavior is 'auto'.
460
482
  #
461
- # When this parameter is set, the response body will include the `service_tier`
462
- # utilized.
483
+ # When the `service_tier` parameter is set, the response body will include the
484
+ # `service_tier` value based on the processing mode actually used to serve the
485
+ # request. This response value may be different from the value set in the
486
+ # parameter.
463
487
  service_tier: nil,
464
488
  # Whether to store the generated model response for later retrieval via API.
465
489
  store: nil,
@@ -493,6 +517,9 @@ module OpenAI
493
517
  # the model to call your own code. Learn more about
494
518
  # [function calling](https://platform.openai.com/docs/guides/function-calling).
495
519
  tools: nil,
520
+ # An integer between 0 and 20 specifying the number of most likely tokens to
521
+ # return at each token position, each with an associated log probability.
522
+ top_logprobs: nil,
496
523
  # An alternative to sampling with temperature, called nucleus sampling, where the
497
524
  # model considers the results of the tokens with top_p probability mass. So 0.1
498
525
  # means only the tokens comprising the top 10% probability mass are considered.
@@ -526,6 +553,7 @@ module OpenAI
526
553
  input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
527
554
  instructions: T.nilable(String),
528
555
  max_output_tokens: T.nilable(Integer),
556
+ max_tool_calls: T.nilable(Integer),
529
557
  metadata: T.nilable(T::Hash[Symbol, String]),
530
558
  model:
531
559
  T.any(
@@ -548,7 +576,8 @@ module OpenAI
548
576
  T.any(
549
577
  OpenAI::Responses::ToolChoiceOptions::OrSymbol,
550
578
  OpenAI::Responses::ToolChoiceTypes,
551
- OpenAI::Responses::ToolChoiceFunction
579
+ OpenAI::Responses::ToolChoiceFunction,
580
+ OpenAI::Responses::ToolChoiceMcp
552
581
  ),
553
582
  tools:
554
583
  T::Array[
@@ -563,6 +592,7 @@ module OpenAI
563
592
  OpenAI::Responses::WebSearchTool
564
593
  )
565
594
  ],
595
+ top_logprobs: T.nilable(Integer),
566
596
  top_p: T.nilable(Float),
567
597
  truncation:
568
598
  T.nilable(
@@ -605,23 +635,23 @@ module OpenAI
605
635
  end
606
636
  end
607
637
 
608
- # Specifies the latency tier to use for processing the request. This parameter is
609
- # relevant for customers subscribed to the scale tier service:
638
+ # Specifies the processing type used for serving the request.
610
639
  #
611
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
612
- # utilize scale tier credits until they are exhausted.
613
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
614
- # be processed using the default service tier with a lower uptime SLA and no
615
- # latency guarantee.
616
- # - If set to 'default', the request will be processed using the default service
617
- # tier with a lower uptime SLA and no latency guarantee.
618
- # - If set to 'flex', the request will be processed with the Flex Processing
619
- # service tier.
620
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
640
+ # - If set to 'auto', then the request will be processed with the service tier
641
+ # configured in the Project settings. Unless otherwise configured, the Project
642
+ # will use 'default'.
643
+ # - If set to 'default', then the request will be processed with the standard
644
+ # pricing and performance for the selected model.
645
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
646
+ # 'priority', then the request will be processed with the corresponding service
647
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
648
+ # Priority processing.
621
649
  # - When not set, the default behavior is 'auto'.
622
650
  #
623
- # When this parameter is set, the response body will include the `service_tier`
624
- # utilized.
651
+ # When the `service_tier` parameter is set, the response body will include the
652
+ # `service_tier` value based on the processing mode actually used to serve the
653
+ # request. This response value may be different from the value set in the
654
+ # parameter.
625
655
  module ServiceTier
626
656
  extend OpenAI::Internal::Type::Enum
627
657
 
@@ -654,6 +684,11 @@ module OpenAI
654
684
  :scale,
655
685
  OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
656
686
  )
687
+ PRIORITY =
688
+ T.let(
689
+ :priority,
690
+ OpenAI::Responses::ResponseCreateParams::ServiceTier::TaggedSymbol
691
+ )
657
692
 
658
693
  sig do
659
694
  override.returns(
@@ -677,7 +712,8 @@ module OpenAI
677
712
  T.any(
678
713
  OpenAI::Responses::ToolChoiceOptions::TaggedSymbol,
679
714
  OpenAI::Responses::ToolChoiceTypes,
680
- OpenAI::Responses::ToolChoiceFunction
715
+ OpenAI::Responses::ToolChoiceFunction,
716
+ OpenAI::Responses::ToolChoiceMcp
681
717
  )
682
718
  end
683
719
 
@@ -16,6 +16,19 @@ module OpenAI
16
16
  sig { returns(String) }
17
17
  attr_accessor :id
18
18
 
19
+ # An object describing the specific action taken in this web search call. Includes
20
+ # details on how the model used the web (search, open_page, find).
21
+ sig do
22
+ returns(
23
+ T.any(
24
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Search,
25
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage,
26
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Find
27
+ )
28
+ )
29
+ end
30
+ attr_accessor :action
31
+
19
32
  # The status of the web search tool call.
20
33
  sig do
21
34
  returns(
@@ -34,6 +47,12 @@ module OpenAI
34
47
  sig do
35
48
  params(
36
49
  id: String,
50
+ action:
51
+ T.any(
52
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Search::OrHash,
53
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage::OrHash,
54
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Find::OrHash
55
+ ),
37
56
  status:
38
57
  OpenAI::Responses::ResponseFunctionWebSearch::Status::OrSymbol,
39
58
  type: Symbol
@@ -42,6 +61,9 @@ module OpenAI
42
61
  def self.new(
43
62
  # The unique ID of the web search tool call.
44
63
  id:,
64
+ # An object describing the specific action taken in this web search call. Includes
65
+ # details on how the model used the web (search, open_page, find).
66
+ action:,
45
67
  # The status of the web search tool call.
46
68
  status:,
47
69
  # The type of the web search tool call. Always `web_search_call`.
@@ -53,6 +75,12 @@ module OpenAI
53
75
  override.returns(
54
76
  {
55
77
  id: String,
78
+ action:
79
+ T.any(
80
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Search,
81
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage,
82
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Find
83
+ ),
56
84
  status:
57
85
  OpenAI::Responses::ResponseFunctionWebSearch::Status::OrSymbol,
58
86
  type: Symbol
@@ -62,6 +90,141 @@ module OpenAI
62
90
  def to_hash
63
91
  end
64
92
 
93
+ # An object describing the specific action taken in this web search call. Includes
94
+ # details on how the model used the web (search, open_page, find).
95
+ module Action
96
+ extend OpenAI::Internal::Type::Union
97
+
98
+ Variants =
99
+ T.type_alias do
100
+ T.any(
101
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Search,
102
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage,
103
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Find
104
+ )
105
+ end
106
+
107
+ class Search < OpenAI::Internal::Type::BaseModel
108
+ OrHash =
109
+ T.type_alias do
110
+ T.any(
111
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Search,
112
+ OpenAI::Internal::AnyHash
113
+ )
114
+ end
115
+
116
+ # The search query.
117
+ sig { returns(String) }
118
+ attr_accessor :query
119
+
120
+ # The action type.
121
+ sig { returns(Symbol) }
122
+ attr_accessor :type
123
+
124
+ # Action type "search" - Performs a web search query.
125
+ sig do
126
+ params(query: String, type: Symbol).returns(T.attached_class)
127
+ end
128
+ def self.new(
129
+ # The search query.
130
+ query:,
131
+ # The action type.
132
+ type: :search
133
+ )
134
+ end
135
+
136
+ sig { override.returns({ query: String, type: Symbol }) }
137
+ def to_hash
138
+ end
139
+ end
140
+
141
+ class OpenPage < OpenAI::Internal::Type::BaseModel
142
+ OrHash =
143
+ T.type_alias do
144
+ T.any(
145
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage,
146
+ OpenAI::Internal::AnyHash
147
+ )
148
+ end
149
+
150
+ # The action type.
151
+ sig { returns(Symbol) }
152
+ attr_accessor :type
153
+
154
+ # The URL opened by the model.
155
+ sig { returns(String) }
156
+ attr_accessor :url
157
+
158
+ # Action type "open_page" - Opens a specific URL from search results.
159
+ sig { params(url: String, type: Symbol).returns(T.attached_class) }
160
+ def self.new(
161
+ # The URL opened by the model.
162
+ url:,
163
+ # The action type.
164
+ type: :open_page
165
+ )
166
+ end
167
+
168
+ sig { override.returns({ type: Symbol, url: String }) }
169
+ def to_hash
170
+ end
171
+ end
172
+
173
+ class Find < OpenAI::Internal::Type::BaseModel
174
+ OrHash =
175
+ T.type_alias do
176
+ T.any(
177
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Find,
178
+ OpenAI::Internal::AnyHash
179
+ )
180
+ end
181
+
182
+ # The pattern or text to search for within the page.
183
+ sig { returns(String) }
184
+ attr_accessor :pattern
185
+
186
+ # The action type.
187
+ sig { returns(Symbol) }
188
+ attr_accessor :type
189
+
190
+ # The URL of the page searched for the pattern.
191
+ sig { returns(String) }
192
+ attr_accessor :url
193
+
194
+ # Action type "find": Searches for a pattern within a loaded page.
195
+ sig do
196
+ params(pattern: String, url: String, type: Symbol).returns(
197
+ T.attached_class
198
+ )
199
+ end
200
+ def self.new(
201
+ # The pattern or text to search for within the page.
202
+ pattern:,
203
+ # The URL of the page searched for the pattern.
204
+ url:,
205
+ # The action type.
206
+ type: :find
207
+ )
208
+ end
209
+
210
+ sig do
211
+ override.returns({ pattern: String, type: Symbol, url: String })
212
+ end
213
+ def to_hash
214
+ end
215
+ end
216
+
217
+ sig do
218
+ override.returns(
219
+ T::Array[
220
+ OpenAI::Responses::ResponseFunctionWebSearch::Action::Variants
221
+ ]
222
+ )
223
+ end
224
+ def self.variants
225
+ end
226
+ end
227
+
65
228
  # The status of the web search tool call.
66
229
  module Status
67
230
  extend OpenAI::Internal::Type::Enum
@@ -6,18 +6,19 @@ module OpenAI
6
6
  # Specify additional output data to include in the model response. Currently
7
7
  # supported values are:
8
8
  #
9
+ # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
10
+ # in code interpreter tool call items.
11
+ # - `computer_call_output.output.image_url`: Include image urls from the computer
12
+ # call output.
9
13
  # - `file_search_call.results`: Include the search results of the file search tool
10
14
  # call.
11
15
  # - `message.input_image.image_url`: Include image urls from the input message.
12
- # - `computer_call_output.output.image_url`: Include image urls from the computer
13
- # call output.
16
+ # - `message.output_text.logprobs`: Include logprobs with assistant messages.
14
17
  # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
15
18
  # tokens in reasoning item outputs. This enables reasoning items to be used in
16
19
  # multi-turn conversations when using the Responses API statelessly (like when
17
20
  # the `store` parameter is set to `false`, or when an organization is enrolled
18
21
  # in the zero data retention program).
19
- # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
20
- # in code interpreter tool call items.
21
22
  module ResponseIncludable
22
23
  extend OpenAI::Internal::Type::Enum
23
24
 
@@ -25,6 +26,16 @@ module OpenAI
25
26
  T.type_alias { T.all(Symbol, OpenAI::Responses::ResponseIncludable) }
26
27
  OrSymbol = T.type_alias { T.any(Symbol, String) }
27
28
 
29
+ CODE_INTERPRETER_CALL_OUTPUTS =
30
+ T.let(
31
+ :"code_interpreter_call.outputs",
32
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
33
+ )
34
+ COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL =
35
+ T.let(
36
+ :"computer_call_output.output.image_url",
37
+ OpenAI::Responses::ResponseIncludable::TaggedSymbol
38
+ )
28
39
  FILE_SEARCH_CALL_RESULTS =
29
40
  T.let(
30
41
  :"file_search_call.results",
@@ -35,9 +46,9 @@ module OpenAI
35
46
  :"message.input_image.image_url",
36
47
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
37
48
  )
38
- COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL =
49
+ MESSAGE_OUTPUT_TEXT_LOGPROBS =
39
50
  T.let(
40
- :"computer_call_output.output.image_url",
51
+ :"message.output_text.logprobs",
41
52
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
42
53
  )
43
54
  REASONING_ENCRYPTED_CONTENT =
@@ -45,11 +56,6 @@ module OpenAI
45
56
  :"reasoning.encrypted_content",
46
57
  OpenAI::Responses::ResponseIncludable::TaggedSymbol
47
58
  )
48
- CODE_INTERPRETER_CALL_OUTPUTS =
49
- T.let(
50
- :"code_interpreter_call.outputs",
51
- OpenAI::Responses::ResponseIncludable::TaggedSymbol
52
- )
53
59
 
54
60
  sig do
55
61
  override.returns(
@@ -0,0 +1,53 @@
1
+ # typed: strong
2
+
3
+ module OpenAI
4
+ module Models
5
+ module Responses
6
+ class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel
7
+ OrHash =
8
+ T.type_alias do
9
+ T.any(OpenAI::Responses::ToolChoiceMcp, OpenAI::Internal::AnyHash)
10
+ end
11
+
12
+ # The label of the MCP server to use.
13
+ sig { returns(String) }
14
+ attr_accessor :server_label
15
+
16
+ # For MCP tools, the type is always `mcp`.
17
+ sig { returns(Symbol) }
18
+ attr_accessor :type
19
+
20
+ # The name of the tool to call on the server.
21
+ sig { returns(T.nilable(String)) }
22
+ attr_accessor :name
23
+
24
+ # Use this option to force the model to call a specific tool on a remote MCP
25
+ # server.
26
+ sig do
27
+ params(
28
+ server_label: String,
29
+ name: T.nilable(String),
30
+ type: Symbol
31
+ ).returns(T.attached_class)
32
+ end
33
+ def self.new(
34
+ # The label of the MCP server to use.
35
+ server_label:,
36
+ # The name of the tool to call on the server.
37
+ name: nil,
38
+ # For MCP tools, the type is always `mcp`.
39
+ type: :mcp
40
+ )
41
+ end
42
+
43
+ sig do
44
+ override.returns(
45
+ { server_label: String, type: Symbol, name: T.nilable(String) }
46
+ )
47
+ end
48
+ def to_hash
49
+ end
50
+ end
51
+ end
52
+ end
53
+ end
@@ -18,7 +18,6 @@ module OpenAI
18
18
  # - `web_search_preview`
19
19
  # - `computer_use_preview`
20
20
  # - `code_interpreter`
21
- # - `mcp`
22
21
  # - `image_generation`
23
22
  sig { returns(OpenAI::Responses::ToolChoiceTypes::Type::OrSymbol) }
24
23
  attr_accessor :type
@@ -40,7 +39,6 @@ module OpenAI
40
39
  # - `web_search_preview`
41
40
  # - `computer_use_preview`
42
41
  # - `code_interpreter`
43
- # - `mcp`
44
42
  # - `image_generation`
45
43
  type:
46
44
  )
@@ -63,7 +61,6 @@ module OpenAI
63
61
  # - `web_search_preview`
64
62
  # - `computer_use_preview`
65
63
  # - `code_interpreter`
66
- # - `mcp`
67
64
  # - `image_generation`
68
65
  module Type
69
66
  extend OpenAI::Internal::Type::Enum
@@ -104,8 +101,6 @@ module OpenAI
104
101
  :code_interpreter,
105
102
  OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol
106
103
  )
107
- MCP =
108
- T.let(:mcp, OpenAI::Responses::ToolChoiceTypes::Type::TaggedSymbol)
109
104
 
110
105
  sig do
111
106
  override.returns(
@@ -43,6 +43,26 @@ module OpenAI
43
43
  :"o3-pro-2025-06-10",
44
44
  OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
45
45
  )
46
+ O3_DEEP_RESEARCH =
47
+ T.let(
48
+ :"o3-deep-research",
49
+ OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
50
+ )
51
+ O3_DEEP_RESEARCH_2025_06_26 =
52
+ T.let(
53
+ :"o3-deep-research-2025-06-26",
54
+ OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
55
+ )
56
+ O4_MINI_DEEP_RESEARCH =
57
+ T.let(
58
+ :"o4-mini-deep-research",
59
+ OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
60
+ )
61
+ O4_MINI_DEEP_RESEARCH_2025_06_26 =
62
+ T.let(
63
+ :"o4-mini-deep-research-2025-06-26",
64
+ OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
65
+ )
46
66
  COMPUTER_USE_PREVIEW =
47
67
  T.let(
48
68
  :"computer-use-preview",