openai 0.35.1 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +17 -0
  3. data/README.md +21 -15
  4. data/lib/openai/helpers/structured_output/union_of.rb +5 -1
  5. data/lib/openai/internal/transport/pooled_net_requester.rb +6 -2
  6. data/lib/openai/internal/type/enum.rb +6 -6
  7. data/lib/openai/models/batch_create_params.rb +9 -6
  8. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  9. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  10. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  11. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  12. data/lib/openai/models/chat_model.rb +5 -0
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  15. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  16. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  17. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  18. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  19. data/lib/openai/models/evals/run_create_params.rb +20 -12
  20. data/lib/openai/models/evals/run_create_response.rb +20 -12
  21. data/lib/openai/models/evals/run_list_response.rb +20 -12
  22. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  23. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  24. data/lib/openai/models/reasoning.rb +10 -6
  25. data/lib/openai/models/reasoning_effort.rb +10 -5
  26. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  27. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  28. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  29. data/lib/openai/models/responses/response.rb +46 -11
  30. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  31. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  32. data/lib/openai/models/responses/response_create_params.rb +42 -9
  33. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  34. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  35. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  36. data/lib/openai/models/responses/response_input_item.rb +395 -1
  37. data/lib/openai/models/responses/response_item.rb +13 -1
  38. data/lib/openai/models/responses/response_item_list.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item.rb +13 -1
  40. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  41. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  42. data/lib/openai/models/responses/tool.rb +7 -1
  43. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  44. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  45. data/lib/openai/resources/chat/completions.rb +6 -2
  46. data/lib/openai/resources/conversations/items.rb +3 -3
  47. data/lib/openai/resources/conversations.rb +1 -1
  48. data/lib/openai/resources/responses/input_items.rb +1 -1
  49. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  50. data/lib/openai/resources/responses.rb +12 -8
  51. data/lib/openai/version.rb +1 -1
  52. data/lib/openai.rb +10 -0
  53. data/manifest.yaml +1 -0
  54. data/rbi/openai/internal/transport/pooled_net_requester.rbi +6 -2
  55. data/rbi/openai/models/batch_create_params.rbi +17 -9
  56. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  57. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  58. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  59. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  60. data/rbi/openai/models/chat_model.rbi +7 -0
  61. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  62. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  63. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  64. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  65. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  66. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  67. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  68. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  69. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  70. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  71. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  72. data/rbi/openai/models/reasoning.rbi +18 -10
  73. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  74. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  75. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  76. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  77. data/rbi/openai/models/responses/response.rbi +73 -2
  78. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  79. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  80. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  81. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  82. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  83. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  84. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  85. data/rbi/openai/models/responses/response_item.rbi +4 -0
  86. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  87. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  88. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  89. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  90. data/rbi/openai/models/responses/tool.rbi +2 -0
  91. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  92. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  93. data/rbi/openai/resources/batches.rbi +4 -3
  94. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  95. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  96. data/rbi/openai/resources/chat/completions.rbi +38 -12
  97. data/rbi/openai/resources/conversations/items.rbi +4 -0
  98. data/rbi/openai/resources/conversations.rbi +4 -0
  99. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  100. data/rbi/openai/resources/responses.rbi +28 -2
  101. data/sig/openai/internal/transport/pooled_net_requester.rbs +4 -1
  102. data/sig/openai/models/batch_create_params.rbs +2 -0
  103. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  104. data/sig/openai/models/chat_model.rbs +11 -1
  105. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  106. data/sig/openai/models/reasoning_effort.rbs +2 -1
  107. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  108. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  109. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  110. data/sig/openai/models/responses/response.rbs +18 -0
  111. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  112. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  113. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  114. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  115. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  116. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  117. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  118. data/sig/openai/models/responses/response_item.rbs +4 -0
  119. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  120. data/sig/openai/models/responses/tool.rbs +2 -0
  121. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  122. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  123. data/sig/openai/resources/chat/completions.rbs +2 -0
  124. data/sig/openai/resources/responses.rbs +2 -0
  125. metadata +29 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 07be6bea1a531ce6245b72aa45682ff1485fdc40ea05cab7ad9731aae8e4496e
-  data.tar.gz: aeb5a58556e34b61646e985f432c9f250cbc0259916eb5ebdecbec34c1f853a0
+  metadata.gz: 17baebbeaa4b29e9cd335b7cf1c0bef7a1e63832804c6f332e6cb508a4afd4d8
+  data.tar.gz: b9009868811efc0f57c9b33bbe51c031436e0ccebb2f944ad5d60259095a4099
 SHA512:
-  metadata.gz: e41f0e47371787755d2b8fc3370285c873ec19aa12995d679c03b804d9be73ecee94de61ff1321c60722a09244262364371ec1db64ad4cf96fafbdfdf2c70c39
-  data.tar.gz: 371503bbcc663eab6ee955c26b328837a2e952004cfcd7a219ca9b913edffe5c3658f7a3d15d8e3dc3bcc00d2376b09f39758f3465d73289f10d5b3cf757692c
+  metadata.gz: 8b5ca35d9d0758f61af7f3b7966017ad6df9b3e0a582d2cbc3bc455ac3d651bed29554710a0c05e22a5ad481e0469cb8ab0448cbe1d356c52f07ee2bd0cfff65
+  data.tar.gz: 474f40178d640e14ddb001786670e0e063c45b7205895115a72a28075597ed798b52ab3cd1952296e847822f056433bb907d7022dae5c3780451c872709485f9
data/CHANGELOG.md CHANGED
@@ -1,5 +1,22 @@
 # Changelog
 
+## 0.36.0 (2025-11-13)
+
+Full Changelog: [v0.35.2...v0.36.0](https://github.com/openai/openai-ruby/compare/v0.35.2...v0.36.0)
+
+### Features
+
+* **api:** gpt 5.1 ([26ece0e](https://github.com/openai/openai-ruby/commit/26ece0eb68486e40066c89f626b9a83c4f274889))
+
+## 0.35.2 (2025-11-05)
+
+Full Changelog: [v0.35.1...v0.35.2](https://github.com/openai/openai-ruby/compare/v0.35.1...v0.35.2)
+
+### Bug Fixes
+
+* better thread safety via early initializing SSL store during HTTP client creation ([e7d9a3d](https://github.com/openai/openai-ruby/commit/e7d9a3d70c0930ac248b4da680296213cb3e163d))
+* schema generation ([#862](https://github.com/openai/openai-ruby/issues/862)) ([2c9b91a](https://github.com/openai/openai-ruby/commit/2c9b91acc79262dd56ef52854ad64384f172984b))
+
 ## 0.35.1 (2025-11-04)
 
 Full Changelog: [v0.35.0...v0.35.1](https://github.com/openai/openai-ruby/compare/v0.35.0...v0.35.1)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->
 
 ```ruby
-gem "openai", "~> 0.35.1"
+gem "openai", "~> 0.36.0"
 ```
 
 <!-- x-release-please-end -->
@@ -30,7 +30,10 @@ openai = OpenAI::Client.new(
   api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted
 )
 
-chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")
+chat_completion = openai.chat.completions.create(
+  messages: [{role: "user", content: "Say this is a test"}],
+  model: :"gpt-5.1"
+)
 
 puts(chat_completion)
 ```
@@ -42,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
 ```ruby
 stream = openai.responses.stream(
   input: "Write a haiku about OpenAI.",
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )
 
 stream.each do |event|
@@ -340,7 +343,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-  model: :"gpt-5",
+  model: :"gpt-5.1",
   request_options: {max_retries: 5}
 )
 ```
@@ -358,7 +361,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
-  model: :"gpt-5",
+  model: :"gpt-5.1",
   request_options: {timeout: 5}
 )
 ```
@@ -393,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
 chat_completion =
   openai.chat.completions.create(
     messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-    model: :"gpt-5",
+    model: :"gpt-5.1",
     request_options: {
       extra_query: {my_query_parameter: value},
       extra_body: {my_body_parameter: value},
@@ -441,7 +444,7 @@ You can provide typesafe request parameters like so:
 ```ruby
 openai.chat.completions.create(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )
 ```
 
@@ -449,12 +452,15 @@ Or, equivalently:
 
 ```ruby
 # Hashes work, but are not typesafe:
-openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: :"gpt-5")
+openai.chat.completions.create(
+  messages: [{role: "user", content: "Say this is a test"}],
+  model: :"gpt-5.1"
+)
 
 # You can also splat a full Params class:
 params = OpenAI::Chat::CompletionCreateParams.new(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )
 openai.chat.completions.create(**params)
 ```
@@ -464,11 +470,11 @@ openai.chat.completions.create(**params)
 Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime:
 
 ```ruby
-# :minimal
-puts(OpenAI::ReasoningEffort::MINIMAL)
+# :"in-memory"
+puts(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY)
 
-# Revealed type: `T.all(OpenAI::ReasoningEffort, Symbol)`
-T.reveal_type(OpenAI::ReasoningEffort::MINIMAL)
+# Revealed type: `T.all(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention, Symbol)`
+T.reveal_type(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY)
 ```
 
 Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value:
@@ -476,13 +482,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants
 ```ruby
 # Using the enum constants preserves the tagged type information:
 openai.chat.completions.create(
-  reasoning_effort: OpenAI::ReasoningEffort::MINIMAL,
+  prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY,
   # …
 )
 
 # Literal values are also permissible:
 openai.chat.completions.create(
-  reasoning_effort: :minimal,
+  prompt_cache_retention: :"in-memory",
   # …
 )
 ```
data/lib/openai/helpers/structured_output/union_of.rb CHANGED
@@ -21,6 +21,7 @@ module OpenAI
       #
       # @return [Hash{Symbol=>Object}]
       def to_json_schema_inner(state:)
+        # rubocop:disable Metrics/BlockLength
         OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.cache_def!(state, type: self) do
           path = state.fetch(:path)
           mergeable_keys = {[:anyOf] => 0, [:type] => 0}
@@ -33,7 +34,9 @@ module OpenAI
           end
 
           schemas.each do |schema|
-            mergeable_keys.each_key { mergeable_keys[_1] += 1 if schema.keys == _1 }
+            mergeable_keys.each_key do
+              mergeable_keys[_1] += 1 if schema.keys == _1 && schema[_1].is_a?(Array)
+            end
           end
           mergeable = mergeable_keys.any? { _1.last == schemas.length }
           if mergeable
@@ -48,6 +51,7 @@ module OpenAI
             }
           end
         end
+        # rubocop:enable Metrics/BlockLength
       end
 
       private_class_method :new
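The added guard only counts a schema toward merging when the shared key actually holds an Array. A simplified sketch of that intent on plain hashes (illustrative only, not the library's internals; the sample schemas are made up):

```ruby
# Only schemas whose lone key maps to an Array should be merged together.
schemas = [
  {anyOf: [{type: "string"}, {type: "integer"}]}, # Array value: counts toward merging
  {anyOf: {type: "boolean"}}                      # same key, non-Array value: skipped
]

mergeable = schemas.count { |schema| schema.keys == [:anyOf] && schema[:anyOf].is_a?(Array) }
puts(mergeable) # => 1, short of schemas.length, so no merge is attempted
```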
data/lib/openai/internal/transport/pooled_net_requester.rb CHANGED
@@ -16,10 +16,11 @@ module OpenAI
      class << self
        # @api private
        #
+       # @param cert_store [OpenSSL::X509::Store]
        # @param url [URI::Generic]
        #
        # @return [Net::HTTP]
-       def connect(url)
+       def connect(cert_store:, url:)
          port =
            case [url.port, url.scheme]
            in [Integer, _]
@@ -33,6 +34,8 @@ module OpenAI
          Net::HTTP.new(url.host, port).tap do
            _1.use_ssl = %w[https wss].include?(url.scheme)
            _1.max_retries = 0
+
+           (_1.cert_store = cert_store) if _1.use_ssl?
          end
        end
 
@@ -102,7 +105,7 @@ module OpenAI
        pool =
          @mutex.synchronize do
            @pools[origin] ||= ConnectionPool.new(size: @size) do
-             self.class.connect(url)
+             self.class.connect(cert_store: @cert_store, url: url)
            end
          end
 
@@ -192,6 +195,7 @@ module OpenAI
      def initialize(size: self.class::DEFAULT_MAX_CONNECTIONS)
        @mutex = Mutex.new
        @size = size
+       @cert_store = OpenSSL::X509::Store.new.tap(&:set_default_paths)
        @pools = {}
      end
data/lib/openai/internal/type/enum.rb CHANGED
@@ -19,11 +19,11 @@ module OpenAI
      # @example
      #   # `chat_model` is a `OpenAI::ChatModel`
      #   case chat_model
-     #   when OpenAI::ChatModel::GPT_5
+     #   when OpenAI::ChatModel::GPT_5_1
      #     # ...
-     #   when OpenAI::ChatModel::GPT_5_MINI
+     #   when OpenAI::ChatModel::GPT_5_1_2025_11_13
      #     # ...
-     #   when OpenAI::ChatModel::GPT_5_NANO
+     #   when OpenAI::ChatModel::GPT_5_1_CODEX
      #     # ...
      #   else
      #     puts(chat_model)
@@ -31,11 +31,11 @@ module OpenAI
      #
      # @example
      #   case chat_model
-     #   in :"gpt-5"
+     #   in :"gpt-5.1"
      #     # ...
-     #   in :"gpt-5-mini"
+     #   in :"gpt-5.1-2025-11-13"
      #     # ...
-     #   in :"gpt-5-nano"
+     #   in :"gpt-5.1-codex"
      #     # ...
      #   else
      #     puts(chat_model)
data/lib/openai/models/batch_create_params.rb CHANGED
@@ -16,9 +16,10 @@ module OpenAI
 
      # @!attribute endpoint
      #   The endpoint to be used for all requests in the batch. Currently
-     #   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-     #   are supported. Note that `/v1/embeddings` batches are also restricted to a
-     #   maximum of 50,000 embedding inputs across all requests in the batch.
+     #   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+     #   and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+     #   restricted to a maximum of 50,000 embedding inputs across all requests in the
+     #   batch.
      #
      #   @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint]
      required :endpoint, enum: -> { OpenAI::BatchCreateParams::Endpoint }
@@ -83,9 +84,10 @@ module OpenAI
      end
 
      # The endpoint to be used for all requests in the batch. Currently
-     # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-     # are supported. Note that `/v1/embeddings` batches are also restricted to a
-     # maximum of 50,000 embedding inputs across all requests in the batch.
+     # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+     # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+     # restricted to a maximum of 50,000 embedding inputs across all requests in the
+     # batch.
      module Endpoint
        extend OpenAI::Internal::Type::Enum
 
@@ -93,6 +95,7 @@ module OpenAI
        V1_CHAT_COMPLETIONS = :"/v1/chat/completions"
        V1_EMBEDDINGS = :"/v1/embeddings"
        V1_COMPLETIONS = :"/v1/completions"
+       V1_MODERATIONS = :"/v1/moderations"
 
        # @!method self.values
        #   @return [Array<Symbol>]
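With the new enum member, batches can target the moderations endpoint. A sketch, assuming an `openai` client as in the README; the input file ID is a placeholder for an uploaded JSONL batch file:

```ruby
batch = openai.batches.create(
  completion_window: :"24h",
  endpoint: OpenAI::BatchCreateParams::Endpoint::V1_MODERATIONS, # or the literal :"/v1/moderations"
  input_file_id: "file-abc123" # placeholder
)
puts(batch.id)
```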
data/lib/openai/models/beta/assistant_create_params.rb CHANGED
@@ -51,12 +51,16 @@ module OpenAI
      # @!attribute reasoning_effort
      #   Constrains effort on reasoning for
      #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-     #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-     #   effort can result in faster responses and fewer tokens used on reasoning in a
-     #   response.
+     #   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+     #   reasoning effort can result in faster responses and fewer tokens used on
+     #   reasoning in a response.
      #
-     #   Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-     #   effort.
+     #   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+     #     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+     #     calls are supported for all reasoning values in gpt-5.1.
+     #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+     #     support `none`.
+     #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
      #
      #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
      optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/beta/assistant_update_params.rb CHANGED
@@ -51,12 +51,16 @@ module OpenAI
      # @!attribute reasoning_effort
      #   Constrains effort on reasoning for
      #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-     #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-     #   effort can result in faster responses and fewer tokens used on reasoning in a
-     #   response.
+     #   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+     #   reasoning effort can result in faster responses and fewer tokens used on
+     #   reasoning in a response.
      #
-     #   Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-     #   effort.
+     #   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+     #     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+     #     calls are supported for all reasoning values in gpt-5.1.
+     #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+     #     support `none`.
+     #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
      #
      #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
      optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/beta/threads/run_create_params.rb CHANGED
@@ -109,12 +109,16 @@ module OpenAI
      # @!attribute reasoning_effort
      #   Constrains effort on reasoning for
      #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-     #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-     #   effort can result in faster responses and fewer tokens used on reasoning in a
-     #   response.
-     #
-     #   Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-     #   effort.
+     #   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+     #   reasoning effort can result in faster responses and fewer tokens used on
+     #   reasoning in a response.
+     #
+     #   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+     #     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+     #     calls are supported for all reasoning values in gpt-5.1.
+     #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+     #     support `none`.
+     #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
      #
      #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
      optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
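Per the updated documentation, `gpt-5.1` accepts (and defaults to) `none`. A sketch of passing the new value explicitly, assuming an `openai` client as in the README:

```ruby
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.1",
  reasoning_effort: :none # new in this release; models before gpt-5.1 do not support it
)
```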
data/lib/openai/models/chat/completion_create_params.rb CHANGED
@@ -190,15 +190,30 @@ module OpenAI
      #   @return [String, nil]
      optional :prompt_cache_key, String
 
+     # @!attribute prompt_cache_retention
+     #   The retention policy for the prompt cache. Set to `24h` to enable extended
+     #   prompt caching, which keeps cached prefixes active for longer, up to a maximum
+     #   of 24 hours.
+     #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+     #
+     #   @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil]
+     optional :prompt_cache_retention,
+              enum: -> { OpenAI::Chat::CompletionCreateParams::PromptCacheRetention },
+              nil?: true
+
      # @!attribute reasoning_effort
      #   Constrains effort on reasoning for
      #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-     #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-     #   effort can result in faster responses and fewer tokens used on reasoning in a
-     #   response.
+     #   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+     #   reasoning effort can result in faster responses and fewer tokens used on
+     #   reasoning in a response.
      #
-     #   Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-     #   effort.
+     #   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+     #     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+     #     calls are supported for all reasoning values in gpt-5.1.
+     #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+     #     support `none`.
+     #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
      #
      #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
      optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -368,7 +383,7 @@ module OpenAI
      #   @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
      optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
 
-     # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
+     # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::Chat::CompletionCreateParams} for more details.
      #
@@ -406,6 +421,8 @@ module OpenAI
      #
      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
      #
+     #   @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+     #
      #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
      #
      #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
@@ -551,6 +568,20 @@ module OpenAI
        #   @return [Array<Symbol>]
      end
 
+     # The retention policy for the prompt cache. Set to `24h` to enable extended
+     # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+     # of 24 hours.
+     # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+     module PromptCacheRetention
+       extend OpenAI::Internal::Type::Enum
+
+       IN_MEMORY = :"in-memory"
+       PROMPT_CACHE_RETENTION_24H = :"24h"
+
+       # @!method self.values
+       #   @return [Array<Symbol>]
+     end
+
      # An object specifying the format that the model must output.
      #
      # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
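Usage of the new parameter mirrors the enum examples in the README diff above; either the tagged constant or its literal symbol works:

```ruby
openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.1",
  # Extended 24-hour retention; :"in-memory" (IN_MEMORY) is the other value.
  prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::PROMPT_CACHE_RETENTION_24H
)
```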
data/lib/openai/models/chat_model.rb CHANGED
@@ -5,6 +5,11 @@ module OpenAI
    module ChatModel
      extend OpenAI::Internal::Type::Enum
 
+     GPT_5_1 = :"gpt-5.1"
+     GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
+     GPT_5_1_CODEX = :"gpt-5.1-codex"
+     GPT_5_1_MINI = :"gpt-5.1-mini"
+     GPT_5_1_CHAT_LATEST = :"gpt-5.1-chat-latest"
      GPT_5 = :"gpt-5"
      GPT_5_MINI = :"gpt-5-mini"
      GPT_5_NANO = :"gpt-5-nano"
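Like the existing model constants, the new entries are tagged symbols, interchangeable with their literal values:

```ruby
# :"gpt-5.1"
puts(OpenAI::ChatModel::GPT_5_1)

# Tagged symbols compare equal to the plain symbol:
OpenAI::ChatModel::GPT_5_1 == :"gpt-5.1" # => true
```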
data/lib/openai/models/conversations/conversation_create_params.rb CHANGED
@@ -12,7 +12,7 @@ module OpenAI
      #   Initial items to include in the conversation context. You may add up to 20 items
      #   at a time.
      #
-     #   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+     #   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
      optional :items,
               -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] },
               nil?: true
@@ -32,7 +32,7 @@ module OpenAI
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::Conversations::ConversationCreateParams} for more details.
      #
-     #   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
+     #   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
      #
      #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
data/lib/openai/models/conversations/conversation_item.rb CHANGED
@@ -54,6 +54,18 @@ module OpenAI
      # The output of a local shell tool call.
      variant :local_shell_call_output, -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput }
 
+     # A tool call that executes one or more shell commands in a managed environment.
+     variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall }
+
+     # The output of a shell tool call.
+     variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput }
+
+     # A tool call that applies file diffs by creating, deleting, or updating files.
+     variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall }
+
+     # The output emitted by an apply patch tool call.
+     variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput }
+
      # A list of tools available on an MCP server.
      variant :mcp_list_tools, -> { OpenAI::Conversations::ConversationItem::McpListTools }
@@ -592,7 +604,7 @@ module OpenAI
      end
 
      # @!method self.variants
-     #   @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)]
+     #   @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)]
    end
  end
 
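Code that switches over conversation items can now encounter the four new variants. A hedged sketch (the conversation ID is a placeholder and the handling is illustrative only):

```ruby
openai.conversations.items.list("conv_123").each do |item|
  case item
  when OpenAI::Responses::ResponseFunctionShellToolCall
    puts("shell tool call")
  when OpenAI::Responses::ResponseApplyPatchToolCall
    puts("apply_patch tool call")
  else
    puts(item.class)
  end
end
```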
data/lib/openai/models/conversations/conversation_item_list.rb CHANGED
@@ -8,7 +8,7 @@ module OpenAI
      # @!attribute data
      #   A list of conversation items.
      #
-     #   @return [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
+     #   @return [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
      required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem] }
 
      # @!attribute first_id
@@ -38,7 +38,7 @@ module OpenAI
      # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list)
      #   A list of Conversation items.
      #
-     #   @param data [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>] A list of conversation items.
+     #   @param data [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>] A list of conversation items.
      #
      #   @param first_id [String] The ID of the first item in the list.
      #
data/lib/openai/models/conversations/item_create_params.rb CHANGED
@@ -11,7 +11,7 @@ module OpenAI
      # @!attribute items
      #   The items to add to the conversation. You may add up to 20 items at a time.
      #
-     #   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
+     #   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
      required :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] }
 
      # @!attribute include
@@ -26,7 +26,7 @@ module OpenAI
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::Conversations::ItemCreateParams} for more details.
      #
-     #   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
+     #   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
      #
      #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
      #
data/lib/openai/models/evals/create_eval_completions_run_data_source.rb CHANGED
@@ -462,12 +462,16 @@ module OpenAI
      # @!attribute reasoning_effort
      #   Constrains effort on reasoning for
      #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-     #   supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
-     #   effort can result in faster responses and fewer tokens used on reasoning in a
-     #   response.
+     #   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+     #   reasoning effort can result in faster responses and fewer tokens used on
+     #   reasoning in a response.
      #
-     #   Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
-     #   effort.
+     #   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+     #     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+     #     calls are supported for all reasoning values in gpt-5.1.
+     #   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+     #     support `none`.
+     #   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
      #
      #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
      optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true