openai 0.35.1 → 0.36.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +17 -0
  3. data/README.md +21 -15
  4. data/lib/openai/helpers/structured_output/union_of.rb +5 -1
  5. data/lib/openai/internal/transport/pooled_net_requester.rb +6 -2
  6. data/lib/openai/internal/type/enum.rb +6 -6
  7. data/lib/openai/models/batch_create_params.rb +9 -6
  8. data/lib/openai/models/beta/assistant_create_params.rb +9 -5
  9. data/lib/openai/models/beta/assistant_update_params.rb +9 -5
  10. data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
  11. data/lib/openai/models/chat/completion_create_params.rb +37 -6
  12. data/lib/openai/models/chat_model.rb +5 -0
  13. data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
  14. data/lib/openai/models/conversations/conversation_item.rb +13 -1
  15. data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
  16. data/lib/openai/models/conversations/item_create_params.rb +2 -2
  17. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
  18. data/lib/openai/models/evals/run_cancel_response.rb +20 -12
  19. data/lib/openai/models/evals/run_create_params.rb +20 -12
  20. data/lib/openai/models/evals/run_create_response.rb +20 -12
  21. data/lib/openai/models/evals/run_list_response.rb +20 -12
  22. data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
  23. data/lib/openai/models/graders/score_model_grader.rb +9 -5
  24. data/lib/openai/models/reasoning.rb +10 -6
  25. data/lib/openai/models/reasoning_effort.rb +10 -5
  26. data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
  27. data/lib/openai/models/responses/function_shell_tool.rb +20 -0
  28. data/lib/openai/models/responses/input_token_count_params.rb +14 -8
  29. data/lib/openai/models/responses/response.rb +46 -11
  30. data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
  31. data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
  32. data/lib/openai/models/responses/response_create_params.rb +42 -9
  33. data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
  34. data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
  35. data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
  36. data/lib/openai/models/responses/response_input_item.rb +395 -1
  37. data/lib/openai/models/responses/response_item.rb +13 -1
  38. data/lib/openai/models/responses/response_item_list.rb +2 -2
  39. data/lib/openai/models/responses/response_output_item.rb +13 -1
  40. data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
  41. data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
  42. data/lib/openai/models/responses/tool.rb +7 -1
  43. data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
  44. data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
  45. data/lib/openai/resources/chat/completions.rb +6 -2
  46. data/lib/openai/resources/conversations/items.rb +3 -3
  47. data/lib/openai/resources/conversations.rb +1 -1
  48. data/lib/openai/resources/responses/input_items.rb +1 -1
  49. data/lib/openai/resources/responses/input_tokens.rb +3 -3
  50. data/lib/openai/resources/responses.rb +12 -8
  51. data/lib/openai/version.rb +1 -1
  52. data/lib/openai.rb +10 -0
  53. data/manifest.yaml +1 -0
  54. data/rbi/openai/internal/transport/pooled_net_requester.rbi +6 -2
  55. data/rbi/openai/models/batch_create_params.rbi +17 -9
  56. data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
  57. data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
  58. data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
  59. data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
  60. data/rbi/openai/models/chat_model.rbi +7 -0
  61. data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
  62. data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
  63. data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
  64. data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
  65. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
  66. data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
  67. data/rbi/openai/models/evals/run_create_params.rbi +44 -20
  68. data/rbi/openai/models/evals/run_create_response.rbi +40 -20
  69. data/rbi/openai/models/evals/run_list_response.rbi +40 -20
  70. data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
  71. data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
  72. data/rbi/openai/models/reasoning.rbi +18 -10
  73. data/rbi/openai/models/reasoning_effort.rbi +10 -5
  74. data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
  75. data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
  76. data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
  77. data/rbi/openai/models/responses/response.rbi +73 -2
  78. data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
  79. data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
  80. data/rbi/openai/models/responses/response_create_params.rbi +87 -5
  81. data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
  82. data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
  83. data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
  84. data/rbi/openai/models/responses/response_input_item.rbi +675 -0
  85. data/rbi/openai/models/responses/response_item.rbi +4 -0
  86. data/rbi/openai/models/responses/response_item_list.rbi +4 -0
  87. data/rbi/openai/models/responses/response_output_item.rbi +4 -0
  88. data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
  89. data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
  90. data/rbi/openai/models/responses/tool.rbi +2 -0
  91. data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
  92. data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
  93. data/rbi/openai/resources/batches.rbi +4 -3
  94. data/rbi/openai/resources/beta/assistants.rbi +18 -10
  95. data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
  96. data/rbi/openai/resources/chat/completions.rbi +38 -12
  97. data/rbi/openai/resources/conversations/items.rbi +4 -0
  98. data/rbi/openai/resources/conversations.rbi +4 -0
  99. data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
  100. data/rbi/openai/resources/responses.rbi +28 -2
  101. data/sig/openai/internal/transport/pooled_net_requester.rbs +4 -1
  102. data/sig/openai/models/batch_create_params.rbs +2 -0
  103. data/sig/openai/models/chat/completion_create_params.rbs +16 -0
  104. data/sig/openai/models/chat_model.rbs +11 -1
  105. data/sig/openai/models/conversations/conversation_item.rbs +4 -0
  106. data/sig/openai/models/reasoning_effort.rbs +2 -1
  107. data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
  108. data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
  109. data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
  110. data/sig/openai/models/responses/response.rbs +18 -0
  111. data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
  112. data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
  113. data/sig/openai/models/responses/response_create_params.rbs +18 -0
  114. data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
  115. data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
  116. data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
  117. data/sig/openai/models/responses/response_input_item.rbs +276 -0
  118. data/sig/openai/models/responses/response_item.rbs +4 -0
  119. data/sig/openai/models/responses/response_output_item.rbs +4 -0
  120. data/sig/openai/models/responses/tool.rbs +2 -0
  121. data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
  122. data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
  123. data/sig/openai/resources/chat/completions.rbs +2 -0
  124. data/sig/openai/resources/responses.rbs +2 -0
  125. metadata +29 -2
@@ -47,12 +47,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort
 
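The comment change above (repeated for several params files below) documents the new `none` reasoning effort that ships alongside `gpt-5.1`. As a rough sketch only, not taken from this release's docs, passing the new value through the Ruby client might look like the following; the prompt text and API key handling are illustrative:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # `reasoning_effort: :none` skips reasoning; per the comment above, models
    # older than gpt-5.1 do not accept :none and default to :medium instead.
    completion = client.chat.completions.create(
      model: :"gpt-5.1",
      reasoning_effort: :none,
      messages: [{role: :user, content: "Summarize this changelog entry."}]
    )
    puts completion.choices.first.message.content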
@@ -212,12 +216,16 @@ module OpenAI
  name: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -67,12 +67,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort
 
@@ -236,12 +240,16 @@ module OpenAI
  name: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -113,12 +113,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort
 
@@ -334,12 +338,16 @@ module OpenAI
  parallel_tool_calls: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -225,14 +225,31 @@ module OpenAI
  sig { params(prompt_cache_key: String).void }
  attr_writer :prompt_cache_key
 
+ # The retention policy for the prompt cache. Set to `24h` to enable extended
+ # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ # of 24 hours.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ sig do
+   returns(
+     T.nilable(
+       OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol
+     )
+   )
+ end
+ attr_accessor :prompt_cache_retention
+
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort
 
@@ -515,6 +532,10 @@ module OpenAI
  T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
  presence_penalty: T.nilable(Float),
  prompt_cache_key: String,
+ prompt_cache_retention:
+   T.nilable(
+     OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol
+   ),
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
  T.any(
@@ -665,14 +686,23 @@ module OpenAI
  # hit rates. Replaces the `user` field.
  # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
  prompt_cache_key: nil,
+ # The retention policy for the prompt cache. Set to `24h` to enable extended
+ # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ # of 24 hours.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ prompt_cache_retention: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -815,6 +845,10 @@ module OpenAI
  T.nilable(OpenAI::Chat::ChatCompletionPredictionContent),
  presence_penalty: T.nilable(Float),
  prompt_cache_key: String,
+ prompt_cache_retention:
+   T.nilable(
+     OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol
+   ),
  reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
  response_format:
  T.any(
@@ -1062,6 +1096,44 @@ module OpenAI
  end
  end
 
+ # The retention policy for the prompt cache. Set to `24h` to enable extended
+ # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+ # of 24 hours.
+ # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+ module PromptCacheRetention
+   extend OpenAI::Internal::Type::Enum
+
+   TaggedSymbol =
+     T.type_alias do
+       T.all(
+         Symbol,
+         OpenAI::Chat::CompletionCreateParams::PromptCacheRetention
+       )
+     end
+   OrSymbol = T.type_alias { T.any(Symbol, String) }
+
+   IN_MEMORY =
+     T.let(
+       :"in-memory",
+       OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol
+     )
+   PROMPT_CACHE_RETENTION_24H =
+     T.let(
+       :"24h",
+       OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol
+     )
+
+   sig do
+     override.returns(
+       T::Array[
+         OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::TaggedSymbol
+       ]
+     )
+   end
+   def self.values
+   end
+ end
+
  # An object specifying the format that the model must output.
  #
  # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
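The `PromptCacheRetention` enum added above exposes `:"in-memory"` and `:"24h"`. A minimal sketch of opting into 24-hour retention, using an illustrative cache key and prompt:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Extended prompt caching: cached prefixes stay active for up to 24 hours.
    client.chat.completions.create(
      model: :"gpt-5.1",
      prompt_cache_key: "docs-assistant-v1", # illustrative grouping key
      prompt_cache_retention: :"24h",
      messages: [{role: :user, content: "Hello"}]
    )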
@@ -8,6 +8,13 @@ module OpenAI
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ChatModel) }
  OrSymbol = T.type_alias { T.any(Symbol, String) }
 
+ GPT_5_1 = T.let(:"gpt-5.1", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_1_2025_11_13 =
+   T.let(:"gpt-5.1-2025-11-13", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_1_CODEX = T.let(:"gpt-5.1-codex", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_1_MINI = T.let(:"gpt-5.1-mini", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_1_CHAT_LATEST =
+   T.let(:"gpt-5.1-chat-latest", OpenAI::ChatModel::TaggedSymbol)
  GPT_5 = T.let(:"gpt-5", OpenAI::ChatModel::TaggedSymbol)
  GPT_5_MINI = T.let(:"gpt-5-mini", OpenAI::ChatModel::TaggedSymbol)
  GPT_5_NANO = T.let(:"gpt-5-nano", OpenAI::ChatModel::TaggedSymbol)
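The same constants are added to the runtime enum in data/lib/openai/models/chat_model.rb (entry 12 in the file list), so they can be referenced directly; a brief sketch, assuming the enum constants resolve to their symbol values as elsewhere in this gem:

    require "openai"

    # Each constant is the model identifier as a symbol.
    OpenAI::ChatModel::GPT_5_1             # => :"gpt-5.1"
    OpenAI::ChatModel::GPT_5_1_CHAT_LATEST # => :"gpt-5.1-chat-latest"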
@@ -36,6 +36,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ShellCall,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput,
  OpenAI::Responses::ResponseInputItem::McpListTools,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse,
@@ -79,6 +83,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
  OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash,
+ OpenAI::Responses::ResponseInputItem::ShellCall::OrHash,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash,
  OpenAI::Responses::ResponseInputItem::McpListTools::OrHash,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash,
@@ -129,6 +137,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ShellCall,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput,
  OpenAI::Responses::ResponseInputItem::McpListTools,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse,
@@ -26,6 +26,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall,
  OpenAI::Conversations::ConversationItem::LocalShellCall,
  OpenAI::Conversations::ConversationItem::LocalShellCallOutput,
+ OpenAI::Responses::ResponseFunctionShellToolCall,
+ OpenAI::Responses::ResponseFunctionShellToolCallOutput,
+ OpenAI::Responses::ResponseApplyPatchToolCall,
+ OpenAI::Responses::ResponseApplyPatchToolCallOutput,
  OpenAI::Conversations::ConversationItem::McpListTools,
  OpenAI::Conversations::ConversationItem::McpApprovalRequest,
  OpenAI::Conversations::ConversationItem::McpApprovalResponse,
@@ -54,6 +54,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
  OpenAI::Conversations::ConversationItem::LocalShellCall::OrHash,
  OpenAI::Conversations::ConversationItem::LocalShellCallOutput::OrHash,
+ OpenAI::Responses::ResponseFunctionShellToolCall::OrHash,
+ OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash,
+ OpenAI::Responses::ResponseApplyPatchToolCall::OrHash,
+ OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash,
  OpenAI::Conversations::ConversationItem::McpListTools::OrHash,
  OpenAI::Conversations::ConversationItem::McpApprovalRequest::OrHash,
  OpenAI::Conversations::ConversationItem::McpApprovalResponse::OrHash,
@@ -34,6 +34,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ShellCall,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput,
  OpenAI::Responses::ResponseInputItem::McpListTools,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse,
@@ -83,6 +87,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
  OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash,
+ OpenAI::Responses::ResponseInputItem::ShellCall::OrHash,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash,
  OpenAI::Responses::ResponseInputItem::McpListTools::OrHash,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash,
@@ -127,6 +135,10 @@ module OpenAI
  OpenAI::Responses::ResponseCodeInterpreterToolCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCall,
  OpenAI::Responses::ResponseInputItem::LocalShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ShellCall,
+ OpenAI::Responses::ResponseInputItem::ShellCallOutput,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCall,
+ OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput,
  OpenAI::Responses::ResponseInputItem::McpListTools,
  OpenAI::Responses::ResponseInputItem::McpApprovalRequest,
  OpenAI::Responses::ResponseInputItem::McpApprovalResponse,
@@ -889,12 +889,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort
 
@@ -992,12 +996,16 @@ module OpenAI
  max_completion_tokens: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -512,12 +512,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -574,12 +578,16 @@ module OpenAI
  model: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1123,12 +1131,16 @@ module OpenAI
 
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
  attr_accessor :reasoning_effort
 
@@ -1199,7 +1211,9 @@ module OpenAI
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
  OpenAI::Responses::Tool::LocalShell::OrHash,
+ OpenAI::Responses::FunctionShellTool::OrHash,
  OpenAI::Responses::CustomTool::OrHash,
+ OpenAI::Responses::ApplyPatchTool::OrHash,
  OpenAI::Responses::WebSearchTool::OrHash,
  OpenAI::Responses::WebSearchPreviewTool::OrHash
  )
@@ -1234,7 +1248,9 @@ module OpenAI
  OpenAI::Responses::Tool::CodeInterpreter::OrHash,
  OpenAI::Responses::Tool::ImageGeneration::OrHash,
  OpenAI::Responses::Tool::LocalShell::OrHash,
+ OpenAI::Responses::FunctionShellTool::OrHash,
  OpenAI::Responses::CustomTool::OrHash,
+ OpenAI::Responses::ApplyPatchTool::OrHash,
  OpenAI::Responses::WebSearchTool::OrHash,
  OpenAI::Responses::WebSearchPreviewTool::OrHash
  )
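The two hunks above add FunctionShellTool and ApplyPatchTool to an accepted `tools` union. Their full definitions live in data/rbi/openai/models/responses/apply_patch_tool.rbi and function_shell_tool.rbi (entries 74 and 75 in the file list) and are not shown in this excerpt, so the following is only a hypothetical sketch that assumes both tools can be constructed with their defaults:

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # Hypothetical: assumes both tool classes require no constructor arguments,
    # which the hunks shown here do not confirm.
    client.responses.create(
      model: :"gpt-5.1",
      input: "Apply the attached patch to the repository.",
      tools: [
        OpenAI::Responses::FunctionShellTool.new,
        OpenAI::Responses::ApplyPatchTool.new
      ]
    )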
@@ -1247,12 +1263,16 @@ module OpenAI
  max_completion_tokens: nil,
  # Constrains effort on reasoning for
  # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
- # effort can result in faster responses and fewer tokens used on reasoning in a
- # response.
+ # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+ # reasoning effort can result in faster responses and fewer tokens used on
+ # reasoning in a response.
  #
- # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
- # effort.
+ # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+ # reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+ # calls are supported for all reasoning values in gpt-5.1.
+ # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+ # support `none`.
+ # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,