openai 0.35.2 → 0.36.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +21 -15
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/models/batch_create_params.rb +9 -6
- data/lib/openai/models/beta/assistant_create_params.rb +9 -5
- data/lib/openai/models/beta/assistant_update_params.rb +9 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
- data/lib/openai/models/chat/completion_create_params.rb +37 -6
- data/lib/openai/models/chat_model.rb +5 -0
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/conversation_item.rb +13 -1
- data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
- data/lib/openai/models/evals/run_cancel_response.rb +20 -12
- data/lib/openai/models/evals/run_create_params.rb +20 -12
- data/lib/openai/models/evals/run_create_response.rb +20 -12
- data/lib/openai/models/evals/run_list_response.rb +20 -12
- data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
- data/lib/openai/models/graders/score_model_grader.rb +9 -5
- data/lib/openai/models/reasoning.rb +10 -6
- data/lib/openai/models/reasoning_effort.rb +10 -5
- data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
- data/lib/openai/models/responses/function_shell_tool.rb +20 -0
- data/lib/openai/models/responses/input_token_count_params.rb +14 -8
- data/lib/openai/models/responses/response.rb +46 -11
- data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
- data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
- data/lib/openai/models/responses/response_create_params.rb +42 -9
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
- data/lib/openai/models/responses/response_input_item.rb +395 -1
- data/lib/openai/models/responses/response_item.rb +13 -1
- data/lib/openai/models/responses/response_item_list.rb +2 -2
- data/lib/openai/models/responses/response_output_item.rb +13 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +7 -1
- data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
- data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
- data/lib/openai/resources/chat/completions.rb +6 -2
- data/lib/openai/resources/conversations/items.rb +3 -3
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_items.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +3 -3
- data/lib/openai/resources/responses.rb +12 -8
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +9 -0
- data/rbi/openai/models/batch_create_params.rbi +17 -9
- data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
- data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
- data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
- data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
- data/rbi/openai/models/chat_model.rbi +7 -0
- data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
- data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
- data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
- data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
- data/rbi/openai/models/evals/run_create_params.rbi +44 -20
- data/rbi/openai/models/evals/run_create_response.rbi +40 -20
- data/rbi/openai/models/evals/run_list_response.rbi +40 -20
- data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
- data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
- data/rbi/openai/models/reasoning.rbi +18 -10
- data/rbi/openai/models/reasoning_effort.rbi +10 -5
- data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
- data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
- data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
- data/rbi/openai/models/responses/response.rbi +73 -2
- data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
- data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
- data/rbi/openai/models/responses/response_create_params.rbi +87 -5
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
- data/rbi/openai/models/responses/response_input_item.rbi +675 -0
- data/rbi/openai/models/responses/response_item.rbi +4 -0
- data/rbi/openai/models/responses/response_item_list.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
- data/rbi/openai/models/responses/tool.rbi +2 -0
- data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
- data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
- data/rbi/openai/resources/batches.rbi +4 -3
- data/rbi/openai/resources/beta/assistants.rbi +18 -10
- data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
- data/rbi/openai/resources/chat/completions.rbi +38 -12
- data/rbi/openai/resources/conversations/items.rbi +4 -0
- data/rbi/openai/resources/conversations.rbi +4 -0
- data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
- data/rbi/openai/resources/responses.rbi +28 -2
- data/sig/openai/models/batch_create_params.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +16 -0
- data/sig/openai/models/chat_model.rbs +11 -1
- data/sig/openai/models/conversations/conversation_item.rbs +4 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
- data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
- data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
- data/sig/openai/models/responses/response.rbs +18 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
- data/sig/openai/models/responses/response_create_params.rbs +18 -0
- data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
- data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
- data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
- data/sig/openai/models/responses/response_input_item.rbs +276 -0
- data/sig/openai/models/responses/response_item.rbs +4 -0
- data/sig/openai/models/responses/response_output_item.rbs +4 -0
- data/sig/openai/models/responses/tool.rbs +2 -0
- data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
- data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
- data/sig/openai/resources/chat/completions.rbs +2 -0
- data/sig/openai/resources/responses.rbs +2 -0
- metadata +29 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 17baebbeaa4b29e9cd335b7cf1c0bef7a1e63832804c6f332e6cb508a4afd4d8
+  data.tar.gz: b9009868811efc0f57c9b33bbe51c031436e0ccebb2f944ad5d60259095a4099
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8b5ca35d9d0758f61af7f3b7966017ad6df9b3e0a582d2cbc3bc455ac3d651bed29554710a0c05e22a5ad481e0469cb8ab0448cbe1d356c52f07ee2bd0cfff65
+  data.tar.gz: 474f40178d640e14ddb001786670e0e063c45b7205895115a72a28075597ed798b52ab3cd1952296e847822f056433bb907d7022dae5c3780451c872709485f9
data/CHANGELOG.md
CHANGED

@@ -1,5 +1,13 @@
 # Changelog

+## 0.36.0 (2025-11-13)
+
+Full Changelog: [v0.35.2...v0.36.0](https://github.com/openai/openai-ruby/compare/v0.35.2...v0.36.0)
+
+### Features
+
+* **api:** gpt 5.1 ([26ece0e](https://github.com/openai/openai-ruby/commit/26ece0eb68486e40066c89f626b9a83c4f274889))
+
 ## 0.35.2 (2025-11-05)

 Full Changelog: [v0.35.1...v0.35.2](https://github.com/openai/openai-ruby/compare/v0.35.1...v0.35.2)
data/README.md
CHANGED

@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->

 ```ruby
-gem "openai", "~> 0.
+gem "openai", "~> 0.36.0"
 ```

 <!-- x-release-please-end -->

@@ -30,7 +30,10 @@ openai = OpenAI::Client.new(
   api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted
 )

-chat_completion = openai.chat.completions.create(
+chat_completion = openai.chat.completions.create(
+  messages: [{role: "user", content: "Say this is a test"}],
+  model: :"gpt-5.1"
+)

 puts(chat_completion)
 ```

@@ -42,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
 ```ruby
 stream = openai.responses.stream(
   input: "Write a haiku about OpenAI.",
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )

 stream.each do |event|

@@ -340,7 +343,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-  model: :"gpt-5",
+  model: :"gpt-5.1",
   request_options: {max_retries: 5}
 )
 ```

@@ -358,7 +361,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
-  model: :"gpt-5",
+  model: :"gpt-5.1",
   request_options: {timeout: 5}
 )
 ```

@@ -393,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented parameters
 chat_completion =
   openai.chat.completions.create(
     messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-    model: :"gpt-5",
+    model: :"gpt-5.1",
     request_options: {
       extra_query: {my_query_parameter: value},
       extra_body: {my_body_parameter: value},

@@ -441,7 +444,7 @@ You can provide typesafe request parameters like so:
 ```ruby
 openai.chat.completions.create(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )
 ```

@@ -449,12 +452,15 @@ Or, equivalently:
 ```ruby
 # Hashes work, but are not typesafe:
-openai.chat.completions.create(
+openai.chat.completions.create(
+  messages: [{role: "user", content: "Say this is a test"}],
+  model: :"gpt-5.1"
+)

 # You can also splat a full Params class:
 params = OpenAI::Chat::CompletionCreateParams.new(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model: :"gpt-5"
+  model: :"gpt-5.1"
 )
 openai.chat.completions.create(**params)
 ```

@@ -464,11 +470,11 @@ openai.chat.completions.create(**params)
 Since this library does not depend on `sorbet-runtime`, it cannot provide [`T::Enum`](https://sorbet.org/docs/tenum) instances. Instead, we provide "tagged symbols" instead, which is always a primitive at runtime:

 ```ruby
-# :
-puts(OpenAI::
+# :"in-memory"
+puts(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY)

-# Revealed type: `T.all(OpenAI::
-T.reveal_type(OpenAI::
+# Revealed type: `T.all(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention, Symbol)`
+T.reveal_type(OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY)
 ```

 Enum parameters have a "relaxed" type, so you can either pass in enum constants or their literal value:

@@ -476,13 +482,13 @@ Enum parameters have a "relaxed" type, so you can either pass in enum constants
 ```ruby
 # Using the enum constants preserves the tagged type information:
 openai.chat.completions.create(
-
+  prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY,
   # …
 )

 # Literal values are also permissible:
 openai.chat.completions.create(
-
+  prompt_cache_retention: :"in-memory",
   # …
 )
 ```
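Assembled from the README fragments above, a complete request that exercises both the new default model and the new enum might look like the following (a minimal sketch; the message text is illustrative):

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# The enum constant and the literal :"in-memory" are interchangeable,
# since enum parameters accept either form.
chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  model: :"gpt-5.1",
  prompt_cache_retention: OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::IN_MEMORY
)

puts(chat_completion)
```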
data/lib/openai/internal/type/enum.rb
CHANGED

@@ -19,11 +19,11 @@ module OpenAI
 # @example
 #   # `chat_model` is a `OpenAI::ChatModel`
 #   case chat_model
-#   when OpenAI::ChatModel::
+#   when OpenAI::ChatModel::GPT_5_1
 #     # ...
-#   when OpenAI::ChatModel::
+#   when OpenAI::ChatModel::GPT_5_1_2025_11_13
 #     # ...
-#   when OpenAI::ChatModel::
+#   when OpenAI::ChatModel::GPT_5_1_CODEX
 #     # ...
 #   else
 #     puts(chat_model)

@@ -31,11 +31,11 @@ module OpenAI
 #
 # @example
 #   case chat_model
-#   in :"gpt-5"
+#   in :"gpt-5.1"
 #     # ...
-#   in :"gpt-5-
+#   in :"gpt-5.1-2025-11-13"
 #     # ...
-#   in :"gpt-5-
+#   in :"gpt-5.1-codex"
 #     # ...
 #   else
 #     puts(chat_model)
data/lib/openai/models/batch_create_params.rb
CHANGED

@@ -16,9 +16,10 @@ module OpenAI
 # @!attribute endpoint
 #   The endpoint to be used for all requests in the batch. Currently
-#   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
-#   are supported. Note that `/v1/embeddings` batches are also
-#   maximum of 50,000 embedding inputs across all requests in the
+#   `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+#   and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+#   restricted to a maximum of 50,000 embedding inputs across all requests in the
+#   batch.
 #
 #   @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint]
 required :endpoint, enum: -> { OpenAI::BatchCreateParams::Endpoint }

@@ -83,9 +84,10 @@ module OpenAI
 end

 # The endpoint to be used for all requests in the batch. Currently
-# `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
-# are supported. Note that `/v1/embeddings` batches are also
-# maximum of 50,000 embedding inputs across all requests in the
+# `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+# and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+# restricted to a maximum of 50,000 embedding inputs across all requests in the
+# batch.
 module Endpoint
   extend OpenAI::Internal::Type::Enum

@@ -93,6 +95,7 @@ module OpenAI
   V1_CHAT_COMPLETIONS = :"/v1/chat/completions"
   V1_EMBEDDINGS = :"/v1/embeddings"
   V1_COMPLETIONS = :"/v1/completions"
+  V1_MODERATIONS = :"/v1/moderations"

   # @!method self.values
   #   @return [Array<Symbol>]
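The new `V1_MODERATIONS` value drops into the existing batch-creation call unchanged; a hedged sketch (the file ID is a placeholder, and `input_file_id`/`completion_window` are the Batch API's other required parameters, which this release does not touch):

```ruby
batch = openai.batches.create(
  endpoint: OpenAI::BatchCreateParams::Endpoint::V1_MODERATIONS, # :"/v1/moderations"
  input_file_id: "file-abc123", # placeholder: a previously uploaded JSONL batch file
  completion_window: :"24h"
)
puts(batch.id)
```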
data/lib/openai/models/beta/assistant_create_params.rb
CHANGED

@@ -51,12 +51,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
 #
-#
-#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/beta/assistant_update_params.rb
CHANGED

@@ -51,12 +51,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
 #
-#
-#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/beta/threads/run_create_params.rb
CHANGED

@@ -109,12 +109,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
-#
-#
-#
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
+#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/chat/completion_create_params.rb
CHANGED

@@ -190,15 +190,30 @@ module OpenAI
 #   @return [String, nil]
 optional :prompt_cache_key, String

+# @!attribute prompt_cache_retention
+#   The retention policy for the prompt cache. Set to `24h` to enable extended
+#   prompt caching, which keeps cached prefixes active for longer, up to a maximum
+#   of 24 hours.
+#   [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+#
+#   @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil]
+optional :prompt_cache_retention,
+         enum: -> { OpenAI::Chat::CompletionCreateParams::PromptCacheRetention },
+         nil?: true
+
 # @!attribute reasoning_effort
 #   Constrains effort on reasoning for
 #   [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-#   supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-#   effort can result in faster responses and fewer tokens used on
-#   response.
+#   supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+#   reasoning effort can result in faster responses and fewer tokens used on
+#   reasoning in a response.
 #
-#
-#
+#   - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#     reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#     calls are supported for all reasoning values in gpt-5.1.
+#   - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#     support `none`.
+#   - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 #   @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -368,7 +383,7 @@ module OpenAI
 #   @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
 optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }

-# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
+# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, verbosity: nil, web_search_options: nil, request_options: {})
 #   Some parameter documentations has been truncated, see
 #   {OpenAI::Models::Chat::CompletionCreateParams} for more details.
 #

@@ -406,6 +421,8 @@ module OpenAI
 #
 #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
 #
+#   @param prompt_cache_retention [Symbol, OpenAI::Models::Chat::CompletionCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+#
 #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
 #
 #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.

@@ -551,6 +568,20 @@ module OpenAI
 #     @return [Array<Symbol>]
 end

+# The retention policy for the prompt cache. Set to `24h` to enable extended
+# prompt caching, which keeps cached prefixes active for longer, up to a maximum
+# of 24 hours.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+module PromptCacheRetention
+  extend OpenAI::Internal::Type::Enum
+
+  IN_MEMORY = :"in-memory"
+  PROMPT_CACHE_RETENTION_24H = :"24h"
+
+  # @!method self.values
+  #   @return [Array<Symbol>]
+end
+
 # An object specifying the format that the model must output.
 #
 # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
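Both additions are ordinary request parameters; a minimal sketch exercising them together (the prompt is illustrative, and `reasoning_effort: :none` is passed explicitly even though it is already the `gpt-5.1` default):

```ruby
openai.chat.completions.create(
  messages: [{role: "user", content: "Summarize this in one line."}],
  model: :"gpt-5.1",
  reasoning_effort: :none,       # new value; not supported on pre-5.1 models
  prompt_cache_retention: :"24h" # keep cached prefixes active for up to 24 hours
)
```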
data/lib/openai/models/chat_model.rb
CHANGED

@@ -5,6 +5,11 @@ module OpenAI
 module ChatModel
   extend OpenAI::Internal::Type::Enum

+  GPT_5_1 = :"gpt-5.1"
+  GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
+  GPT_5_1_CODEX = :"gpt-5.1-codex"
+  GPT_5_1_MINI = :"gpt-5.1-mini"
+  GPT_5_1_CHAT_LATEST = :"gpt-5.1-chat-latest"
   GPT_5 = :"gpt-5"
   GPT_5_MINI = :"gpt-5-mini"
   GPT_5_NANO = :"gpt-5-nano"
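Because `ChatModel` extends the internal enum type, the new constants also surface through the enum's `values` reflection (a small sketch):

```ruby
OpenAI::ChatModel::GPT_5_1_CHAT_LATEST        #=> :"gpt-5.1-chat-latest"
OpenAI::ChatModel.values.include?(:"gpt-5.1") #=> true
```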
data/lib/openai/models/conversations/conversation_create_params.rb
CHANGED

@@ -12,7 +12,7 @@ module OpenAI
 #   Initial items to include in the conversation context. You may add up to 20 items
 #   at a time.
 #
-#   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+#   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
 optional :items,
          -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] },
          nil?: true

@@ -32,7 +32,7 @@ module OpenAI
 #   Some parameter documentations has been truncated, see
 #   {OpenAI::Models::Conversations::ConversationCreateParams} for more details.
 #
-#   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
+#   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context. You may add up to 20 items
 #
 #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
 #
data/lib/openai/models/conversations/conversation_item.rb
CHANGED

@@ -54,6 +54,18 @@ module OpenAI
 # The output of a local shell tool call.
 variant :local_shell_call_output, -> { OpenAI::Conversations::ConversationItem::LocalShellCallOutput }

+# A tool call that executes one or more shell commands in a managed environment.
+variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall }
+
+# The output of a shell tool call.
+variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput }
+
+# A tool call that applies file diffs by creating, deleting, or updating files.
+variant :apply_patch_call, -> { OpenAI::Responses::ResponseApplyPatchToolCall }
+
+# The output emitted by an apply patch tool call.
+variant :apply_patch_call_output, -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput }
+
 # A list of tools available on an MCP server.
 variant :mcp_list_tools, -> { OpenAI::Conversations::ConversationItem::McpListTools }

@@ -592,7 +604,7 @@ module OpenAI
 end

 # @!method self.variants
-#   @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)]
+#   @return [Array(OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput)]
 end
 end
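Code that enumerates conversation items should now be prepared for the four new variants; a hedged sketch of class-based dispatch (only the class names come from this diff — the handler methods are hypothetical):

```ruby
# `items` is assumed to hold ConversationItem instances already fetched
# via the Conversations API.
items.each do |item|
  case item
  when OpenAI::Models::Responses::ResponseFunctionShellToolCall
    handle_shell_call(item) # hypothetical handler
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall
    handle_apply_patch_call(item) # hypothetical handler
  else
    # pre-existing variants: messages, function calls, MCP items, etc.
  end
end
```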
data/lib/openai/models/conversations/conversation_item_list.rb
CHANGED

@@ -8,7 +8,7 @@ module OpenAI
 # @!attribute data
 #   A list of conversation items.
 #
-#   @return [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
+#   @return [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>]
 required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Conversations::ConversationItem] }

 # @!attribute first_id

@@ -38,7 +38,7 @@ module OpenAI
 # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list)
 #   A list of Conversation items.
 #
-#   @param data [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>] A list of conversation items.
+#   @param data [Array<OpenAI::Models::Conversations::Message, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Conversations::ConversationItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCall, OpenAI::Models::Conversations::ConversationItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Conversations::ConversationItem::McpListTools, OpenAI::Models::Conversations::ConversationItem::McpApprovalRequest, OpenAI::Models::Conversations::ConversationItem::McpApprovalResponse, OpenAI::Models::Conversations::ConversationItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput>] A list of conversation items.
 #
 #   @param first_id [String] The ID of the first item in the list.
 #
data/lib/openai/models/conversations/item_create_params.rb
CHANGED

@@ -11,7 +11,7 @@ module OpenAI
 # @!attribute items
 #   The items to add to the conversation. You may add up to 20 items at a time.
 #
-#   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
+#   @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
 required :items, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputItem] }

 # @!attribute include

@@ -26,7 +26,7 @@ module OpenAI
 #   Some parameter documentations has been truncated, see
 #   {OpenAI::Models::Conversations::ItemCreateParams} for more details.
 #
-#   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
+#   @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] The items to add to the conversation. You may add up to 20 items at a time.
 #
 #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
 #
data/lib/openai/models/evals/create_eval_completions_run_data_source.rb
CHANGED

@@ -462,12 +462,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
 #
-#
-#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
data/lib/openai/models/evals/run_cancel_response.rb
CHANGED

@@ -316,12 +316,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
 #
-#
-#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -660,12 +664,16 @@ module OpenAI
 # @!attribute reasoning_effort
 # Constrains effort on reasoning for
 # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-# supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-# effort can result in faster responses and fewer tokens used on
-# response.
+# supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+# reasoning effort can result in faster responses and fewer tokens used on
+# reasoning in a response.
 #
-#
-#
+# - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+#   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+#   calls are supported for all reasoning values in gpt-5.1.
+# - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+#   support `none`.
+# - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
 #
 # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
 optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -709,7 +717,7 @@ module OpenAI
 # the model to call your own code. Learn more about
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
 #
-# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
 optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

 # @!attribute top_p

@@ -733,7 +741,7 @@ module OpenAI
 #
 # @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
 #
-# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
+# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
 #
 # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
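The widened `tools` unions here mirror the new `FunctionShellTool` and `ApplyPatchTool` models added elsewhere in this release. A hedged sketch of enabling them on a Responses request, under the assumption (not shown in this diff) that each tool model carries only a fixed `type` discriminator and can therefore be constructed bare:

```ruby
response = openai.responses.create(
  model: :"gpt-5.1",
  input: "Create hello.txt containing 'hi' by applying a patch.",
  tools: [
    OpenAI::Responses::FunctionShellTool.new, # assumed wire type: shell
    OpenAI::Responses::ApplyPatchTool.new     # assumed wire type: apply_patch
  ]
)
```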