openai 0.38.0 → 0.39.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +13 -0
  3. data/README.md +9 -9
  4. data/lib/openai/internal/type/enum.rb +6 -6
  5. data/lib/openai/models/beta/assistant_create_params.rb +1 -1
  6. data/lib/openai/models/beta/assistant_update_params.rb +1 -1
  7. data/lib/openai/models/beta/threads/run_create_params.rb +1 -1
  8. data/lib/openai/models/chat/completion_create_params.rb +1 -1
  9. data/lib/openai/models/chat_model.rb +5 -0
  10. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +1 -1
  11. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  12. data/lib/openai/models/evals/run_create_params.rb +2 -2
  13. data/lib/openai/models/evals/run_create_response.rb +2 -2
  14. data/lib/openai/models/evals/run_list_response.rb +2 -2
  15. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  16. data/lib/openai/models/graders/score_model_grader.rb +7 -3
  17. data/lib/openai/models/reasoning.rb +5 -3
  18. data/lib/openai/models/reasoning_effort.rb +1 -1
  19. data/lib/openai/models/responses/response_compact_params.rb +15 -0
  20. data/lib/openai/resources/responses.rb +5 -5
  21. data/lib/openai/version.rb +1 -1
  22. data/rbi/openai/models/beta/assistant_create_params.rbi +2 -2
  23. data/rbi/openai/models/beta/assistant_update_params.rbi +2 -2
  24. data/rbi/openai/models/beta/threads/run_create_params.rbi +2 -2
  25. data/rbi/openai/models/chat/completion_create_params.rbi +2 -2
  26. data/rbi/openai/models/chat_model.rbi +8 -0
  27. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +2 -2
  28. data/rbi/openai/models/evals/run_cancel_response.rbi +4 -4
  29. data/rbi/openai/models/evals/run_create_params.rbi +4 -4
  30. data/rbi/openai/models/evals/run_create_response.rbi +4 -4
  31. data/rbi/openai/models/evals/run_list_response.rbi +4 -4
  32. data/rbi/openai/models/evals/run_retrieve_response.rbi +4 -4
  33. data/rbi/openai/models/graders/score_model_grader.rbi +6 -4
  34. data/rbi/openai/models/reasoning.rbi +8 -5
  35. data/rbi/openai/models/reasoning_effort.rbi +1 -1
  36. data/rbi/openai/models/responses/response_compact_params.rbi +25 -0
  37. data/rbi/openai/resources/beta/assistants.rbi +2 -2
  38. data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
  39. data/rbi/openai/resources/chat/completions.rbi +2 -2
  40. data/sig/openai/models/chat_model.rbs +11 -1
  41. data/sig/openai/models/responses/response_compact_params.rbs +11 -1
  42. metadata +2 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: ea6434eae9ddaa1fdf22d4069a4ae253720b292816054e4a8a4076a4a5fbde28
- data.tar.gz: 291feafd4b0164c5e6020e9a6cd85c70333c65ac0753bb39677e760e5410d487
+ metadata.gz: 6fe08f20637a8f850964152557cf797e6c078ea740bd5c5e4d5e88ef0ed5c0b9
+ data.tar.gz: b372a97f47fe9ae0c1c41fdffbd0d5a7f9b42e46fb3d7a976e5c5f3c11970cba
  SHA512:
- metadata.gz: c515391408af1a74b506b750d7c78ae7b56bdb9cec40e21fe6d80d6d5c923941d476a529c6d51c87b2e1abbeae9ae78e5d4216520eed3dc3aa2d4403384a0bfb
- data.tar.gz: 7866eb758cd765d796fe156904c93f005778ce1fad88d5fc0011c33ac5786c20a57ecbfc18bd39f16027e533840cde09966ca387b80fd41dfe205db864ef7aa9
+ metadata.gz: 5db010ad9ae69bc8bcc36538de8ef659e2e62438b80b08a7ee85ca952c6df79e67ac3f0d640846b31bac4ce55f41fcc45942062d23cf2ccc0f7e50b5ff2c28fb
+ data.tar.gz: 2f5c2ae60e053513757f7237af9e3559a09396e648549322e0ac35eda38811e95810992fb5ccf0fae1354c85f595dcebc6621d9d4a46e2e4ae54fdde1a4af82d
data/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
  # Changelog

+ ## 0.39.0 (2025-12-11)
+
+ Full Changelog: [v0.38.0...v0.39.0](https://github.com/openai/openai-ruby/compare/v0.38.0...v0.39.0)
+
+ ### Features
+
+ * **api:** gpt 5.2 ([369f26b](https://github.com/openai/openai-ruby/commit/369f26b5e42ddef31d07278e43776415f5d49b62))
+
+
+ ### Bug Fixes
+
+ * Create new responses with previous_response_id ([#869](https://github.com/openai/openai-ruby/issues/869)) ([b14e2aa](https://github.com/openai/openai-ruby/commit/b14e2aa2351d102c4af30809ebb4a5cca61a1165))
+
  ## 0.38.0 (2025-12-08)

  Full Changelog: [v0.37.0...v0.38.0](https://github.com/openai/openai-ruby/compare/v0.37.0...v0.38.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->

  ```ruby
- gem "openai", "~> 0.38.0"
+ gem "openai", "~> 0.39.0"
  ```

  <!-- x-release-please-end -->
@@ -32,7 +32,7 @@ openai = OpenAI::Client.new(

  chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-5.1"
+ model: :"gpt-5.2"
  )

  puts(chat_completion)
@@ -45,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
  ```ruby
  stream = openai.responses.stream(
  input: "Write a haiku about OpenAI.",
- model: :"gpt-5.1"
+ model: :"gpt-5.2"
  )

  stream.each do |event|
@@ -343,7 +343,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-5.1",
+ model: :"gpt-5.2",
  request_options: {max_retries: 5}
  )
  ```
@@ -361,7 +361,7 @@ openai = OpenAI::Client.new(
  # Or, configure per-request:
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
- model: :"gpt-5.1",
+ model: :"gpt-5.2",
  request_options: {timeout: 5}
  )
  ```
@@ -396,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
  chat_completion =
  openai.chat.completions.create(
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
- model: :"gpt-5.1",
+ model: :"gpt-5.2",
  request_options: {
  extra_query: {my_query_parameter: value},
  extra_body: {my_body_parameter: value},
@@ -444,7 +444,7 @@ You can provide typesafe request parameters like so:
  ```ruby
  openai.chat.completions.create(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-5.1"
+ model: :"gpt-5.2"
  )
  ```

@@ -454,13 +454,13 @@ Or, equivalently:
  # Hashes work, but are not typesafe:
  openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
- model: :"gpt-5.1"
+ model: :"gpt-5.2"
  )

  # You can also splat a full Params class:
  params = OpenAI::Chat::CompletionCreateParams.new(
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
- model: :"gpt-5.1"
+ model: :"gpt-5.2"
  )
  openai.chat.completions.create(**params)
  ```
@@ -19,11 +19,11 @@ module OpenAI
  # @example
  # # `chat_model` is a `OpenAI::ChatModel`
  # case chat_model
- # when OpenAI::ChatModel::GPT_5_1
+ # when OpenAI::ChatModel::GPT_5_2
  # # ...
- # when OpenAI::ChatModel::GPT_5_1_2025_11_13
+ # when OpenAI::ChatModel::GPT_5_2_2025_12_11
  # # ...
- # when OpenAI::ChatModel::GPT_5_1_CODEX
+ # when OpenAI::ChatModel::GPT_5_2_CHAT_LATEST
  # # ...
  # else
  # puts(chat_model)
@@ -31,11 +31,11 @@ module OpenAI
  #
  # @example
  # case chat_model
- # in :"gpt-5.1"
+ # in :"gpt-5.2"
  # # ...
- # in :"gpt-5.1-2025-11-13"
+ # in :"gpt-5.2-2025-12-11"
  # # ...
- # in :"gpt-5.1-codex"
+ # in :"gpt-5.2-chat-latest"
  # # ...
  # else
  # puts(chat_model)
@@ -61,7 +61,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -61,7 +61,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -119,7 +119,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -214,7 +214,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -5,6 +5,11 @@ module OpenAI
  module ChatModel
  extend OpenAI::Internal::Type::Enum

+ GPT_5_2 = :"gpt-5.2"
+ GPT_5_2_2025_12_11 = :"gpt-5.2-2025-12-11"
+ GPT_5_2_CHAT_LATEST = :"gpt-5.2-chat-latest"
+ GPT_5_2_PRO = :"gpt-5.2-pro"
+ GPT_5_2_PRO_2025_12_11 = :"gpt-5.2-pro-2025-12-11"
  GPT_5_1 = :"gpt-5.1"
  GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
  GPT_5_1_CODEX = :"gpt-5.1-codex"
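The five new `gpt-5.2` constants above are the user-facing core of this release and can be passed anywhere a `ChatModel` is accepted. A minimal sketch combining one of them with the `reasoning_effort` field whose documentation is updated throughout this diff (the client setup and prompt are placeholders, and the chosen effort level is an assumption about what `gpt-5.2` accepts, not something this diff states):

```ruby
require "openai"

openai = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

chat_completion = openai.chat.completions.create(
  messages: [{role: "user", content: "Say this is a test"}],
  # The enum constant and the bare symbol :"gpt-5.2" are interchangeable.
  model: OpenAI::ChatModel::GPT_5_2,
  # Optional; per the updated comments, `xhigh` applies to models after
  # `gpt-5.1-codex-max`, so a conservative value is used here.
  reasoning_effort: :low
)

puts(chat_completion)
```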
@@ -472,7 +472,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -238,7 +238,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -603,7 +603,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -679,7 +679,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -5,7 +5,8 @@ module OpenAI
  module Graders
  class ScoreModelGrader < OpenAI::Internal::Type::BaseModel
  # @!attribute input
- # The input text. This may include template strings.
+ # The input messages evaluated by the grader. Supports text, output text, input
+ # image, and input audio content blocks, and may include template strings.
  #
  # @return [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>]
  required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::ScoreModelGrader::Input] }
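The grader's constructor keyword arguments appear in the `@!method initialize` comment in the next hunk. A hedged sketch of building a grader whose `input` relies on a template string, as the updated description allows (the shape of the `Input` hash, the template variable, and the grading prompt are assumptions for illustration, not taken from this diff):

```ruby
# Hypothetical grader definition; the keys inside the `input` hash are assumed.
grader = OpenAI::Graders::ScoreModelGrader.new(
  name: "relevance",
  model: "gpt-5.2",
  input: [
    {
      role: "user",
      content: "Rate how relevant this answer is on a 0-1 scale: {{ sample.output_text }}"
    }
  ],
  range: [0.0, 1.0]
)
```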
@@ -41,9 +42,12 @@ module OpenAI
  optional :sampling_params, -> { OpenAI::Graders::ScoreModelGrader::SamplingParams }

  # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model)
+ # Some parameter documentations has been truncated, see
+ # {OpenAI::Models::Graders::ScoreModelGrader} for more details.
+ #
  # A ScoreModelGrader object that uses a model to assign a score to the input.
  #
- # @param input [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>] The input text. This may include template strings.
+ # @param input [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>] The input messages evaluated by the grader. Supports text, output text, input im
  #
  # @param model [String] The model to use for the evaluation.
  #
@@ -232,7 +236,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -16,7 +16,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -38,7 +38,8 @@ module OpenAI
  # debugging and understanding the model's reasoning process. One of `auto`,
  # `concise`, or `detailed`.
  #
- # `concise` is only supported for `computer-use-preview` models.
+ # `concise` is supported for `computer-use-preview` models and all reasoning
+ # models after `gpt-5`.
  #
  # @return [Symbol, OpenAI::Models::Reasoning::Summary, nil]
  optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true
@@ -82,7 +83,8 @@ module OpenAI
  # debugging and understanding the model's reasoning process. One of `auto`,
  # `concise`, or `detailed`.
  #
- # `concise` is only supported for `computer-use-preview` models.
+ # `concise` is supported for `computer-use-preview` models and all reasoning
+ # models after `gpt-5`.
  #
  # @see OpenAI::Models::Reasoning#summary
  module Summary
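The `effort` and `summary` notes above describe the shared `Reasoning` object. A hedged sketch of how it is commonly supplied through the Responses API (assuming `openai` is an `OpenAI::Client` and that `reasoning` accepts a plain hash, as other request params in this gem do; the prompt is a placeholder):

```ruby
response = openai.responses.create(
  model: :"gpt-5.2",
  input: "Walk me through long division of 1234 by 7.",
  # Per the updated comments: `concise` summaries now cover reasoning models
  # after `gpt-5`, and `xhigh` effort covers models after `gpt-5.1-codex-max`.
  reasoning: {effort: :high, summary: :concise}
)

puts(response)
```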
@@ -14,7 +14,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  module ReasoningEffort
  extend OpenAI::Internal::Type::Enum

@@ -64,6 +64,16 @@ module OpenAI
  module Model
  extend OpenAI::Internal::Type::Union

+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2 }
+
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_2025_12_11 }
+
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_CHAT_LATEST }
+
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_PRO }
+
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_PRO_2025_12_11 }
+
  variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_1 }

  variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_1_2025_11_13 }
@@ -237,6 +247,11 @@ module OpenAI

  # @!group

+ GPT_5_2 = :"gpt-5.2"
+ GPT_5_2_2025_12_11 = :"gpt-5.2-2025-12-11"
+ GPT_5_2_CHAT_LATEST = :"gpt-5.2-chat-latest"
+ GPT_5_2_PRO = :"gpt-5.2-pro"
+ GPT_5_2_PRO_2025_12_11 = :"gpt-5.2-pro-2025-12-11"
  GPT_5_1 = :"gpt-5.1"
  GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
  GPT_5_1_CODEX = :"gpt-5.1-codex"
@@ -186,10 +186,10 @@ module OpenAI
  # @see OpenAI::Models::Responses::ResponseCreateParams
  def stream(params)
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
- starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)
+ starting_after, response_id = parsed.values_at(:starting_after, :response_id)

- if starting_after && !previous_response_id
- raise ArgumentError, "starting_after can only be used with previous_response_id"
+ if starting_after && !response_id
+ raise ArgumentError, "starting_after can only be used with response_id"
  end
  model, tool_models = get_structured_output_models(parsed)

@@ -200,11 +200,11 @@ module OpenAI
  raw
  end

- if previous_response_id
+ if response_id
  retrieve_params = params.slice(:include, :request_options)

  raw_stream = retrieve_streaming_internal(
- previous_response_id,
+ response_id,
  params: retrieve_params,
  unwrap: unwrap
  )
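This is the change behind the "Create new responses with previous_response_id" fix in the changelog: `stream` now keys resumption off `response_id`, so `previous_response_id` is forwarded to response creation instead of being treated as a resume request. A hedged sketch of the two call shapes (the IDs are placeholders, and the reading of `starting_after` as the sequence number of the last received event is an assumption, not something this diff states):

```ruby
# Create a NEW streamed response that continues an earlier conversation;
# previous_response_id now passes through to the create call.
stream = openai.responses.stream(
  input: "Now make the haiku rhyme.",
  model: :"gpt-5.2",
  previous_response_id: "resp_abc123" # placeholder ID
)

# Resume streaming an EXISTING response; starting_after requires response_id,
# per the ArgumentError raised above.
resumed = openai.responses.stream(
  response_id: "resp_abc123",
  starting_after: 42 # assumed: sequence number of the last event already seen
)
```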
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
- VERSION = "0.38.0"
+ VERSION = "0.39.0"
  end
@@ -57,7 +57,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -227,7 +227,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -77,7 +77,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -251,7 +251,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -123,7 +123,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -349,7 +349,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -250,7 +250,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -704,7 +704,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -8,6 +8,14 @@ module OpenAI
  TaggedSymbol = T.type_alias { T.all(Symbol, OpenAI::ChatModel) }
  OrSymbol = T.type_alias { T.any(Symbol, String) }

+ GPT_5_2 = T.let(:"gpt-5.2", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_2_2025_12_11 =
+ T.let(:"gpt-5.2-2025-12-11", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_2_CHAT_LATEST =
+ T.let(:"gpt-5.2-chat-latest", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_2_PRO = T.let(:"gpt-5.2-pro", OpenAI::ChatModel::TaggedSymbol)
+ GPT_5_2_PRO_2025_12_11 =
+ T.let(:"gpt-5.2-pro-2025-12-11", OpenAI::ChatModel::TaggedSymbol)
  GPT_5_1 = T.let(:"gpt-5.1", OpenAI::ChatModel::TaggedSymbol)
  GPT_5_1_2025_11_13 =
  T.let(:"gpt-5.1-2025-11-13", OpenAI::ChatModel::TaggedSymbol)
@@ -899,7 +899,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -1007,7 +1007,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -522,7 +522,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -589,7 +589,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1143,7 +1143,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
  attr_accessor :reasoning_effort

@@ -1276,7 +1276,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -432,7 +432,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -497,7 +497,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1101,7 +1101,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -1253,7 +1253,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -522,7 +522,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -589,7 +589,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1143,7 +1143,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
  attr_accessor :reasoning_effort

@@ -1276,7 +1276,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -518,7 +518,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -585,7 +585,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1139,7 +1139,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
  attr_accessor :reasoning_effort

@@ -1272,7 +1272,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -524,7 +524,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -591,7 +591,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -1145,7 +1145,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol)) }
  attr_accessor :reasoning_effort

@@ -1278,7 +1278,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -11,7 +11,8 @@ module OpenAI
  T.any(OpenAI::Graders::ScoreModelGrader, OpenAI::Internal::AnyHash)
  end

- # The input text. This may include template strings.
+ # The input messages evaluated by the grader. Supports text, output text, input
+ # image, and input audio content blocks, and may include template strings.
  sig { returns(T::Array[OpenAI::Graders::ScoreModelGrader::Input]) }
  attr_accessor :input

@@ -61,7 +62,8 @@ module OpenAI
  ).returns(T.attached_class)
  end
  def self.new(
- # The input text. This may include template strings.
+ # The input messages evaluated by the grader. Supports text, output text, input
+ # image, and input audio content blocks, and may include template strings.
  input:,
  # The model to use for the evaluation.
  model:,
@@ -406,7 +408,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -447,7 +449,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # A seed value to initialize the randomness, during sampling.
  seed: nil,
@@ -18,7 +18,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :effort

@@ -34,7 +34,8 @@ module OpenAI
  # debugging and understanding the model's reasoning process. One of `auto`,
  # `concise`, or `detailed`.
  #
- # `concise` is only supported for `computer-use-preview` models.
+ # `concise` is supported for `computer-use-preview` models and all reasoning
+ # models after `gpt-5`.
  sig { returns(T.nilable(OpenAI::Reasoning::Summary::OrSymbol)) }
  attr_accessor :summary

@@ -63,7 +64,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  effort: nil,
  # **Deprecated:** use `summary` instead.
  #
@@ -75,7 +76,8 @@ module OpenAI
  # debugging and understanding the model's reasoning process. One of `auto`,
  # `concise`, or `detailed`.
  #
- # `concise` is only supported for `computer-use-preview` models.
+ # `concise` is supported for `computer-use-preview` models and all reasoning
+ # models after `gpt-5`.
  summary: nil
  )
  end
@@ -124,7 +126,8 @@ module OpenAI
  # debugging and understanding the model's reasoning process. One of `auto`,
  # `concise`, or `detailed`.
  #
- # `concise` is only supported for `computer-use-preview` models.
+ # `concise` is supported for `computer-use-preview` models and all reasoning
+ # models after `gpt-5`.
  module Summary
  extend OpenAI::Internal::Type::Enum

@@ -14,7 +14,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  module ReasoningEffort
  extend OpenAI::Internal::Type::Enum

@@ -150,6 +150,31 @@ module OpenAI
  end
  OrSymbol = T.type_alias { T.any(Symbol, String) }

+ GPT_5_2 =
+ T.let(
+ :"gpt-5.2",
+ OpenAI::Responses::ResponseCompactParams::Model::TaggedSymbol
+ )
+ GPT_5_2_2025_12_11 =
+ T.let(
+ :"gpt-5.2-2025-12-11",
+ OpenAI::Responses::ResponseCompactParams::Model::TaggedSymbol
+ )
+ GPT_5_2_CHAT_LATEST =
+ T.let(
+ :"gpt-5.2-chat-latest",
+ OpenAI::Responses::ResponseCompactParams::Model::TaggedSymbol
+ )
+ GPT_5_2_PRO =
+ T.let(
+ :"gpt-5.2-pro",
+ OpenAI::Responses::ResponseCompactParams::Model::TaggedSymbol
+ )
+ GPT_5_2_PRO_2025_12_11 =
+ T.let(
+ :"gpt-5.2-pro-2025-12-11",
+ OpenAI::Responses::ResponseCompactParams::Model::TaggedSymbol
+ )
  GPT_5_1 =
  T.let(
  :"gpt-5.1",
@@ -72,7 +72,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -207,7 +207,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -135,7 +135,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Body param: Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -322,7 +322,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # Body param: Specifies the format that the model must output. Compatible with
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -238,7 +238,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -572,7 +572,7 @@ module OpenAI
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
  # support `none`.
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
  reasoning_effort: nil,
  # An object specifying the format that the model must output.
  #
@@ -1,7 +1,12 @@
  module OpenAI
  module Models
  type chat_model =
- :"gpt-5.1"
+ :"gpt-5.2"
+ | :"gpt-5.2-2025-12-11"
+ | :"gpt-5.2-chat-latest"
+ | :"gpt-5.2-pro"
+ | :"gpt-5.2-pro-2025-12-11"
+ | :"gpt-5.1"
  | :"gpt-5.1-2025-11-13"
  | :"gpt-5.1-codex"
  | :"gpt-5.1-mini"
@@ -72,6 +77,11 @@ module OpenAI
  module ChatModel
  extend OpenAI::Internal::Type::Enum

+ GPT_5_2: :"gpt-5.2"
+ GPT_5_2_2025_12_11: :"gpt-5.2-2025-12-11"
+ GPT_5_2_CHAT_LATEST: :"gpt-5.2-chat-latest"
+ GPT_5_2_PRO: :"gpt-5.2-pro"
+ GPT_5_2_PRO_2025_12_11: :"gpt-5.2-pro-2025-12-11"
  GPT_5_1: :"gpt-5.1"
  GPT_5_1_2025_11_13: :"gpt-5.1-2025-11-13"
  GPT_5_1_CODEX: :"gpt-5.1-codex"
@@ -39,7 +39,12 @@ module OpenAI
  }

  type model =
- :"gpt-5.1"
+ :"gpt-5.2"
+ | :"gpt-5.2-2025-12-11"
+ | :"gpt-5.2-chat-latest"
+ | :"gpt-5.2-pro"
+ | :"gpt-5.2-pro-2025-12-11"
+ | :"gpt-5.1"
  | :"gpt-5.1-2025-11-13"
  | :"gpt-5.1-codex"
  | :"gpt-5.1-mini"
@@ -127,6 +132,11 @@ module OpenAI

  def self?.variants: -> ::Array[OpenAI::Models::Responses::ResponseCompactParams::model]

+ GPT_5_2: :"gpt-5.2"
+ GPT_5_2_2025_12_11: :"gpt-5.2-2025-12-11"
+ GPT_5_2_CHAT_LATEST: :"gpt-5.2-chat-latest"
+ GPT_5_2_PRO: :"gpt-5.2-pro"
+ GPT_5_2_PRO_2025_12_11: :"gpt-5.2-pro-2025-12-11"
  GPT_5_1: :"gpt-5.1"
  GPT_5_1_2025_11_13: :"gpt-5.1-2025-11-13"
  GPT_5_1_CODEX: :"gpt-5.1-codex"
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: openai
  version: !ruby/object:Gem::Version
- version: 0.38.0
+ version: 0.39.0
  platform: ruby
  authors:
  - OpenAI
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-12-10 00:00:00.000000000 Z
+ date: 2025-12-11 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: base64