openai 0.37.0 → 0.39.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +21 -0
  3. data/README.md +9 -9
  4. data/lib/openai/internal/type/enum.rb +6 -6
  5. data/lib/openai/models/beta/assistant_create_params.rb +1 -1
  6. data/lib/openai/models/beta/assistant_update_params.rb +1 -1
  7. data/lib/openai/models/beta/threads/run_create_params.rb +1 -1
  8. data/lib/openai/models/chat/completion_create_params.rb +1 -1
  9. data/lib/openai/models/chat_model.rb +5 -0
  10. data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +1 -1
  11. data/lib/openai/models/evals/run_cancel_response.rb +2 -2
  12. data/lib/openai/models/evals/run_create_params.rb +2 -2
  13. data/lib/openai/models/evals/run_create_response.rb +2 -2
  14. data/lib/openai/models/evals/run_list_response.rb +2 -2
  15. data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
  16. data/lib/openai/models/graders/score_model_grader.rb +7 -3
  17. data/lib/openai/models/reasoning.rb +5 -3
  18. data/lib/openai/models/reasoning_effort.rb +1 -1
  19. data/lib/openai/models/responses/response_compact_params.rb +45 -30
  20. data/lib/openai/resources/responses.rb +9 -9
  21. data/lib/openai/version.rb +1 -1
  22. data/rbi/openai/models/beta/assistant_create_params.rbi +2 -2
  23. data/rbi/openai/models/beta/assistant_update_params.rbi +2 -2
  24. data/rbi/openai/models/beta/threads/run_create_params.rbi +2 -2
  25. data/rbi/openai/models/chat/completion_create_params.rbi +2 -2
  26. data/rbi/openai/models/chat_model.rbi +8 -0
  27. data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +2 -2
  28. data/rbi/openai/models/evals/run_cancel_response.rbi +4 -4
  29. data/rbi/openai/models/evals/run_create_params.rbi +4 -4
  30. data/rbi/openai/models/evals/run_create_response.rbi +4 -4
  31. data/rbi/openai/models/evals/run_list_response.rbi +4 -4
  32. data/rbi/openai/models/evals/run_retrieve_response.rbi +4 -4
  33. data/rbi/openai/models/graders/score_model_grader.rbi +6 -4
  34. data/rbi/openai/models/reasoning.rbi +8 -5
  35. data/rbi/openai/models/reasoning_effort.rbi +1 -1
  36. data/rbi/openai/models/responses/response_compact_params.rbi +87 -62
  37. data/rbi/openai/resources/beta/assistants.rbi +2 -2
  38. data/rbi/openai/resources/beta/threads/runs.rbi +2 -2
  39. data/rbi/openai/resources/chat/completions.rbi +2 -2
  40. data/rbi/openai/resources/responses.rbi +11 -11
  41. data/sig/openai/models/chat_model.rbs +11 -1
  42. data/sig/openai/models/responses/response_compact_params.rbs +27 -17
  43. data/sig/openai/resources/responses.rbs +1 -1
  44. metadata +2 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 136d7cc343b404ef69a7d7d32adcc6942d66b3e87385ed6ad02d91cf6014dad6
4
- data.tar.gz: b7192ae587a9921a96e547b52c64dbd312b8b8d66ee6b6de9f572b18a0809625
3
+ metadata.gz: 6fe08f20637a8f850964152557cf797e6c078ea740bd5c5e4d5e88ef0ed5c0b9
4
+ data.tar.gz: b372a97f47fe9ae0c1c41fdffbd0d5a7f9b42e46fb3d7a976e5c5f3c11970cba
5
5
  SHA512:
6
- metadata.gz: 900ac61caada3d3a9e099ecd37807f331eaa2785b05183bad4b512deb5d2698b4926142cc5d63bcbb870f21bf5659a70601e4389309dbbcbe92e06d8513b3f96
7
- data.tar.gz: 148cff14a45e1e1473e946221cad39b44c0c4162d993ee5333367cf64b850f2beafc11016596cc274e1ece7a2526ba5aed1d446a06451294a71c18b69eb280c8
6
+ metadata.gz: 5db010ad9ae69bc8bcc36538de8ef659e2e62438b80b08a7ee85ca952c6df79e67ac3f0d640846b31bac4ce55f41fcc45942062d23cf2ccc0f7e50b5ff2c28fb
7
+ data.tar.gz: 2f5c2ae60e053513757f7237af9e3559a09396e648549322e0ac35eda38811e95810992fb5ccf0fae1354c85f595dcebc6621d9d4a46e2e4ae54fdde1a4af82d
data/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.39.0 (2025-12-11)
4
+
5
+ Full Changelog: [v0.38.0...v0.39.0](https://github.com/openai/openai-ruby/compare/v0.38.0...v0.39.0)
6
+
7
+ ### Features
8
+
9
+ * **api:** gpt 5.2 ([369f26b](https://github.com/openai/openai-ruby/commit/369f26b5e42ddef31d07278e43776415f5d49b62))
10
+
11
+
12
+ ### Bug Fixes
13
+
14
+ * Create new responses with previous_response_id ([#869](https://github.com/openai/openai-ruby/issues/869)) ([b14e2aa](https://github.com/openai/openai-ruby/commit/b14e2aa2351d102c4af30809ebb4a5cca61a1165))
15
+
16
+ ## 0.38.0 (2025-12-08)
17
+
18
+ Full Changelog: [v0.37.0...v0.38.0](https://github.com/openai/openai-ruby/compare/v0.37.0...v0.38.0)
19
+
20
+ ### Features
21
+
22
+ * **api:** make model required for the responses/compact endpoint ([94ad657](https://github.com/openai/openai-ruby/commit/94ad657d3824838dbb1517bb6aa43341a0581102))
23
+
3
24
  ## 0.37.0 (2025-12-04)
4
25
 
5
26
  Full Changelog: [v0.36.1...v0.37.0](https://github.com/openai/openai-ruby/compare/v0.36.1...v0.37.0)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
15
15
  <!-- x-release-please-start-version -->
16
16
 
17
17
  ```ruby
18
- gem "openai", "~> 0.37.0"
18
+ gem "openai", "~> 0.39.0"
19
19
  ```
20
20
 
21
21
  <!-- x-release-please-end -->
@@ -32,7 +32,7 @@ openai = OpenAI::Client.new(
32
32
 
33
33
  chat_completion = openai.chat.completions.create(
34
34
  messages: [{role: "user", content: "Say this is a test"}],
35
- model: :"gpt-5.1"
35
+ model: :"gpt-5.2"
36
36
  )
37
37
 
38
38
  puts(chat_completion)
@@ -45,7 +45,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
45
45
  ```ruby
46
46
  stream = openai.responses.stream(
47
47
  input: "Write a haiku about OpenAI.",
48
- model: :"gpt-5.1"
48
+ model: :"gpt-5.2"
49
49
  )
50
50
 
51
51
  stream.each do |event|
@@ -343,7 +343,7 @@ openai = OpenAI::Client.new(
343
343
  # Or, configure per-request:
344
344
  openai.chat.completions.create(
345
345
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
346
- model: :"gpt-5.1",
346
+ model: :"gpt-5.2",
347
347
  request_options: {max_retries: 5}
348
348
  )
349
349
  ```
@@ -361,7 +361,7 @@ openai = OpenAI::Client.new(
361
361
  # Or, configure per-request:
362
362
  openai.chat.completions.create(
363
363
  messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
364
- model: :"gpt-5.1",
364
+ model: :"gpt-5.2",
365
365
  request_options: {timeout: 5}
366
366
  )
367
367
  ```
@@ -396,7 +396,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
396
396
  chat_completion =
397
397
  openai.chat.completions.create(
398
398
  messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
399
- model: :"gpt-5.1",
399
+ model: :"gpt-5.2",
400
400
  request_options: {
401
401
  extra_query: {my_query_parameter: value},
402
402
  extra_body: {my_body_parameter: value},
@@ -444,7 +444,7 @@ You can provide typesafe request parameters like so:
444
444
  ```ruby
445
445
  openai.chat.completions.create(
446
446
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
447
- model: :"gpt-5.1"
447
+ model: :"gpt-5.2"
448
448
  )
449
449
  ```
450
450
 
@@ -454,13 +454,13 @@ Or, equivalently:
454
454
  # Hashes work, but are not typesafe:
455
455
  openai.chat.completions.create(
456
456
  messages: [{role: "user", content: "Say this is a test"}],
457
- model: :"gpt-5.1"
457
+ model: :"gpt-5.2"
458
458
  )
459
459
 
460
460
  # You can also splat a full Params class:
461
461
  params = OpenAI::Chat::CompletionCreateParams.new(
462
462
  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
463
- model: :"gpt-5.1"
463
+ model: :"gpt-5.2"
464
464
  )
465
465
  openai.chat.completions.create(**params)
466
466
  ```
@@ -19,11 +19,11 @@ module OpenAI
19
19
  # @example
20
20
  # # `chat_model` is a `OpenAI::ChatModel`
21
21
  # case chat_model
22
- # when OpenAI::ChatModel::GPT_5_1
22
+ # when OpenAI::ChatModel::GPT_5_2
23
23
  # # ...
24
- # when OpenAI::ChatModel::GPT_5_1_2025_11_13
24
+ # when OpenAI::ChatModel::GPT_5_2_2025_12_11
25
25
  # # ...
26
- # when OpenAI::ChatModel::GPT_5_1_CODEX
26
+ # when OpenAI::ChatModel::GPT_5_2_CHAT_LATEST
27
27
  # # ...
28
28
  # else
29
29
  # puts(chat_model)
@@ -31,11 +31,11 @@ module OpenAI
31
31
  #
32
32
  # @example
33
33
  # case chat_model
34
- # in :"gpt-5.1"
34
+ # in :"gpt-5.2"
35
35
  # # ...
36
- # in :"gpt-5.1-2025-11-13"
36
+ # in :"gpt-5.2-2025-12-11"
37
37
  # # ...
38
- # in :"gpt-5.1-codex"
38
+ # in :"gpt-5.2-chat-latest"
39
39
  # # ...
40
40
  # else
41
41
  # puts(chat_model)
@@ -61,7 +61,7 @@ module OpenAI
61
61
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
62
62
  # support `none`.
63
63
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
64
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
64
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
65
65
  #
66
66
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
67
67
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -61,7 +61,7 @@ module OpenAI
61
61
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
62
62
  # support `none`.
63
63
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
64
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
64
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
65
65
  #
66
66
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
67
67
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -119,7 +119,7 @@ module OpenAI
119
119
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
120
120
  # support `none`.
121
121
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
122
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
122
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
123
123
  #
124
124
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
125
125
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -214,7 +214,7 @@ module OpenAI
214
214
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
215
215
  # support `none`.
216
216
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
217
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
217
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
218
218
  #
219
219
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
220
220
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -5,6 +5,11 @@ module OpenAI
5
5
  module ChatModel
6
6
  extend OpenAI::Internal::Type::Enum
7
7
 
8
+ GPT_5_2 = :"gpt-5.2"
9
+ GPT_5_2_2025_12_11 = :"gpt-5.2-2025-12-11"
10
+ GPT_5_2_CHAT_LATEST = :"gpt-5.2-chat-latest"
11
+ GPT_5_2_PRO = :"gpt-5.2-pro"
12
+ GPT_5_2_PRO_2025_12_11 = :"gpt-5.2-pro-2025-12-11"
8
13
  GPT_5_1 = :"gpt-5.1"
9
14
  GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
10
15
  GPT_5_1_CODEX = :"gpt-5.1-codex"
@@ -472,7 +472,7 @@ module OpenAI
472
472
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
473
473
  # support `none`.
474
474
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
475
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
475
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
476
476
  #
477
477
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
478
478
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
330
330
  #
331
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
332
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
675
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
676
676
  # support `none`.
677
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
678
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
679
679
  #
680
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
681
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -238,7 +238,7 @@ module OpenAI
238
238
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
239
239
  # support `none`.
240
240
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
241
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
241
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
242
242
  #
243
243
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
244
244
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -603,7 +603,7 @@ module OpenAI
603
603
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
604
604
  # support `none`.
605
605
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
606
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
606
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
607
607
  #
608
608
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
609
609
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
330
330
  #
331
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
332
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
675
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
676
676
  # support `none`.
677
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
678
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
679
679
  #
680
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
681
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
330
330
  #
331
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
332
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -675,7 +675,7 @@ module OpenAI
675
675
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
676
676
  # support `none`.
677
677
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
678
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
678
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
679
679
  #
680
680
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
681
681
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -326,7 +326,7 @@ module OpenAI
326
326
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
327
327
  # support `none`.
328
328
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
329
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
329
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
330
330
  #
331
331
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
332
332
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -679,7 +679,7 @@ module OpenAI
679
679
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
680
680
  # support `none`.
681
681
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
682
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
682
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
683
683
  #
684
684
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
685
685
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -5,7 +5,8 @@ module OpenAI
5
5
  module Graders
6
6
  class ScoreModelGrader < OpenAI::Internal::Type::BaseModel
7
7
  # @!attribute input
8
- # The input text. This may include template strings.
8
+ # The input messages evaluated by the grader. Supports text, output text, input
9
+ # image, and input audio content blocks, and may include template strings.
9
10
  #
10
11
  # @return [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>]
11
12
  required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::ScoreModelGrader::Input] }
@@ -41,9 +42,12 @@ module OpenAI
41
42
  optional :sampling_params, -> { OpenAI::Graders::ScoreModelGrader::SamplingParams }
42
43
 
43
44
  # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model)
45
+ # Some parameter documentations has been truncated, see
46
+ # {OpenAI::Models::Graders::ScoreModelGrader} for more details.
47
+ #
44
48
  # A ScoreModelGrader object that uses a model to assign a score to the input.
45
49
  #
46
- # @param input [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>] The input text. This may include template strings.
50
+ # @param input [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>] The input messages evaluated by the grader. Supports text, output text, input im
47
51
  #
48
52
  # @param model [String] The model to use for the evaluation.
49
53
  #
@@ -232,7 +236,7 @@ module OpenAI
232
236
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
233
237
  # support `none`.
234
238
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
235
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
239
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
236
240
  #
237
241
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
238
242
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -16,7 +16,7 @@ module OpenAI
16
16
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
17
17
  # support `none`.
18
18
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
19
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
19
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
20
20
  #
21
21
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
22
22
  optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -38,7 +38,8 @@ module OpenAI
38
38
  # debugging and understanding the model's reasoning process. One of `auto`,
39
39
  # `concise`, or `detailed`.
40
40
  #
41
- # `concise` is only supported for `computer-use-preview` models.
41
+ # `concise` is supported for `computer-use-preview` models and all reasoning
42
+ # models after `gpt-5`.
42
43
  #
43
44
  # @return [Symbol, OpenAI::Models::Reasoning::Summary, nil]
44
45
  optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true
@@ -82,7 +83,8 @@ module OpenAI
82
83
  # debugging and understanding the model's reasoning process. One of `auto`,
83
84
  # `concise`, or `detailed`.
84
85
  #
85
- # `concise` is only supported for `computer-use-preview` models.
86
+ # `concise` is supported for `computer-use-preview` models and all reasoning
87
+ # models after `gpt-5`.
86
88
  #
87
89
  # @see OpenAI::Models::Reasoning#summary
88
90
  module Summary
@@ -14,7 +14,7 @@ module OpenAI
14
14
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
15
15
  # support `none`.
16
16
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
17
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
17
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
18
18
  module ReasoningEffort
19
19
  extend OpenAI::Internal::Type::Enum
20
20
 
@@ -8,6 +8,16 @@ module OpenAI
8
8
  extend OpenAI::Internal::Type::RequestParameters::Converter
9
9
  include OpenAI::Internal::Type::RequestParameters
10
10
 
11
+ # @!attribute model
12
+ # Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
13
+ # wide range of models with different capabilities, performance characteristics,
14
+ # and price points. Refer to the
15
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
16
+ # available models.
17
+ #
18
+ # @return [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil]
19
+ required :model, union: -> { OpenAI::Responses::ResponseCompactParams::Model }, nil?: true
20
+
11
21
  # @!attribute input
12
22
  # Text, image, or file inputs to the model, used to generate a response
13
23
  #
@@ -23,16 +33,6 @@ module OpenAI
23
33
  # @return [String, nil]
24
34
  optional :instructions, String, nil?: true
25
35
 
26
- # @!attribute model
27
- # Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
28
- # wide range of models with different capabilities, performance characteristics,
29
- # and price points. Refer to the
30
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
31
- # available models.
32
- #
33
- # @return [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil]
34
- optional :model, union: -> { OpenAI::Responses::ResponseCompactParams::Model }, nil?: true
35
-
36
36
  # @!attribute previous_response_id
37
37
  # The unique ID of the previous response to the model. Use this to create
38
38
  # multi-turn conversations. Learn more about
@@ -42,37 +42,20 @@ module OpenAI
42
42
  # @return [String, nil]
43
43
  optional :previous_response_id, String, nil?: true
44
44
 
45
- # @!method initialize(input: nil, instructions: nil, model: nil, previous_response_id: nil, request_options: {})
45
+ # @!method initialize(model:, input: nil, instructions: nil, previous_response_id: nil, request_options: {})
46
46
  # Some parameter documentations has been truncated, see
47
47
  # {OpenAI::Models::Responses::ResponseCompactParams} for more details.
48
48
  #
49
+ # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
50
+ #
49
51
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
50
52
  #
51
53
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
52
54
  #
53
- # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
54
- #
55
55
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to create multi-tu
56
56
  #
57
57
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
58
58
 
59
- # Text, image, or file inputs to the model, used to generate a response
60
- module Input
61
- extend OpenAI::Internal::Type::Union
62
-
63
- # A text input to the model, equivalent to a text input with the `user` role.
64
- variant String
65
-
66
- variant -> { OpenAI::Models::Responses::ResponseCompactParams::Input::ResponseInputItemArray }
67
-
68
- # @!method self.variants
69
- # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
70
-
71
- # @type [OpenAI::Internal::Type::Converter]
72
- ResponseInputItemArray =
73
- OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
74
- end
75
-
76
59
  # Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a
77
60
  # wide range of models with different capabilities, performance characteristics,
78
61
  # and price points. Refer to the
@@ -81,6 +64,16 @@ module OpenAI
81
64
  module Model
82
65
  extend OpenAI::Internal::Type::Union
83
66
 
67
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2 }
68
+
69
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_2025_12_11 }
70
+
71
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_CHAT_LATEST }
72
+
73
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_PRO }
74
+
75
+ variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_2_PRO_2025_12_11 }
76
+
84
77
  variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_1 }
85
78
 
86
79
  variant const: -> { OpenAI::Models::Responses::ResponseCompactParams::Model::GPT_5_1_2025_11_13 }
@@ -254,6 +247,11 @@ module OpenAI
254
247
 
255
248
  # @!group
256
249
 
250
+ GPT_5_2 = :"gpt-5.2"
251
+ GPT_5_2_2025_12_11 = :"gpt-5.2-2025-12-11"
252
+ GPT_5_2_CHAT_LATEST = :"gpt-5.2-chat-latest"
253
+ GPT_5_2_PRO = :"gpt-5.2-pro"
254
+ GPT_5_2_PRO_2025_12_11 = :"gpt-5.2-pro-2025-12-11"
257
255
  GPT_5_1 = :"gpt-5.1"
258
256
  GPT_5_1_2025_11_13 = :"gpt-5.1-2025-11-13"
259
257
  GPT_5_1_CODEX = :"gpt-5.1-codex"
@@ -338,6 +336,23 @@ module OpenAI
338
336
 
339
337
  # @!endgroup
340
338
  end
339
+
340
+ # Text, image, or file inputs to the model, used to generate a response
341
+ module Input
342
+ extend OpenAI::Internal::Type::Union
343
+
344
+ # A text input to the model, equivalent to a text input with the `user` role.
345
+ variant String
346
+
347
+ variant -> { OpenAI::Models::Responses::ResponseCompactParams::Input::ResponseInputItemArray }
348
+
349
+ # @!method self.variants
350
+ # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
351
+
352
+ # @type [OpenAI::Internal::Type::Converter]
353
+ ResponseInputItemArray =
354
+ OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
355
+ end
341
356
  end
342
357
  end
343
358
  end
@@ -186,10 +186,10 @@ module OpenAI
186
186
  # @see OpenAI::Models::Responses::ResponseCreateParams
187
187
  def stream(params)
188
188
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
189
- starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)
189
+ starting_after, response_id = parsed.values_at(:starting_after, :response_id)
190
190
 
191
- if starting_after && !previous_response_id
192
- raise ArgumentError, "starting_after can only be used with previous_response_id"
191
+ if starting_after && !response_id
192
+ raise ArgumentError, "starting_after can only be used with response_id"
193
193
  end
194
194
  model, tool_models = get_structured_output_models(parsed)
195
195
 
@@ -200,11 +200,11 @@ module OpenAI
200
200
  raw
201
201
  end
202
202
 
203
- if previous_response_id
203
+ if response_id
204
204
  retrieve_params = params.slice(:include, :request_options)
205
205
 
206
206
  raw_stream = retrieve_streaming_internal(
207
- previous_response_id,
207
+ response_id,
208
208
  params: retrieve_params,
209
209
  unwrap: unwrap
210
210
  )
@@ -464,14 +464,14 @@ module OpenAI
464
464
  #
465
465
  # Compact conversation
466
466
  #
467
- # @overload compact(input: nil, instructions: nil, model: nil, previous_response_id: nil, request_options: {})
467
+ # @overload compact(model:, input: nil, instructions: nil, previous_response_id: nil, request_options: {})
468
+ #
469
+ # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
468
470
  #
469
471
  # @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseCompactionItemParam, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Text, image, or file inputs to the model, used to generate a response
470
472
  #
471
473
  # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
472
474
  #
473
- # @param model [Symbol, String, OpenAI::Models::Responses::ResponseCompactParams::Model, nil] Model ID used to generate the response, like `gpt-5` or `o3`. OpenAI offers a wi
474
- #
475
475
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to create multi-tu
476
476
  #
477
477
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
@@ -479,7 +479,7 @@ module OpenAI
479
479
  # @return [OpenAI::Models::Responses::CompactedResponse]
480
480
  #
481
481
  # @see OpenAI::Models::Responses::ResponseCompactParams
482
- def compact(params = {})
482
+ def compact(params)
483
483
  parsed, options = OpenAI::Responses::ResponseCompactParams.dump_request(params)
484
484
  @client.request(
485
485
  method: :post,
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.37.0"
4
+ VERSION = "0.39.0"
5
5
  end
@@ -57,7 +57,7 @@ module OpenAI
57
57
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
58
58
  # support `none`.
59
59
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
60
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
60
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
61
61
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
62
62
  attr_accessor :reasoning_effort
63
63
 
@@ -227,7 +227,7 @@ module OpenAI
227
227
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
228
228
  # support `none`.
229
229
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
230
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
230
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
231
231
  reasoning_effort: nil,
232
232
  # Specifies the format that the model must output. Compatible with
233
233
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -77,7 +77,7 @@ module OpenAI
77
77
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
78
78
  # support `none`.
79
79
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
80
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
80
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
81
81
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
82
82
  attr_accessor :reasoning_effort
83
83
 
@@ -251,7 +251,7 @@ module OpenAI
251
251
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
252
252
  # support `none`.
253
253
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
254
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
254
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
255
255
  reasoning_effort: nil,
256
256
  # Specifies the format that the model must output. Compatible with
257
257
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -123,7 +123,7 @@ module OpenAI
123
123
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
124
124
  # support `none`.
125
125
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
126
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
126
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
127
127
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
128
128
  attr_accessor :reasoning_effort
129
129
 
@@ -349,7 +349,7 @@ module OpenAI
349
349
  # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
350
350
  # support `none`.
351
351
  # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
352
- # - `xhigh` is currently only supported for `gpt-5.1-codex-max`.
352
+ # - `xhigh` is supported for all models after `gpt-5.1-codex-max`.
353
353
  reasoning_effort: nil,
354
354
  # Specifies the format that the model must output. Compatible with
355
355
  # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),