openai 0.23.2 → 0.23.3

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 7603886aae923da50eb83881be4d69cf1b5be3616b9903a784f16bcd124c023d
- data.tar.gz: 03feb2933c7a27590301dc9c805d66ce2e7f45e743515e7e46bf9bbf9d61dfe3
+ metadata.gz: 78de67c6327d605033609ccd07e0d74abf3435c29fca8a16814e47bb9a3adccb
+ data.tar.gz: 5acaec7441b93b103201e5b5f9848537f6e61126baaaffaf468073a063970d48
  SHA512:
- metadata.gz: 6de05c188535d0ed867bf936a6e97a7f452bd6924678b672d23f7868feeda7144df86dac454bfa29300d81844799c1ab77adc42767a314975d566acacb3aaefd
- data.tar.gz: bf693ec699be028060d13db1fa9bd5ef3bb980118d5c4b292877bc6a16a88bd743ee10cd6c6a9d3828aa15d31e8e55058d660d831af3179050c68b110d8c705f
+ metadata.gz: 0c3830e5b495692fa49be0fb6f23da597a75f08c376105e98fd7ede52ec06374dbb4e0234739edf2c355f7a76a8e8d77a858dbce211d512f74bddc818717e47e
+ data.tar.gz: 751b22f53a5176438c4a8ee65a67393860ebecd282b8847fc50b0a9ccfe1c39ecc9940c501cb51f2e67ff8746816ff385f3d2721a527533c02e95ab0cc1eaf2d
data/CHANGELOG.md CHANGED
@@ -1,5 +1,13 @@
  # Changelog

+ ## 0.23.3 (2025-09-15)
+
+ Full Changelog: [v0.23.2...v0.23.3](https://github.com/openai/openai-ruby/compare/v0.23.2...v0.23.3)
+
+ ### Chores
+
+ * **api:** docs and spec refactoring ([81ccb86](https://github.com/openai/openai-ruby/commit/81ccb86c346e51a2b5d532a5997358aa86977572))
+
  ## 0.23.2 (2025-09-11)

  Full Changelog: [v0.23.1...v0.23.2](https://github.com/openai/openai-ruby/compare/v0.23.1...v0.23.2)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
  <!-- x-release-please-start-version -->

  ```ruby
- gem "openai", "~> 0.23.2"
+ gem "openai", "~> 0.23.3"
  ```

  <!-- x-release-please-end -->
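
For orientation, a minimal sketch of loading the pinned gem after this bump. The `OpenAI::Client` constructor and the `OPENAI_API_KEY` environment variable are assumptions taken from the gem's README rather than anything shown in this diff.

```ruby
# Gemfile (assumed layout):
#   gem "openai", "~> 0.23.3"

require "openai"

# Assumption: the client accepts an explicit api_key (or reads OPENAI_API_KEY).
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
```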
@@ -21,9 +21,12 @@ module OpenAI
  optional :limit, Integer

  # @!attribute metadata
- # A list of metadata keys to filter the Chat Completions by. Example:
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
  #
- # `metadata[key1]=value1&metadata[key2]=value2`
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  #
  # @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -49,7 +52,7 @@ module OpenAI
  #
  # @param limit [Integer] Number of Chat Completions to retrieve.
  #
- # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example:
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param model [String] The model used to generate the Chat Completions.
  #
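
The reworded `metadata` documentation above describes an attach-style metadata object (up to 16 pairs, 64-character keys, 512-character values) rather than a query-string filter. A hedged sketch of passing such a hash when listing stored Chat Completions; the `client.chat.completions.list` call shape is an assumption based on the gem's generated resources, not something shown in this diff.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # assumed constructor

# Assumed call shape: filter stored Chat Completions by metadata key/value pairs.
page = client.chat.completions.list(
  limit: 20,
  model: "gpt-4o-mini",                  # hypothetical model name
  metadata: { environment: "staging" }   # up to 16 pairs; 64-char keys, 512-char values
)
```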
@@ -18,8 +18,12 @@ module OpenAI
  nil?: true

  # @!attribute metadata
- # Set of 16 key-value pairs that can be attached to an object. Useful for storing
- # additional information about the object in a structured format.
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  #
  # @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
@@ -30,7 +34,7 @@ module OpenAI
  #
  # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context.
  #
- # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
  end
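
The same metadata wording now also applies to conversation creation. A hedged sketch of attaching metadata when creating a conversation; the `client.conversations.create` call shape is assumed, not confirmed by this diff.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # assumed constructor

# Assumed call shape: attach free-form metadata to the new conversation.
conversation = client.conversations.create(
  metadata: { user_id: "u_123", source: "onboarding" } # keys <= 64 chars, values <= 512 chars
)
```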
@@ -314,8 +314,11 @@ module OpenAI
  optional :model, String, nil?: true

  # @!attribute reasoning_effort
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -361,7 +364,7 @@ module OpenAI
  #
  # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
  #
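
The rewritten `reasoning_effort` docs spell out the supported levels (`minimal`, `low`, `medium`, `high`), matching the `OpenAI::ReasoningEffort` enum referenced above. A hedged sketch of passing one of these values; the `client.chat.completions.create` call shape and the model name are assumptions for illustration, not taken from this diff.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"]) # assumed constructor

# Assumed call shape: lower effort generally means faster responses and fewer
# tokens spent on reasoning.
completion = client.chat.completions.create(
  model: "o4-mini",        # hypothetical reasoning-model name
  reasoning_effort: :low,  # one of :minimal, :low, :medium, :high
  messages: [{ role: "user", content: "Summarize this release in one sentence." }]
)
```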
@@ -226,8 +226,11 @@ module OpenAI
  optional :model, String, nil?: true

  # @!attribute reasoning_effort
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -273,7 +276,7 @@ module OpenAI
  #
  # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
  #
@@ -314,8 +314,11 @@ module OpenAI
  optional :model, String, nil?: true

  # @!attribute reasoning_effort
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -361,7 +364,7 @@ module OpenAI
  #
  # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
  #
@@ -314,8 +314,11 @@ module OpenAI
  optional :model, String, nil?: true

  # @!attribute reasoning_effort
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -361,7 +364,7 @@ module OpenAI
  #
  # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
  #
@@ -314,8 +314,11 @@ module OpenAI
  optional :model, String, nil?: true

  # @!attribute reasoning_effort
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
@@ -361,7 +364,7 @@ module OpenAI
  #
  # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
  #
- # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
+ # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Constrains effort on reasoning for
  #
  # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
  #
@@ -387,7 +387,7 @@ module OpenAI
  #
  # @param limit [Integer] Number of Chat Completions to retrieve.
  #
- # @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example:
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param model [String] The model used to generate the Chat Completions.
  #
@@ -15,7 +15,7 @@ module OpenAI
  #
  # @param items [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] Initial items to include in the conversation context.
  #
- # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. Useful for
+ # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
  #
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module OpenAI
- VERSION = "0.23.2"
+ VERSION = "0.23.3"
  end
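
The version constant bump above is the only change to library code in this release; everything else is documentation and type-signature rewording. A quick way to confirm which release is loaded at runtime, using the `OpenAI::VERSION` constant shown in the hunk:

```ruby
require "openai"

# Prints the constant bumped in this release.
puts OpenAI::VERSION # => "0.23.3"
```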
@@ -26,9 +26,12 @@ module OpenAI
  sig { params(limit: Integer).void }
  attr_writer :limit

- # A list of metadata keys to filter the Chat Completions by. Example:
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
  #
- # `metadata[key1]=value1&metadata[key2]=value2`
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  sig { returns(T.nilable(T::Hash[Symbol, String])) }
  attr_accessor :metadata

@@ -70,9 +73,12 @@ module OpenAI
  after: nil,
  # Number of Chat Completions to retrieve.
  limit: nil,
- # A list of metadata keys to filter the Chat Completions by. Example:
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
  #
- # `metadata[key1]=value1&metadata[key2]=value2`
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  metadata: nil,
  # The model used to generate the Chat Completions.
  model: nil,
@@ -50,8 +50,12 @@ module OpenAI
  end
  attr_accessor :items

- # Set of 16 key-value pairs that can be attached to an object. Useful for storing
- # additional information about the object in a structured format.
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  sig { returns(T.nilable(T::Hash[Symbol, String])) }
  attr_accessor :metadata

@@ -93,8 +97,12 @@ module OpenAI
  # Initial items to include in the conversation context. You may add up to 20 items
  # at a time.
  items: nil,
- # Set of 16 key-value pairs that can be attached to an object. Useful for storing
- # additional information about the object in a structured format.
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  metadata: nil,
  request_options: {}
  )
@@ -510,8 +510,11 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :model

- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -566,8 +569,11 @@ module OpenAI
  # The name of the model to find responses for. This is a query parameter used to
  # select responses.
  model: nil,
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -420,8 +420,11 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :model

- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
  attr_accessor :reasoning_effort

@@ -474,8 +477,11 @@ module OpenAI
  # The name of the model to find responses for. This is a query parameter used to
  # select responses.
  model: nil,
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -510,8 +510,11 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :model

- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -566,8 +569,11 @@ module OpenAI
  # The name of the model to find responses for. This is a query parameter used to
  # select responses.
  model: nil,
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -506,8 +506,11 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :model

- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -562,8 +565,11 @@ module OpenAI
  # The name of the model to find responses for. This is a query parameter used to
  # select responses.
  model: nil,
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -512,8 +512,11 @@ module OpenAI
  sig { returns(T.nilable(String)) }
  attr_accessor :model

- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  sig do
  returns(T.nilable(OpenAI::ReasoningEffort::TaggedSymbol))
  end
@@ -568,8 +571,11 @@ module OpenAI
  # The name of the model to find responses for. This is a query parameter used to
  # select responses.
  model: nil,
- # Optional reasoning effort parameter. This is a query parameter used to select
- # responses.
+ # Constrains effort on reasoning for
+ # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+ # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ # effort can result in faster responses and fewer tokens used on reasoning in a
+ # response.
  reasoning_effort: nil,
  # Sampling temperature. This is a query parameter used to select responses.
  temperature: nil,
@@ -699,9 +699,12 @@ module OpenAI
  after: nil,
  # Number of Chat Completions to retrieve.
  limit: nil,
- # A list of metadata keys to filter the Chat Completions by. Example:
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
  #
- # `metadata[key1]=value1&metadata[key2]=value2`
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  metadata: nil,
  # The model used to generate the Chat Completions.
  model: nil,
@@ -45,8 +45,12 @@ module OpenAI
  # Initial items to include in the conversation context. You may add up to 20 items
  # at a time.
  items: nil,
- # Set of 16 key-value pairs that can be attached to an object. Useful for storing
- # additional information about the object in a structured format.
+ # Set of 16 key-value pairs that can be attached to an object. This can be useful
+ # for storing additional information about the object in a structured format, and
+ # querying for objects via API or the dashboard.
+ #
+ # Keys are strings with a maximum length of 64 characters. Values are strings with
+ # a maximum length of 512 characters.
  metadata: nil,
  request_options: {}
  )
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: openai
  version: !ruby/object:Gem::Version
- version: 0.23.2
+ version: 0.23.3
  platform: ruby
  authors:
  - OpenAI
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2025-09-12 00:00:00.000000000 Z
+ date: 2025-09-15 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: connection_pool