openai 0.61.0 → 0.62.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +18 -0
  3. data/README.md +1 -1
  4. data/lib/openai/models/admin/organization/usage_audio_speeches_response.rb +10 -1
  5. data/lib/openai/models/admin/organization/usage_audio_transcriptions_response.rb +10 -1
  6. data/lib/openai/models/admin/organization/usage_code_interpreter_sessions_response.rb +10 -1
  7. data/lib/openai/models/admin/organization/usage_completions_response.rb +10 -1
  8. data/lib/openai/models/admin/organization/usage_costs_response.rb +10 -1
  9. data/lib/openai/models/admin/organization/usage_embeddings_response.rb +10 -1
  10. data/lib/openai/models/admin/organization/usage_images_response.rb +10 -1
  11. data/lib/openai/models/admin/organization/usage_moderations_response.rb +10 -1
  12. data/lib/openai/models/admin/organization/usage_vector_stores_response.rb +10 -1
  13. data/lib/openai/models/chat/chat_completion_token_logprob.rb +1 -2
  14. data/lib/openai/models/chat/completion_create_params.rb +4 -3
  15. data/lib/openai/models/image_edit_params.rb +85 -31
  16. data/lib/openai/models/image_generate_params.rb +78 -26
  17. data/lib/openai/models/image_model.rb +5 -2
  18. data/lib/openai/models/realtime/audio_transcription.rb +37 -5
  19. data/lib/openai/models/realtime/client_secret_create_response.rb +1 -2
  20. data/lib/openai/models/realtime/realtime_audio_config_input.rb +3 -0
  21. data/lib/openai/models/realtime/realtime_audio_input_turn_detection.rb +3 -0
  22. data/lib/openai/models/realtime/realtime_reasoning.rb +24 -0
  23. data/lib/openai/models/realtime/realtime_reasoning_effort.rb +22 -0
  24. data/lib/openai/models/realtime/realtime_response_create_params.rb +18 -1
  25. data/lib/openai/models/realtime/realtime_session.rb +6 -0
  26. data/lib/openai/models/realtime/realtime_session_create_request.rb +21 -1
  27. data/lib/openai/models/realtime/realtime_session_create_response.rb +41 -17
  28. data/lib/openai/models/realtime/realtime_transcription_session_audio_input.rb +3 -0
  29. data/lib/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rb +3 -0
  30. data/lib/openai/models/realtime/realtime_transcription_session_create_response.rb +9 -5
  31. data/lib/openai/models/realtime/realtime_transcription_session_turn_detection.rb +2 -1
  32. data/lib/openai/models/realtime/realtime_translation_client_event.rb +45 -0
  33. data/lib/openai/models/realtime/realtime_translation_client_secret_create_request.rb +85 -0
  34. data/lib/openai/models/realtime/realtime_translation_client_secret_create_response.rb +42 -0
  35. data/lib/openai/models/realtime/realtime_translation_input_audio_buffer_append_event.rb +51 -0
  36. data/lib/openai/models/realtime/realtime_translation_input_transcript_delta_event.rb +55 -0
  37. data/lib/openai/models/realtime/realtime_translation_output_audio_delta_event.rb +89 -0
  38. data/lib/openai/models/realtime/realtime_translation_output_transcript_delta_event.rb +54 -0
  39. data/lib/openai/models/realtime/realtime_translation_server_event.rb +53 -0
  40. data/lib/openai/models/realtime/realtime_translation_session.rb +158 -0
  41. data/lib/openai/models/realtime/realtime_translation_session_close_event.rb +30 -0
  42. data/lib/openai/models/realtime/realtime_translation_session_closed_event.rb +28 -0
  43. data/lib/openai/models/realtime/realtime_translation_session_create_request.rb +138 -0
  44. data/lib/openai/models/realtime/realtime_translation_session_created_event.rb +38 -0
  45. data/lib/openai/models/realtime/realtime_translation_session_update_event.rb +43 -0
  46. data/lib/openai/models/realtime/realtime_translation_session_update_request.rb +129 -0
  47. data/lib/openai/models/realtime/realtime_translation_session_updated_event.rb +37 -0
  48. data/lib/openai/models/realtime/transcription_session_updated_event.rb +1 -2
  49. data/lib/openai/models/responses/response.rb +4 -3
  50. data/lib/openai/models/responses/response_create_params.rb +4 -3
  51. data/lib/openai/models/responses/response_includable.rb +2 -0
  52. data/lib/openai/models/responses/response_text_delta_event.rb +2 -2
  53. data/lib/openai/models/responses/response_text_done_event.rb +2 -2
  54. data/lib/openai/models/responses/responses_client_event.rb +4 -3
  55. data/lib/openai/models/responses/tool.rb +81 -16
  56. data/lib/openai/resources/chat/completions.rb +2 -2
  57. data/lib/openai/resources/images.rb +6 -6
  58. data/lib/openai/resources/realtime/calls.rb +5 -1
  59. data/lib/openai/resources/responses.rb +2 -2
  60. data/lib/openai/version.rb +1 -1
  61. data/lib/openai.rb +18 -1
  62. data/rbi/openai/models/admin/organization/usage_audio_speeches_response.rbi +11 -1
  63. data/rbi/openai/models/admin/organization/usage_audio_transcriptions_response.rbi +11 -1
  64. data/rbi/openai/models/admin/organization/usage_code_interpreter_sessions_response.rbi +11 -1
  65. data/rbi/openai/models/admin/organization/usage_completions_response.rbi +11 -1
  66. data/rbi/openai/models/admin/organization/usage_costs_response.rbi +11 -1
  67. data/rbi/openai/models/admin/organization/usage_embeddings_response.rbi +11 -1
  68. data/rbi/openai/models/admin/organization/usage_images_response.rbi +11 -1
  69. data/rbi/openai/models/admin/organization/usage_moderations_response.rbi +11 -1
  70. data/rbi/openai/models/admin/organization/usage_vector_stores_response.rbi +11 -1
  71. data/rbi/openai/models/chat/chat_completion_token_logprob.rbi +2 -4
  72. data/rbi/openai/models/chat/completion_create_params.rbi +6 -4
  73. data/rbi/openai/models/image_edit_params.rbi +102 -45
  74. data/rbi/openai/models/image_generate_params.rbi +93 -39
  75. data/rbi/openai/models/image_model.rbi +8 -3
  76. data/rbi/openai/models/realtime/audio_transcription.rbi +85 -6
  77. data/rbi/openai/models/realtime/realtime_audio_config_input.rbi +6 -0
  78. data/rbi/openai/models/realtime/realtime_audio_input_turn_detection.rbi +3 -0
  79. data/rbi/openai/models/realtime/realtime_reasoning.rbi +54 -0
  80. data/rbi/openai/models/realtime/realtime_reasoning_effort.rbi +44 -0
  81. data/rbi/openai/models/realtime/realtime_response_create_params.rbi +26 -0
  82. data/rbi/openai/models/realtime/realtime_session.rbi +9 -0
  83. data/rbi/openai/models/realtime/realtime_session_create_request.rbi +31 -0
  84. data/rbi/openai/models/realtime/realtime_session_create_response.rbi +53 -32
  85. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input.rbi +6 -0
  86. data/rbi/openai/models/realtime/realtime_transcription_session_audio_input_turn_detection.rbi +3 -0
  87. data/rbi/openai/models/realtime/realtime_transcription_session_create_response.rbi +13 -7
  88. data/rbi/openai/models/realtime/realtime_transcription_session_turn_detection.rbi +2 -1
  89. data/rbi/openai/models/realtime/realtime_translation_client_event.rbi +29 -0
  90. data/rbi/openai/models/realtime/realtime_translation_client_secret_create_request.rbi +193 -0
  91. data/rbi/openai/models/realtime/realtime_translation_client_secret_create_response.rbi +69 -0
  92. data/rbi/openai/models/realtime/realtime_translation_input_audio_buffer_append_event.rbi +69 -0
  93. data/rbi/openai/models/realtime/realtime_translation_input_transcript_delta_event.rbi +77 -0
  94. data/rbi/openai/models/realtime/realtime_translation_output_audio_delta_event.rbi +148 -0
  95. data/rbi/openai/models/realtime/realtime_translation_output_transcript_delta_event.rbi +76 -0
  96. data/rbi/openai/models/realtime/realtime_translation_server_event.rbi +33 -0
  97. data/rbi/openai/models/realtime/realtime_translation_session.rbi +339 -0
  98. data/rbi/openai/models/realtime/realtime_translation_session_close_event.rbi +44 -0
  99. data/rbi/openai/models/realtime/realtime_translation_session_closed_event.rbi +39 -0
  100. data/rbi/openai/models/realtime/realtime_translation_session_create_request.rbi +322 -0
  101. data/rbi/openai/models/realtime/realtime_translation_session_created_event.rbi +68 -0
  102. data/rbi/openai/models/realtime/realtime_translation_session_update_event.rbi +78 -0
  103. data/rbi/openai/models/realtime/realtime_translation_session_update_request.rbi +313 -0
  104. data/rbi/openai/models/realtime/realtime_translation_session_updated_event.rbi +67 -0
  105. data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +0 -2
  106. data/rbi/openai/models/responses/response.rbi +6 -4
  107. data/rbi/openai/models/responses/response_create_params.rbi +6 -4
  108. data/rbi/openai/models/responses/response_includable.rbi +2 -0
  109. data/rbi/openai/models/responses/response_text_delta_event.rbi +2 -2
  110. data/rbi/openai/models/responses/response_text_done_event.rbi +2 -2
  111. data/rbi/openai/models/responses/responses_client_event.rbi +6 -4
  112. data/rbi/openai/models/responses/tool.rbi +122 -27
  113. data/rbi/openai/resources/chat/completions.rbi +6 -4
  114. data/rbi/openai/resources/images.rbi +110 -44
  115. data/rbi/openai/resources/realtime/calls.rbi +7 -0
  116. data/rbi/openai/resources/responses.rbi +6 -4
  117. data/sig/openai/models/admin/organization/usage_audio_speeches_response.rbs +7 -2
  118. data/sig/openai/models/admin/organization/usage_audio_transcriptions_response.rbs +7 -2
  119. data/sig/openai/models/admin/organization/usage_code_interpreter_sessions_response.rbs +7 -2
  120. data/sig/openai/models/admin/organization/usage_completions_response.rbs +7 -2
  121. data/sig/openai/models/admin/organization/usage_costs_response.rbs +7 -2
  122. data/sig/openai/models/admin/organization/usage_embeddings_response.rbs +7 -2
  123. data/sig/openai/models/admin/organization/usage_images_response.rbs +7 -2
  124. data/sig/openai/models/admin/organization/usage_moderations_response.rbs +7 -2
  125. data/sig/openai/models/admin/organization/usage_vector_stores_response.rbs +7 -2
  126. data/sig/openai/models/image_edit_params.rbs +5 -4
  127. data/sig/openai/models/image_generate_params.rbs +5 -4
  128. data/sig/openai/models/image_model.rbs +11 -5
  129. data/sig/openai/models/realtime/audio_transcription.rbs +25 -0
  130. data/sig/openai/models/realtime/realtime_reasoning.rbs +24 -0
  131. data/sig/openai/models/realtime/realtime_reasoning_effort.rbs +20 -0
  132. data/sig/openai/models/realtime/realtime_response_create_params.rbs +16 -0
  133. data/sig/openai/models/realtime/realtime_session_create_request.rbs +18 -0
  134. data/sig/openai/models/realtime/realtime_session_create_response.rbs +27 -4
  135. data/sig/openai/models/realtime/realtime_transcription_session_create_response.rbs +4 -8
  136. data/sig/openai/models/realtime/realtime_translation_client_event.rbs +16 -0
  137. data/sig/openai/models/realtime/realtime_translation_client_secret_create_request.rbs +69 -0
  138. data/sig/openai/models/realtime/realtime_translation_client_secret_create_response.rbs +32 -0
  139. data/sig/openai/models/realtime/realtime_translation_input_audio_buffer_append_event.rbs +34 -0
  140. data/sig/openai/models/realtime/realtime_translation_input_transcript_delta_event.rbs +37 -0
  141. data/sig/openai/models/realtime/realtime_translation_output_audio_delta_event.rbs +70 -0
  142. data/sig/openai/models/realtime/realtime_translation_output_transcript_delta_event.rbs +37 -0
  143. data/sig/openai/models/realtime/realtime_translation_server_event.rbs +20 -0
  144. data/sig/openai/models/realtime/realtime_translation_session.rbs +131 -0
  145. data/sig/openai/models/realtime/realtime_translation_session_close_event.rbs +20 -0
  146. data/sig/openai/models/realtime/realtime_translation_session_closed_event.rbs +18 -0
  147. data/sig/openai/models/realtime/realtime_translation_session_create_request.rbs +120 -0
  148. data/sig/openai/models/realtime/realtime_translation_session_created_event.rbs +32 -0
  149. data/sig/openai/models/realtime/realtime_translation_session_update_event.rbs +34 -0
  150. data/sig/openai/models/realtime/realtime_translation_session_update_request.rbs +115 -0
  151. data/sig/openai/models/realtime/realtime_translation_session_updated_event.rbs +32 -0
  152. data/sig/openai/models/responses/tool.rbs +15 -5
  153. data/sig/openai/resources/realtime/calls.rbs +2 -0
  154. metadata +56 -5
  155. data/lib/openai/models/realtime/realtime_session_client_secret.rb +0 -36
  156. data/rbi/openai/models/realtime/realtime_session_client_secret.rbi +0 -49
  157. data/sig/openai/models/realtime/realtime_session_client_secret.rbs +0 -20
@@ -6,6 +6,8 @@ module OpenAI
6
6
  # Specify additional output data to include in the model response. Currently
7
7
  # supported values are:
8
8
  #
9
+ # - `web_search_call.results`: Include the search results of the web search tool
10
+ # call.
9
11
  # - `web_search_call.action.sources`: Include the sources of the web search tool
10
12
  # call.
11
13
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
@@ -81,7 +81,7 @@ module OpenAI
81
81
  required :logprob, Float
82
82
 
83
83
  # @!attribute top_logprobs
84
- # The log probability of the top 20 most likely tokens.
84
+ # The log probabilities of up to 20 of the most likely tokens.
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
@@ -99,7 +99,7 @@ module OpenAI
99
99
  #
100
100
  # @param logprob [Float] The log probability of this token.
101
101
  #
102
- # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
102
+ # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>] The log probabilities of up to 20 of the most likely tokens.
103
103
 
104
104
  class TopLogprob < OpenAI::Internal::Type::BaseModel
105
105
  # @!attribute token
@@ -81,7 +81,7 @@ module OpenAI
81
81
  required :logprob, Float
82
82
 
83
83
  # @!attribute top_logprobs
84
- # The log probability of the top 20 most likely tokens.
84
+ # The log probabilities of up to 20 of the most likely tokens.
85
85
  #
86
86
  # @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
87
87
  optional :top_logprobs,
@@ -99,7 +99,7 @@ module OpenAI
99
99
  #
100
100
  # @param logprob [Float] The log probability of this token.
101
101
  #
102
- # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
102
+ # @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>] The log probabilities of up to 20 of the most likely tokens.
103
103
 
104
104
  class TopLogprob < OpenAI::Internal::Type::BaseModel
105
105
  # @!attribute token
@@ -284,8 +284,9 @@ module OpenAI
284
284
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
285
285
 
286
286
  # @!attribute top_logprobs
287
- # An integer between 0 and 20 specifying the number of most likely tokens to
288
- # return at each token position, each with an associated log probability.
287
+ # An integer between 0 and 20 specifying the maximum number of most likely tokens
288
+ # to return at each token position, each with an associated log probability. In
289
+ # some cases, the number of returned tokens may be fewer than requested.
289
290
  #
290
291
  # @return [Integer, nil]
291
292
  optional :top_logprobs, Integer, nil?: true
@@ -378,7 +379,7 @@ module OpenAI
378
379
  #
379
380
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::ComputerUsePreviewTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::NamespaceTool, OpenAI::Models::Responses::ToolSearchTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
380
381
  #
381
- # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
382
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the maximum number of most likely
382
383
  #
383
384
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
384
385
  #
@@ -495,8 +495,18 @@ module OpenAI
495
495
  optional :action, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Action }
496
496
 
497
497
  # @!attribute background
498
- # Background type for the generated image. One of `transparent`, `opaque`, or
499
- # `auto`. Default: `auto`.
498
+ # Allows to set transparency for the background of the generated image(s). This
499
+ # parameter is only supported for GPT image models that support transparent
500
+ # backgrounds. Must be one of `transparent`, `opaque`, or `auto` (default value).
501
+ # When `auto` is used, the model will automatically determine the best background
502
+ # for the image.
503
+ #
504
+ # `gpt-image-2` and `gpt-image-2-2026-04-21` do not support transparent
505
+ # backgrounds. Requests with `background` set to `transparent` will return an
506
+ # error for these models; use `opaque` or `auto` instead.
507
+ #
508
+ # If `transparent`, the output format needs to support transparency, so it should
509
+ # be set to either `png` (default value) or `webp`.
500
510
  #
501
511
  # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background, nil]
502
512
  optional :background, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Background }
@@ -561,11 +571,20 @@ module OpenAI
561
571
  optional :quality, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Quality }
562
572
 
563
573
  # @!attribute size
564
- # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`,
565
- # or `auto`. Default: `auto`.
566
- #
567
- # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size, nil]
568
- optional :size, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Size }
574
+ # The size of the generated images. For `gpt-image-2` and
575
+ # `gpt-image-2-2026-04-21`, arbitrary resolutions are supported as `WIDTHxHEIGHT`
576
+ # strings, for example `1536x864`. Width and height must both be divisible by 16
577
+ # and the requested aspect ratio must be between 1:3 and 3:1. Resolutions above
578
+ # `2560x1440` are experimental, and the maximum supported resolution is
579
+ # `3840x2160`. The requested size must also satisfy the model's current pixel and
580
+ # edge limits. The standard sizes `1024x1024`, `1536x1024`, and `1024x1536` are
581
+ # supported by the GPT image models; `auto` is supported for models that allow
582
+ # automatic sizing. For `dall-e-2`, use one of `256x256`, `512x512`, or
583
+ # `1024x1024`. For `dall-e-3`, use one of `1024x1024`, `1792x1024`, or
584
+ # `1024x1792`.
585
+ #
586
+ # @return [String, Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size, nil]
587
+ optional :size, union: -> { OpenAI::Responses::Tool::ImageGeneration::Size }
569
588
 
570
589
  # @!method initialize(action: nil, background: nil, input_fidelity: nil, input_image_mask: nil, model: nil, moderation: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, size: nil, type: :image_generation)
571
590
  # Some parameter documentations has been truncated, see
@@ -575,7 +594,7 @@ module OpenAI
575
594
  #
576
595
  # @param action [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Action] Whether to generate a new image or edit an existing image. Default: `auto`.
577
596
  #
578
- # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`,
597
+ # @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Allows to set transparency for the background of the generated image(s).
579
598
  #
580
599
  # @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es
581
600
  #
@@ -593,7 +612,7 @@ module OpenAI
593
612
  #
594
613
  # @param quality [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Quality] The quality of the generated image. One of `low`, `medium`, `high`,
595
614
  #
596
- # @param size [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size] The size of the generated image. One of `1024x1024`, `1024x1536`,
615
+ # @param size [String, Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size] The size of the generated images. For `gpt-image-2` and `gpt-image-2-2026-04-21`
597
616
  #
598
617
  # @param type [Symbol, :image_generation] The type of the image generation tool. Always `image_generation`.
599
618
 
@@ -611,8 +630,18 @@ module OpenAI
611
630
  # @return [Array<Symbol>]
612
631
  end
613
632
 
614
- # Background type for the generated image. One of `transparent`, `opaque`, or
615
- # `auto`. Default: `auto`.
633
+ # Allows to set transparency for the background of the generated image(s). This
634
+ # parameter is only supported for GPT image models that support transparent
635
+ # backgrounds. Must be one of `transparent`, `opaque`, or `auto` (default value).
636
+ # When `auto` is used, the model will automatically determine the best background
637
+ # for the image.
638
+ #
639
+ # `gpt-image-2` and `gpt-image-2-2026-04-21` do not support transparent
640
+ # backgrounds. Requests with `background` set to `transparent` will return an
641
+ # error for these models; use `opaque` or `auto` instead.
642
+ #
643
+ # If `transparent`, the output format needs to support transparency, so it should
644
+ # be set to either `png` (default value) or `webp`.
616
645
  #
617
646
  # @see OpenAI::Models::Responses::Tool::ImageGeneration#background
618
647
  module Background
@@ -681,8 +710,14 @@ module OpenAI
681
710
 
682
711
  variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_1_MINI }
683
712
 
713
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_2 }
714
+
715
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_2_2026_04_21 }
716
+
684
717
  variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::GPT_IMAGE_1_5 }
685
718
 
719
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Model::CHATGPT_IMAGE_LATEST }
720
+
686
721
  # @!method self.variants
687
722
  # @return [Array(String, Symbol)]
688
723
 
@@ -694,7 +729,10 @@ module OpenAI
694
729
 
695
730
  GPT_IMAGE_1 = :"gpt-image-1"
696
731
  GPT_IMAGE_1_MINI = :"gpt-image-1-mini"
732
+ GPT_IMAGE_2 = :"gpt-image-2"
733
+ GPT_IMAGE_2_2026_04_21 = :"gpt-image-2-2026-04-21"
697
734
  GPT_IMAGE_1_5 = :"gpt-image-1.5"
735
+ CHATGPT_IMAGE_LATEST = :"chatgpt-image-latest"
698
736
 
699
737
  # @!endgroup
700
738
  end
@@ -743,20 +781,47 @@ module OpenAI
743
781
  # @return [Array<Symbol>]
744
782
  end
745
783
 
746
- # The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`,
747
- # or `auto`. Default: `auto`.
784
+ # The size of the generated images. For `gpt-image-2` and
785
+ # `gpt-image-2-2026-04-21`, arbitrary resolutions are supported as `WIDTHxHEIGHT`
786
+ # strings, for example `1536x864`. Width and height must both be divisible by 16
787
+ # and the requested aspect ratio must be between 1:3 and 3:1. Resolutions above
788
+ # `2560x1440` are experimental, and the maximum supported resolution is
789
+ # `3840x2160`. The requested size must also satisfy the model's current pixel and
790
+ # edge limits. The standard sizes `1024x1024`, `1536x1024`, and `1024x1536` are
791
+ # supported by the GPT image models; `auto` is supported for models that allow
792
+ # automatic sizing. For `dall-e-2`, use one of `256x256`, `512x512`, or
793
+ # `1024x1024`. For `dall-e-3`, use one of `1024x1024`, `1792x1024`, or
794
+ # `1024x1792`.
748
795
  #
749
796
  # @see OpenAI::Models::Responses::Tool::ImageGeneration#size
750
797
  module Size
751
- extend OpenAI::Internal::Type::Enum
798
+ extend OpenAI::Internal::Type::Union
799
+
800
+ variant String
801
+
802
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Size::SIZE_1024X1024 }
803
+
804
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Size::SIZE_1024X1536 }
805
+
806
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Size::SIZE_1536X1024 }
807
+
808
+ variant const: -> { OpenAI::Models::Responses::Tool::ImageGeneration::Size::AUTO }
809
+
810
+ # @!method self.variants
811
+ # @return [Array(String, Symbol)]
812
+
813
+ define_sorbet_constant!(:Variants) do
814
+ T.type_alias { T.any(String, OpenAI::Responses::Tool::ImageGeneration::Size::TaggedSymbol) }
815
+ end
816
+
817
+ # @!group
752
818
 
753
819
  SIZE_1024X1024 = :"1024x1024"
754
820
  SIZE_1024X1536 = :"1024x1536"
755
821
  SIZE_1536X1024 = :"1536x1024"
756
822
  AUTO = :auto
757
823
 
758
- # @!method self.values
759
- # @return [Array<Symbol>]
824
+ # @!endgroup
760
825
  end
761
826
  end
762
827
 
@@ -97,7 +97,7 @@ module OpenAI
97
97
  #
98
98
  # @param tools [Array<OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
99
99
  #
100
- # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
100
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the maximum number of most likely
101
101
  #
102
102
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
103
103
  #
@@ -349,7 +349,7 @@ module OpenAI
349
349
  #
350
350
  # @param tools [Array<OpenAI::Models::Chat::ChatCompletionFunctionTool, OpenAI::Models::Chat::ChatCompletionCustomTool>] A list of tools the model may call. You can provide either
351
351
  #
352
- # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
352
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the maximum number of most likely
353
353
  #
354
354
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
355
355
  #
@@ -62,7 +62,7 @@ module OpenAI
62
62
  #
63
63
  # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
64
64
  #
65
- # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Defaults to `gpt-image-1.5`.
65
+ # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2` or a GPT image model (`
66
66
  #
67
67
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
68
68
  #
@@ -76,7 +76,7 @@ module OpenAI
76
76
  #
77
77
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
78
78
  #
79
- # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
79
+ # @param size [String, Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. For `gpt-image-2` and `gpt-image-2-2026-04-21`
80
80
  #
81
81
  # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
82
82
  #
@@ -123,7 +123,7 @@ module OpenAI
123
123
  #
124
124
  # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
125
125
  #
126
- # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Defaults to `gpt-image-1.5`.
126
+ # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2` or a GPT image model (`
127
127
  #
128
128
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
129
129
  #
@@ -137,7 +137,7 @@ module OpenAI
137
137
  #
138
138
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
139
139
  #
140
- # @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
140
+ # @param size [String, Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. For `gpt-image-2` and `gpt-image-2-2026-04-21`
141
141
  #
142
142
  # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
143
143
  #
@@ -199,7 +199,7 @@ module OpenAI
199
199
  #
200
200
  # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned
201
201
  #
202
- # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
202
+ # @param size [String, Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. For `gpt-image-2` and `gpt-image-2-2026-04-21`
203
203
  #
204
204
  # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e-
205
205
  #
@@ -256,7 +256,7 @@ module OpenAI
256
256
  #
257
257
  # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned
258
258
  #
259
- # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
259
+ # @param size [String, Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. For `gpt-image-2` and `gpt-image-2-2026-04-21`
260
260
  #
261
261
  # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e-
262
262
  #
@@ -10,7 +10,7 @@ module OpenAI
10
10
  # Accept an incoming SIP call and configure the realtime session that will handle
11
11
  # it.
12
12
  #
13
- # @overload accept(call_id, audio: nil, include: nil, instructions: nil, max_output_tokens: nil, model: nil, output_modalities: nil, prompt: nil, tool_choice: nil, tools: nil, tracing: nil, truncation: nil, type: :realtime, request_options: {})
13
+ # @overload accept(call_id, audio: nil, include: nil, instructions: nil, max_output_tokens: nil, model: nil, output_modalities: nil, parallel_tool_calls: nil, prompt: nil, reasoning: nil, tool_choice: nil, tools: nil, tracing: nil, truncation: nil, type: :realtime, request_options: {})
14
14
  #
15
15
  # @param call_id [String] The identifier for the call provided in the
16
16
  #
@@ -26,8 +26,12 @@ module OpenAI
26
26
  #
27
27
  # @param output_modalities [Array<Symbol, OpenAI::Models::Realtime::RealtimeSessionCreateRequest::OutputModality>] The set of modalities the model can respond with. It defaults to `["audio"]`, in
28
28
  #
29
+ # @param parallel_tool_calls [Boolean] Whether the model may call multiple tools in parallel. Only supported by
30
+ #
29
31
  # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
30
32
  #
33
+ # @param reasoning [OpenAI::Models::Realtime::RealtimeReasoning] Configuration for reasoning-capable Realtime models such as `gpt-realtime-2`.
34
+ #
31
35
  # @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model chooses tools. Provide one of the string modes or force a specific
32
36
  #
33
37
  # @param tools [Array<OpenAI::Models::Realtime::RealtimeFunctionTool, OpenAI::Models::Realtime::RealtimeToolsConfigUnion::Mcp>] Tools available to the model.
@@ -76,7 +76,7 @@ module OpenAI
76
76
  #
77
77
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::ComputerUsePreviewTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::NamespaceTool, OpenAI::Models::Responses::ToolSearchTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
78
78
  #
79
- # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
79
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the maximum number of most likely
80
80
  #
81
81
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
82
82
  #
@@ -299,7 +299,7 @@ module OpenAI
299
299
  #
300
300
  # @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::ComputerUsePreviewTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::NamespaceTool, OpenAI::Models::Responses::ToolSearchTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
301
301
  #
302
- # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
302
+ # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the maximum number of most likely
303
303
  #
304
304
  # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
305
305
  #
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module OpenAI
4
- VERSION = "0.61.0"
4
+ VERSION = "0.62.0"
5
5
  end
data/lib/openai.rb CHANGED
@@ -629,6 +629,8 @@ require_relative "openai/models/realtime/realtime_mcp_list_tools"
629
629
  require_relative "openai/models/realtime/realtime_mcp_protocol_error"
630
630
  require_relative "openai/models/realtime/realtime_mcp_tool_call"
631
631
  require_relative "openai/models/realtime/realtime_mcp_tool_execution_error"
632
+ require_relative "openai/models/realtime/realtime_reasoning"
633
+ require_relative "openai/models/realtime/realtime_reasoning_effort"
632
634
  require_relative "openai/models/realtime/realtime_response"
633
635
  require_relative "openai/models/realtime/realtime_response_create_audio_output"
634
636
  require_relative "openai/models/realtime/realtime_response_create_mcp_tool"
@@ -639,7 +641,6 @@ require_relative "openai/models/realtime/realtime_response_usage_input_token_det
639
641
  require_relative "openai/models/realtime/realtime_response_usage_output_token_details"
640
642
  require_relative "openai/models/realtime/realtime_server_event"
641
643
  require_relative "openai/models/realtime/realtime_session"
642
- require_relative "openai/models/realtime/realtime_session_client_secret"
643
644
  require_relative "openai/models/realtime/realtime_session_create_response"
644
645
  require_relative "openai/models/realtime/realtime_tool_choice_config"
645
646
  require_relative "openai/models/realtime/realtime_tools_config"
@@ -651,6 +652,22 @@ require_relative "openai/models/realtime/realtime_transcription_session_audio_in
651
652
  require_relative "openai/models/realtime/realtime_transcription_session_create_request"
652
653
  require_relative "openai/models/realtime/realtime_transcription_session_create_response"
653
654
  require_relative "openai/models/realtime/realtime_transcription_session_turn_detection"
655
+ require_relative "openai/models/realtime/realtime_translation_client_event"
656
+ require_relative "openai/models/realtime/realtime_translation_client_secret_create_request"
657
+ require_relative "openai/models/realtime/realtime_translation_client_secret_create_response"
658
+ require_relative "openai/models/realtime/realtime_translation_input_audio_buffer_append_event"
659
+ require_relative "openai/models/realtime/realtime_translation_input_transcript_delta_event"
660
+ require_relative "openai/models/realtime/realtime_translation_output_audio_delta_event"
661
+ require_relative "openai/models/realtime/realtime_translation_output_transcript_delta_event"
662
+ require_relative "openai/models/realtime/realtime_translation_server_event"
663
+ require_relative "openai/models/realtime/realtime_translation_session"
664
+ require_relative "openai/models/realtime/realtime_translation_session_closed_event"
665
+ require_relative "openai/models/realtime/realtime_translation_session_close_event"
666
+ require_relative "openai/models/realtime/realtime_translation_session_created_event"
667
+ require_relative "openai/models/realtime/realtime_translation_session_create_request"
668
+ require_relative "openai/models/realtime/realtime_translation_session_updated_event"
669
+ require_relative "openai/models/realtime/realtime_translation_session_update_event"
670
+ require_relative "openai/models/realtime/realtime_translation_session_update_request"
654
671
  require_relative "openai/models/realtime/realtime_truncation"
655
672
  require_relative "openai/models/realtime/realtime_truncation_retention_ratio"
656
673
  require_relative "openai/models/realtime/response_audio_delta_event"
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageAudioSpeechesResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageAudioTranscriptionsResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageCodeInterpreterSessionsResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageCompletionsResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageCostsResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end
@@ -926,6 +926,11 @@ module OpenAI
926
926
  sig { returns(T.nilable(String)) }
927
927
  attr_accessor :project_id
928
928
 
929
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
930
+ # result.
931
+ sig { returns(T.nilable(Float)) }
932
+ attr_accessor :quantity
933
+
929
934
  # The aggregated costs details of the specific time bucket.
930
935
  sig do
931
936
  params(
@@ -934,6 +939,7 @@ module OpenAI
934
939
  api_key_id: T.nilable(String),
935
940
  line_item: T.nilable(String),
936
941
  project_id: T.nilable(String),
942
+ quantity: T.nilable(Float),
937
943
  object: Symbol
938
944
  ).returns(T.attached_class)
939
945
  end
@@ -949,6 +955,9 @@ module OpenAI
949
955
  # When `group_by=project_id`, this field provides the project ID of the grouped
950
956
  # costs result.
951
957
  project_id: nil,
958
+ # When `group_by=line_item`, this field provides the quantity of the grouped costs
959
+ # result.
960
+ quantity: nil,
952
961
  object: :"organization.costs.result"
953
962
  )
954
963
  end
@@ -961,7 +970,8 @@ module OpenAI
961
970
  OpenAI::Models::Admin::Organization::UsageEmbeddingsResponse::Data::Result::OrganizationCostsResult::Amount,
962
971
  api_key_id: T.nilable(String),
963
972
  line_item: T.nilable(String),
964
- project_id: T.nilable(String)
973
+ project_id: T.nilable(String),
974
+ quantity: T.nilable(Float)
965
975
  }
966
976
  )
967
977
  end