openai 0.13.1 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +26 -0
  3. data/README.md +3 -3
  4. data/lib/openai/models/audio/speech_create_params.rb +0 -9
  5. data/lib/openai/models/chat/chat_completion.rb +2 -2
  6. data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
  7. data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
  8. data/lib/openai/models/chat/completion_create_params.rb +2 -2
  9. data/lib/openai/models/function_definition.rb +1 -1
  10. data/lib/openai/models/image_edit_completed_event.rb +198 -0
  11. data/lib/openai/models/image_edit_params.rb +39 -1
  12. data/lib/openai/models/image_edit_partial_image_event.rb +135 -0
  13. data/lib/openai/models/image_edit_stream_event.rb +21 -0
  14. data/lib/openai/models/image_gen_completed_event.rb +198 -0
  15. data/lib/openai/models/image_gen_partial_image_event.rb +135 -0
  16. data/lib/openai/models/image_gen_stream_event.rb +21 -0
  17. data/lib/openai/models/image_generate_params.rb +16 -1
  18. data/lib/openai/models/images_response.rb +2 -2
  19. data/lib/openai/models/responses/response.rb +2 -2
  20. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
  21. data/lib/openai/models/responses/response_create_params.rb +2 -2
  22. data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
  23. data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
  24. data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
  25. data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
  26. data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
  27. data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
  28. data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
  29. data/lib/openai/models/responses/response_output_refusal.rb +2 -2
  30. data/lib/openai/models/responses/response_stream_event.rb +1 -7
  31. data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
  32. data/lib/openai/models/responses/response_text_done_event.rb +66 -1
  33. data/lib/openai/models/responses/tool.rb +30 -1
  34. data/lib/openai/models.rb +12 -0
  35. data/lib/openai/resources/images.rb +140 -2
  36. data/lib/openai/resources/responses.rb +2 -2
  37. data/lib/openai/version.rb +1 -1
  38. data/lib/openai.rb +6 -2
  39. data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
  40. data/rbi/openai/models/chat/chat_completion.rbi +3 -3
  41. data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
  42. data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
  43. data/rbi/openai/models/chat/completion_create_params.rbi +3 -3
  44. data/rbi/openai/models/function_definition.rbi +2 -2
  45. data/rbi/openai/models/image_edit_completed_event.rbi +346 -0
  46. data/rbi/openai/models/image_edit_params.rbi +57 -0
  47. data/rbi/openai/models/image_edit_partial_image_event.rbi +249 -0
  48. data/rbi/openai/models/image_edit_stream_event.rbi +22 -0
  49. data/rbi/openai/models/image_gen_completed_event.rbi +339 -0
  50. data/rbi/openai/models/image_gen_partial_image_event.rbi +243 -0
  51. data/rbi/openai/models/image_gen_stream_event.rbi +22 -0
  52. data/rbi/openai/models/image_generate_params.rbi +18 -0
  53. data/rbi/openai/models/images_response.rbi +2 -2
  54. data/rbi/openai/models/responses/response.rbi +3 -3
  55. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
  56. data/rbi/openai/models/responses/response_create_params.rbi +3 -3
  57. data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
  58. data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
  59. data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
  60. data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
  61. data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
  62. data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
  63. data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
  64. data/rbi/openai/models/responses/response_output_refusal.rbi +2 -2
  65. data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
  66. data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
  67. data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
  68. data/rbi/openai/models/responses/tool.rbi +61 -0
  69. data/rbi/openai/models.rbi +12 -0
  70. data/rbi/openai/resources/chat/completions.rbi +2 -2
  71. data/rbi/openai/resources/images.rbi +237 -0
  72. data/rbi/openai/resources/responses.rbi +2 -2
  73. data/sig/openai/models/audio/speech_create_params.rbs +0 -6
  74. data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
  75. data/sig/openai/models/image_edit_completed_event.rbs +150 -0
  76. data/sig/openai/models/image_edit_params.rbs +21 -0
  77. data/sig/openai/models/image_edit_partial_image_event.rbs +105 -0
  78. data/sig/openai/models/image_edit_stream_event.rbs +12 -0
  79. data/sig/openai/models/image_gen_completed_event.rbs +150 -0
  80. data/sig/openai/models/image_gen_partial_image_event.rbs +105 -0
  81. data/sig/openai/models/image_gen_stream_event.rbs +12 -0
  82. data/sig/openai/models/image_generate_params.rbs +5 -0
  83. data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
  86. data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
  87. data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
  88. data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
  89. data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
  90. data/sig/openai/models/responses/response_stream_event.rbs +0 -2
  91. data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
  92. data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
  93. data/sig/openai/models/responses/tool.rbs +16 -0
  94. data/sig/openai/models.rbs +12 -0
  95. data/sig/openai/resources/images.rbs +38 -0
  96. metadata +20 -8
  97. data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
  98. data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
  99. data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
  100. data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
  101. data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
  102. data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: a91e66a63e35f1e79017e4e2270ff4d5792d7e7def4d703afc6f12817b8ea728
4
- data.tar.gz: 7be53dbd53923afcbf0754423182d8d8d8795458974c6a449d72178ce807377e
3
+ metadata.gz: e15e098317bf9151fffc0d83be9fd3ead36872a58ea53628f2d0dd3028735c78
4
+ data.tar.gz: 04a779ac9f0b4418138bf4a7216e01109e48d608d76c33fc83f76e9de82d574e
5
5
  SHA512:
6
- metadata.gz: 7dd389f0f8d5d978dbf4ac02696b97b874abcd31f1095ce5eef44cdb19282f910a6bda9941d4502be90181850bbddb6d999d88ed05def140efa762454e068999
7
- data.tar.gz: 8e4fbbc22ceb381a8fd83e3899e7440661dc41914fa140c264b7338141e4bbb5dc9d8f4ee14d6c9e68cef995aad053b730ea0d127923201e049ef16b42c94dc8
6
+ metadata.gz: d3673d18e0d3cfcd0db2ddc4c9c45bc6da8dd86371a38d85f7ad181f45780e6a17b48b9478e621c47a17c1708a2ec775ae8b61c0f6eb39c6b9d6686e09edfb65
7
+ data.tar.gz: a7fc556be0b4ba6ea16e6a2ba37bb0e9f62c2f7e39344a372b02b4c54b752e5718e15bfea4ffd8c277dd8db4f6d19530dc29e1ee5aee3fb2fc9a253269944b8f
data/CHANGELOG.md CHANGED
@@ -1,5 +1,31 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.15.0 (2025-07-21)
4
+
5
+ Full Changelog: [v0.14.0...v0.15.0](https://github.com/openai/openai-ruby/compare/v0.14.0...v0.15.0)
6
+
7
+ ### Features
8
+
9
+ * **api:** manual updates ([fb53071](https://github.com/openai/openai-ruby/commit/fb530713d08a4ba49e8bdaecd9848674bb35c333))
10
+
11
+
12
+ ### Bug Fixes
13
+
14
+ * **internal:** tests should use normalized property names ([801e9c2](https://github.com/openai/openai-ruby/commit/801e9c29f65e572a3b49f5cf7891d3053e1d087f))
15
+
16
+
17
+ ### Chores
18
+
19
+ * **api:** event shapes more accurate ([29f32ce](https://github.com/openai/openai-ruby/commit/29f32cedf6112d38fe8de454658a5afd7ad0d2cb))
20
+
21
+ ## 0.14.0 (2025-07-16)
22
+
23
+ Full Changelog: [v0.13.1...v0.14.0](https://github.com/openai/openai-ruby/compare/v0.13.1...v0.14.0)
24
+
25
+ ### Features
26
+
27
+ * **api:** manual updates ([b749baf](https://github.com/openai/openai-ruby/commit/b749baf0d1b52c35ff6e50b889301aa7b8ee2ba1))
28
+
3
29
  ## 0.13.1 (2025-07-15)
4
30
 
5
31
  Full Changelog: [v0.13.0...v0.13.1](https://github.com/openai/openai-ruby/compare/v0.13.0...v0.13.1)
data/README.md CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
15
15
  <!-- x-release-please-start-version -->
16
16
 
17
17
  ```ruby
18
- gem "openai", "~> 0.13.1"
18
+ gem "openai", "~> 0.15.0"
19
19
  ```
20
20
 
21
21
  <!-- x-release-please-end -->
@@ -443,7 +443,7 @@ You can provide typesafe request parameters like so:
443
443
 
444
444
  ```ruby
445
445
  openai.chat.completions.create(
446
- messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(role: "user", content: "Say this is a test")],
446
+ messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
447
447
  model: :"gpt-4.1"
448
448
  )
449
449
  ```
@@ -459,7 +459,7 @@ openai.chat.completions.create(
459
459
 
460
460
  # You can also splat a full Params class:
461
461
  params = OpenAI::Chat::CompletionCreateParams.new(
462
- messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(role: "user", content: "Say this is a test")],
462
+ messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
463
463
  model: :"gpt-4.1"
464
464
  )
465
465
  openai.chat.completions.create(**params)
@@ -111,12 +111,6 @@ module OpenAI
111
111
 
112
112
  variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ECHO }
113
113
 
114
- variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::FABLE }
115
-
116
- variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ONYX }
117
-
118
- variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::NOVA }
119
-
120
114
  variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SAGE }
121
115
 
122
116
  variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SHIMMER }
@@ -137,9 +131,6 @@ module OpenAI
137
131
  BALLAD = :ballad
138
132
  CORAL = :coral
139
133
  ECHO = :echo
140
- FABLE = :fable
141
- ONYX = :onyx
142
- NOVA = :nova
143
134
  SAGE = :sage
144
135
  SHIMMER = :shimmer
145
136
  VERSE = :verse
@@ -44,7 +44,7 @@ module OpenAI
44
44
  # - If set to 'auto', then the request will be processed with the service tier
45
45
  # configured in the Project settings. Unless otherwise configured, the Project
46
46
  # will use 'default'.
47
- # - If set to 'default', then the requset will be processed with the standard
47
+ # - If set to 'default', then the request will be processed with the standard
48
48
  # pricing and performance for the selected model.
49
49
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
50
50
  # 'priority', then the request will be processed with the corresponding service
@@ -193,7 +193,7 @@ module OpenAI
193
193
  # - If set to 'auto', then the request will be processed with the service tier
194
194
  # configured in the Project settings. Unless otherwise configured, the Project
195
195
  # will use 'default'.
196
- # - If set to 'default', then the requset will be processed with the standard
196
+ # - If set to 'default', then the request will be processed with the standard
197
197
  # pricing and performance for the selected model.
198
198
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
199
199
  # 'priority', then the request will be processed with the corresponding service
@@ -67,12 +67,6 @@ module OpenAI
67
67
 
68
68
  variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ECHO }
69
69
 
70
- variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::FABLE }
71
-
72
- variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ONYX }
73
-
74
- variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::NOVA }
75
-
76
70
  variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SAGE }
77
71
 
78
72
  variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SHIMMER }
@@ -93,9 +87,6 @@ module OpenAI
93
87
  BALLAD = :ballad
94
88
  CORAL = :coral
95
89
  ECHO = :echo
96
- FABLE = :fable
97
- ONYX = :onyx
98
- NOVA = :nova
99
90
  SAGE = :sage
100
91
  SHIMMER = :shimmer
101
92
  VERSE = :verse
@@ -43,7 +43,7 @@ module OpenAI
43
43
  # - If set to 'auto', then the request will be processed with the service tier
44
44
  # configured in the Project settings. Unless otherwise configured, the Project
45
45
  # will use 'default'.
46
- # - If set to 'default', then the requset will be processed with the standard
46
+ # - If set to 'default', then the request will be processed with the standard
47
47
  # pricing and performance for the selected model.
48
48
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
49
49
  # 'priority', then the request will be processed with the corresponding service
@@ -376,7 +376,7 @@ module OpenAI
376
376
  # - If set to 'auto', then the request will be processed with the service tier
377
377
  # configured in the Project settings. Unless otherwise configured, the Project
378
378
  # will use 'default'.
379
- # - If set to 'default', then the requset will be processed with the standard
379
+ # - If set to 'default', then the request will be processed with the standard
380
380
  # pricing and performance for the selected model.
381
381
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
382
382
  # 'priority', then the request will be processed with the corresponding service
@@ -224,7 +224,7 @@ module OpenAI
224
224
  # - If set to 'auto', then the request will be processed with the service tier
225
225
  # configured in the Project settings. Unless otherwise configured, the Project
226
226
  # will use 'default'.
227
- # - If set to 'default', then the requset will be processed with the standard
227
+ # - If set to 'default', then the request will be processed with the standard
228
228
  # pricing and performance for the selected model.
229
229
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
230
230
  # 'priority', then the request will be processed with the corresponding service
@@ -553,7 +553,7 @@ module OpenAI
553
553
  # - If set to 'auto', then the request will be processed with the service tier
554
554
  # configured in the Project settings. Unless otherwise configured, the Project
555
555
  # will use 'default'.
556
- # - If set to 'default', then the requset will be processed with the standard
556
+ # - If set to 'default', then the request will be processed with the standard
557
557
  # pricing and performance for the selected model.
558
558
  # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
559
559
  # 'priority', then the request will be processed with the corresponding service
@@ -34,7 +34,7 @@ module OpenAI
34
34
  # set to true, the model will follow the exact schema defined in the `parameters`
35
35
  # field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
36
36
  # more about Structured Outputs in the
37
- # [function calling guide](docs/guides/function-calling).
37
+ # [function calling guide](https://platform.openai.com/docs/guides/function-calling).
38
38
  #
39
39
  # @return [Boolean, nil]
40
40
  optional :strict, OpenAI::Internal::Type::Boolean, nil?: true
@@ -0,0 +1,198 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
6
+ # @!attribute b64_json
7
+ # Base64-encoded final edited image data, suitable for rendering as an image.
8
+ #
9
+ # @return [String]
10
+ required :b64_json, String
11
+
12
+ # @!attribute background
13
+ # The background setting for the edited image.
14
+ #
15
+ # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background]
16
+ required :background, enum: -> { OpenAI::ImageEditCompletedEvent::Background }
17
+
18
+ # @!attribute created_at
19
+ # The Unix timestamp when the event was created.
20
+ #
21
+ # @return [Integer]
22
+ required :created_at, Integer
23
+
24
+ # @!attribute output_format
25
+ # The output format for the edited image.
26
+ #
27
+ # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat]
28
+ required :output_format, enum: -> { OpenAI::ImageEditCompletedEvent::OutputFormat }
29
+
30
+ # @!attribute quality
31
+ # The quality setting for the edited image.
32
+ #
33
+ # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality]
34
+ required :quality, enum: -> { OpenAI::ImageEditCompletedEvent::Quality }
35
+
36
+ # @!attribute size
37
+ # The size of the edited image.
38
+ #
39
+ # @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size]
40
+ required :size, enum: -> { OpenAI::ImageEditCompletedEvent::Size }
41
+
42
+ # @!attribute type
43
+ # The type of the event. Always `image_edit.completed`.
44
+ #
45
+ # @return [Symbol, :"image_edit.completed"]
46
+ required :type, const: :"image_edit.completed"
47
+
48
+ # @!attribute usage
49
+ # For `gpt-image-1` only, the token usage information for the image generation.
50
+ #
51
+ # @return [OpenAI::Models::ImageEditCompletedEvent::Usage]
52
+ required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage }
53
+
54
+ # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_edit.completed")
55
+ # Some parameter documentations has been truncated, see
56
+ # {OpenAI::Models::ImageEditCompletedEvent} for more details.
57
+ #
58
+ # Emitted when image editing has completed and the final image is available.
59
+ #
60
+ # @param b64_json [String] Base64-encoded final edited image data, suitable for rendering as an image.
61
+ #
62
+ # @param background [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] The background setting for the edited image.
63
+ #
64
+ # @param created_at [Integer] The Unix timestamp when the event was created.
65
+ #
66
+ # @param output_format [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] The output format for the edited image.
67
+ #
68
+ # @param quality [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] The quality setting for the edited image.
69
+ #
70
+ # @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image.
71
+ #
72
+ # @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
73
+ #
74
+ # @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`.
75
+
76
+ # The background setting for the edited image.
77
+ #
78
+ # @see OpenAI::Models::ImageEditCompletedEvent#background
79
+ module Background
80
+ extend OpenAI::Internal::Type::Enum
81
+
82
+ TRANSPARENT = :transparent
83
+ OPAQUE = :opaque
84
+ AUTO = :auto
85
+
86
+ # @!method self.values
87
+ # @return [Array<Symbol>]
88
+ end
89
+
90
+ # The output format for the edited image.
91
+ #
92
+ # @see OpenAI::Models::ImageEditCompletedEvent#output_format
93
+ module OutputFormat
94
+ extend OpenAI::Internal::Type::Enum
95
+
96
+ PNG = :png
97
+ WEBP = :webp
98
+ JPEG = :jpeg
99
+
100
+ # @!method self.values
101
+ # @return [Array<Symbol>]
102
+ end
103
+
104
+ # The quality setting for the edited image.
105
+ #
106
+ # @see OpenAI::Models::ImageEditCompletedEvent#quality
107
+ module Quality
108
+ extend OpenAI::Internal::Type::Enum
109
+
110
+ LOW = :low
111
+ MEDIUM = :medium
112
+ HIGH = :high
113
+ AUTO = :auto
114
+
115
+ # @!method self.values
116
+ # @return [Array<Symbol>]
117
+ end
118
+
119
+ # The size of the edited image.
120
+ #
121
+ # @see OpenAI::Models::ImageEditCompletedEvent#size
122
+ module Size
123
+ extend OpenAI::Internal::Type::Enum
124
+
125
+ SIZE_1024X1024 = :"1024x1024"
126
+ SIZE_1024X1536 = :"1024x1536"
127
+ SIZE_1536X1024 = :"1536x1024"
128
+ AUTO = :auto
129
+
130
+ # @!method self.values
131
+ # @return [Array<Symbol>]
132
+ end
133
+
134
+ # @see OpenAI::Models::ImageEditCompletedEvent#usage
135
+ class Usage < OpenAI::Internal::Type::BaseModel
136
+ # @!attribute input_tokens
137
+ # The number of tokens (images and text) in the input prompt.
138
+ #
139
+ # @return [Integer]
140
+ required :input_tokens, Integer
141
+
142
+ # @!attribute input_tokens_details
143
+ # The input tokens detailed information for the image generation.
144
+ #
145
+ # @return [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails]
146
+ required :input_tokens_details, -> { OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails }
147
+
148
+ # @!attribute output_tokens
149
+ # The number of image tokens in the output image.
150
+ #
151
+ # @return [Integer]
152
+ required :output_tokens, Integer
153
+
154
+ # @!attribute total_tokens
155
+ # The total number of tokens (images and text) used for the image generation.
156
+ #
157
+ # @return [Integer]
158
+ required :total_tokens, Integer
159
+
160
+ # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
161
+ # Some parameter documentations has been truncated, see
162
+ # {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details.
163
+ #
164
+ # For `gpt-image-1` only, the token usage information for the image generation.
165
+ #
166
+ # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
167
+ #
168
+ # @param input_tokens_details [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
169
+ #
170
+ # @param output_tokens [Integer] The number of image tokens in the output image.
171
+ #
172
+ # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.
173
+
174
+ # @see OpenAI::Models::ImageEditCompletedEvent::Usage#input_tokens_details
175
+ class InputTokensDetails < OpenAI::Internal::Type::BaseModel
176
+ # @!attribute image_tokens
177
+ # The number of image tokens in the input prompt.
178
+ #
179
+ # @return [Integer]
180
+ required :image_tokens, Integer
181
+
182
+ # @!attribute text_tokens
183
+ # The number of text tokens in the input prompt.
184
+ #
185
+ # @return [Integer]
186
+ required :text_tokens, Integer
187
+
188
+ # @!method initialize(image_tokens:, text_tokens:)
189
+ # The input tokens detailed information for the image generation.
190
+ #
191
+ # @param image_tokens [Integer] The number of image tokens in the input prompt.
192
+ #
193
+ # @param text_tokens [Integer] The number of text tokens in the input prompt.
194
+ end
195
+ end
196
+ end
197
+ end
198
+ end
@@ -3,6 +3,8 @@
3
3
  module OpenAI
4
4
  module Models
5
5
  # @see OpenAI::Resources::Images#edit
6
+ #
7
+ # @see OpenAI::Resources::Images#edit_stream_raw
6
8
  class ImageEditParams < OpenAI::Internal::Type::BaseModel
7
9
  extend OpenAI::Internal::Type::RequestParameters::Converter
8
10
  include OpenAI::Internal::Type::RequestParameters
@@ -38,6 +40,14 @@ module OpenAI
38
40
  # @return [Symbol, OpenAI::Models::ImageEditParams::Background, nil]
39
41
  optional :background, enum: -> { OpenAI::ImageEditParams::Background }, nil?: true
40
42
 
43
+ # @!attribute input_fidelity
44
+ # Control how much effort the model will exert to match the style and features,
45
+ # especially facial features, of input images. This parameter is only supported
46
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
47
+ #
48
+ # @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil]
49
+ optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true
50
+
41
51
  # @!attribute mask
42
52
  # An additional image whose fully transparent areas (e.g. where alpha is zero)
43
53
  # indicate where `image` should be edited. If there are multiple images provided,
@@ -77,6 +87,17 @@ module OpenAI
77
87
  # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
78
88
  optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true
79
89
 
90
+ # @!attribute partial_images
91
+ # The number of partial images to generate. This parameter is used for streaming
92
+ # responses that return partial images. Value must be between 0 and 3. When set to
93
+ # 0, the response will be a single image sent in one streaming event.
94
+ #
95
+ # Note that the final image may be sent before the full number of partial images
96
+ # are generated if the full image is generated more quickly.
97
+ #
98
+ # @return [Integer, nil]
99
+ optional :partial_images, Integer, nil?: true
100
+
80
101
  # @!attribute quality
81
102
  # The quality of the image that will be generated. `high`, `medium` and `low` are
82
103
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -110,7 +131,7 @@ module OpenAI
110
131
  # @return [String, nil]
111
132
  optional :user, String
112
133
 
113
- # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
134
+ # @!method initialize(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
114
135
  # Some parameter documentations has been truncated, see
115
136
  # {OpenAI::Models::ImageEditParams} for more details.
116
137
  #
@@ -120,6 +141,8 @@ module OpenAI
120
141
  #
121
142
  # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s).
122
143
  #
144
+ # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features,
145
+ #
123
146
  # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
124
147
  #
125
148
  # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup
@@ -130,6 +153,8 @@ module OpenAI
130
153
  #
131
154
  # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
132
155
  #
156
+ # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for
157
+ #
133
158
  # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
134
159
  #
135
160
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -179,6 +204,19 @@ module OpenAI
179
204
  # @return [Array<Symbol>]
180
205
  end
181
206
 
207
+ # Control how much effort the model will exert to match the style and features,
208
+ # especially facial features, of input images. This parameter is only supported
209
+ # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
210
+ module InputFidelity
211
+ extend OpenAI::Internal::Type::Enum
212
+
213
+ HIGH = :high
214
+ LOW = :low
215
+
216
+ # @!method self.values
217
+ # @return [Array<Symbol>]
218
+ end
219
+
182
220
  # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
183
221
  # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
184
222
  # is used.
@@ -0,0 +1,135 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel
6
+ # @!attribute b64_json
7
+ # Base64-encoded partial image data, suitable for rendering as an image.
8
+ #
9
+ # @return [String]
10
+ required :b64_json, String
11
+
12
+ # @!attribute background
13
+ # The background setting for the requested edited image.
14
+ #
15
+ # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background]
16
+ required :background, enum: -> { OpenAI::ImageEditPartialImageEvent::Background }
17
+
18
+ # @!attribute created_at
19
+ # The Unix timestamp when the event was created.
20
+ #
21
+ # @return [Integer]
22
+ required :created_at, Integer
23
+
24
+ # @!attribute output_format
25
+ # The output format for the requested edited image.
26
+ #
27
+ # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat]
28
+ required :output_format, enum: -> { OpenAI::ImageEditPartialImageEvent::OutputFormat }
29
+
30
+ # @!attribute partial_image_index
31
+ # 0-based index for the partial image (streaming).
32
+ #
33
+ # @return [Integer]
34
+ required :partial_image_index, Integer
35
+
36
+ # @!attribute quality
37
+ # The quality setting for the requested edited image.
38
+ #
39
+ # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality]
40
+ required :quality, enum: -> { OpenAI::ImageEditPartialImageEvent::Quality }
41
+
42
+ # @!attribute size
43
+ # The size of the requested edited image.
44
+ #
45
+ # @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size]
46
+ required :size, enum: -> { OpenAI::ImageEditPartialImageEvent::Size }
47
+
48
+ # @!attribute type
49
+ # The type of the event. Always `image_edit.partial_image`.
50
+ #
51
+ # @return [Symbol, :"image_edit.partial_image"]
52
+ required :type, const: :"image_edit.partial_image"
53
+
54
+ # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_edit.partial_image")
55
+ # Some parameter documentations has been truncated, see
56
+ # {OpenAI::Models::ImageEditPartialImageEvent} for more details.
57
+ #
58
+ # Emitted when a partial image is available during image editing streaming.
59
+ #
60
+ # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image.
61
+ #
62
+ # @param background [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] The background setting for the requested edited image.
63
+ #
64
+ # @param created_at [Integer] The Unix timestamp when the event was created.
65
+ #
66
+ # @param output_format [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] The output format for the requested edited image.
67
+ #
68
+ # @param partial_image_index [Integer] 0-based index for the partial image (streaming).
69
+ #
70
+ # @param quality [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] The quality setting for the requested edited image.
71
+ #
72
+ # @param size [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] The size of the requested edited image.
73
+ #
74
+ # @param type [Symbol, :"image_edit.partial_image"] The type of the event. Always `image_edit.partial_image`.
75
+
76
+ # The background setting for the requested edited image.
77
+ #
78
+ # @see OpenAI::Models::ImageEditPartialImageEvent#background
79
+ module Background
80
+ extend OpenAI::Internal::Type::Enum
81
+
82
+ TRANSPARENT = :transparent
83
+ OPAQUE = :opaque
84
+ AUTO = :auto
85
+
86
+ # @!method self.values
87
+ # @return [Array<Symbol>]
88
+ end
89
+
90
+ # The output format for the requested edited image.
91
+ #
92
+ # @see OpenAI::Models::ImageEditPartialImageEvent#output_format
93
+ module OutputFormat
94
+ extend OpenAI::Internal::Type::Enum
95
+
96
+ PNG = :png
97
+ WEBP = :webp
98
+ JPEG = :jpeg
99
+
100
+ # @!method self.values
101
+ # @return [Array<Symbol>]
102
+ end
103
+
104
+ # The quality setting for the requested edited image.
105
+ #
106
+ # @see OpenAI::Models::ImageEditPartialImageEvent#quality
107
+ module Quality
108
+ extend OpenAI::Internal::Type::Enum
109
+
110
+ LOW = :low
111
+ MEDIUM = :medium
112
+ HIGH = :high
113
+ AUTO = :auto
114
+
115
+ # @!method self.values
116
+ # @return [Array<Symbol>]
117
+ end
118
+
119
+ # The size of the requested edited image.
120
+ #
121
+ # @see OpenAI::Models::ImageEditPartialImageEvent#size
122
+ module Size
123
+ extend OpenAI::Internal::Type::Enum
124
+
125
+ SIZE_1024X1024 = :"1024x1024"
126
+ SIZE_1024X1536 = :"1024x1536"
127
+ SIZE_1536X1024 = :"1536x1024"
128
+ AUTO = :auto
129
+
130
+ # @!method self.values
131
+ # @return [Array<Symbol>]
132
+ end
133
+ end
134
+ end
135
+ end
@@ -0,0 +1,21 @@
1
+ # frozen_string_literal: true
2
+
3
+ module OpenAI
4
+ module Models
5
+ # Emitted when a partial image is available during image editing streaming.
6
+ module ImageEditStreamEvent
7
+ extend OpenAI::Internal::Type::Union
8
+
9
+ discriminator :type
10
+
11
+ # Emitted when a partial image is available during image editing streaming.
12
+ variant :"image_edit.partial_image", -> { OpenAI::ImageEditPartialImageEvent }
13
+
14
+ # Emitted when image editing has completed and the final image is available.
15
+ variant :"image_edit.completed", -> { OpenAI::ImageEditCompletedEvent }
16
+
17
+ # @!method self.variants
18
+ # @return [Array(OpenAI::Models::ImageEditPartialImageEvent, OpenAI::Models::ImageEditCompletedEvent)]
19
+ end
20
+ end
21
+ end