openai 0.29.0 → 0.31.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +22 -0
- data/README.md +1 -1
- data/lib/openai/client.rb +4 -0
- data/lib/openai/internal/conversation_cursor_page.rb +2 -2
- data/lib/openai/models/all_models.rb +2 -0
- data/lib/openai/models/beta/assistant_create_params.rb +3 -0
- data/lib/openai/models/beta/assistant_update_params.rb +3 -0
- data/lib/openai/models/beta/chatkit/chat_session.rb +94 -0
- data/lib/openai/models/beta/chatkit/chat_session_automatic_thread_titling.rb +22 -0
- data/lib/openai/models/beta/chatkit/chat_session_chatkit_configuration.rb +38 -0
- data/lib/openai/models/beta/chatkit/chat_session_chatkit_configuration_param.rb +125 -0
- data/lib/openai/models/beta/chatkit/chat_session_expires_after_param.rb +30 -0
- data/lib/openai/models/beta/chatkit/chat_session_file_upload.rb +38 -0
- data/lib/openai/models/beta/chatkit/chat_session_history.rb +34 -0
- data/lib/openai/models/beta/chatkit/chat_session_rate_limits.rb +22 -0
- data/lib/openai/models/beta/chatkit/chat_session_rate_limits_param.rb +22 -0
- data/lib/openai/models/beta/chatkit/chat_session_status.rb +20 -0
- data/lib/openai/models/beta/chatkit/chat_session_workflow_param.rb +80 -0
- data/lib/openai/models/beta/chatkit/chatkit_attachment.rb +69 -0
- data/lib/openai/models/beta/chatkit/chatkit_response_output_text.rb +143 -0
- data/lib/openai/models/beta/chatkit/chatkit_thread.rb +145 -0
- data/lib/openai/models/beta/chatkit/chatkit_thread_assistant_message_item.rb +65 -0
- data/lib/openai/models/beta/chatkit/chatkit_thread_item_list.rb +374 -0
- data/lib/openai/models/beta/chatkit/chatkit_thread_user_message_item.rb +183 -0
- data/lib/openai/models/beta/chatkit/chatkit_widget_item.rb +64 -0
- data/lib/openai/models/beta/chatkit/session_cancel_params.rb +18 -0
- data/lib/openai/models/beta/chatkit/session_create_params.rb +63 -0
- data/lib/openai/models/beta/chatkit/thread_delete_params.rb +18 -0
- data/lib/openai/models/beta/chatkit/thread_delete_response.rb +39 -0
- data/lib/openai/models/beta/chatkit/thread_list_items_params.rb +66 -0
- data/lib/openai/models/beta/chatkit/thread_list_params.rb +75 -0
- data/lib/openai/models/beta/chatkit/thread_retrieve_params.rb +18 -0
- data/lib/openai/models/beta/chatkit_upload_file_params.rb +28 -0
- data/lib/openai/models/beta/chatkit_upload_file_response.rb +25 -0
- data/lib/openai/models/beta/chatkit_workflow.rb +78 -0
- data/lib/openai/models/beta/file_part.rb +56 -0
- data/lib/openai/models/beta/image_part.rb +64 -0
- data/lib/openai/models/beta/threads/run_create_params.rb +3 -0
- data/lib/openai/models/chat/completion_create_params.rb +3 -0
- data/lib/openai/models/comparison_filter.rb +29 -6
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -0
- data/lib/openai/models/evals/run_cancel_response.rb +6 -0
- data/lib/openai/models/evals/run_create_params.rb +6 -0
- data/lib/openai/models/evals/run_create_response.rb +6 -0
- data/lib/openai/models/evals/run_list_response.rb +6 -0
- data/lib/openai/models/evals/run_retrieve_response.rb +6 -0
- data/lib/openai/models/graders/score_model_grader.rb +3 -0
- data/lib/openai/models/image_edit_params.rb +4 -2
- data/lib/openai/models/image_model.rb +1 -0
- data/lib/openai/models/realtime/realtime_session.rb +4 -0
- data/lib/openai/models/realtime/realtime_session_create_request.rb +12 -0
- data/lib/openai/models/realtime/realtime_session_create_response.rb +12 -0
- data/lib/openai/models/reasoning.rb +3 -0
- data/lib/openai/models/reasoning_effort.rb +3 -0
- data/lib/openai/models/responses/tool.rb +5 -2
- data/lib/openai/models/responses_model.rb +2 -0
- data/lib/openai/models/vector_stores/vector_store_file.rb +3 -3
- data/lib/openai/models/video.rb +122 -0
- data/lib/openai/models/video_create_error.rb +21 -0
- data/lib/openai/models/video_create_params.rb +54 -0
- data/lib/openai/models/video_delete_params.rb +14 -0
- data/lib/openai/models/video_delete_response.rb +35 -0
- data/lib/openai/models/video_download_content_params.rb +34 -0
- data/lib/openai/models/video_list_params.rb +54 -0
- data/lib/openai/models/video_model.rb +15 -0
- data/lib/openai/models/video_remix_params.rb +22 -0
- data/lib/openai/models/video_retrieve_params.rb +14 -0
- data/lib/openai/models/video_seconds.rb +16 -0
- data/lib/openai/models/video_size.rb +17 -0
- data/lib/openai/models.rb +22 -0
- data/lib/openai/resources/beta/chatkit/sessions.rb +71 -0
- data/lib/openai/resources/beta/chatkit/threads.rb +126 -0
- data/lib/openai/resources/beta/chatkit.rb +50 -0
- data/lib/openai/resources/beta.rb +4 -0
- data/lib/openai/resources/files.rb +1 -1
- data/lib/openai/resources/videos.rb +165 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +46 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/models/all_models.rbi +10 -0
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -0
- data/rbi/openai/models/beta/assistant_update_params.rbi +6 -0
- data/rbi/openai/models/beta/chatkit/chat_session.rbi +141 -0
- data/rbi/openai/models/beta/chatkit/chat_session_automatic_thread_titling.rbi +35 -0
- data/rbi/openai/models/beta/chatkit/chat_session_chatkit_configuration.rbi +87 -0
- data/rbi/openai/models/beta/chatkit/chat_session_chatkit_configuration_param.rbi +256 -0
- data/rbi/openai/models/beta/chatkit/chat_session_expires_after_param.rbi +43 -0
- data/rbi/openai/models/beta/chatkit/chat_session_file_upload.rbi +61 -0
- data/rbi/openai/models/beta/chatkit/chat_session_history.rbi +52 -0
- data/rbi/openai/models/beta/chatkit/chat_session_rate_limits.rbi +37 -0
- data/rbi/openai/models/beta/chatkit/chat_session_rate_limits_param.rbi +40 -0
- data/rbi/openai/models/beta/chatkit/chat_session_status.rbi +43 -0
- data/rbi/openai/models/beta/chatkit/chat_session_workflow_param.rbi +166 -0
- data/rbi/openai/models/beta/chatkit/chatkit_attachment.rbi +116 -0
- data/rbi/openai/models/beta/chatkit/chatkit_response_output_text.rbi +287 -0
- data/rbi/openai/models/beta/chatkit/chatkit_thread.rbi +220 -0
- data/rbi/openai/models/beta/chatkit/chatkit_thread_assistant_message_item.rbi +94 -0
- data/rbi/openai/models/beta/chatkit/chatkit_thread_item_list.rbi +590 -0
- data/rbi/openai/models/beta/chatkit/chatkit_thread_user_message_item.rbi +324 -0
- data/rbi/openai/models/beta/chatkit/chatkit_widget_item.rbi +87 -0
- data/rbi/openai/models/beta/chatkit/session_cancel_params.rbi +34 -0
- data/rbi/openai/models/beta/chatkit/session_create_params.rbi +136 -0
- data/rbi/openai/models/beta/chatkit/thread_delete_params.rbi +34 -0
- data/rbi/openai/models/beta/chatkit/thread_delete_response.rbi +55 -0
- data/rbi/openai/models/beta/chatkit/thread_list_items_params.rbi +138 -0
- data/rbi/openai/models/beta/chatkit/thread_list_params.rbi +145 -0
- data/rbi/openai/models/beta/chatkit/thread_retrieve_params.rbi +34 -0
- data/rbi/openai/models/beta/chatkit_upload_file_params.rbi +50 -0
- data/rbi/openai/models/beta/chatkit_upload_file_response.rbi +25 -0
- data/rbi/openai/models/beta/chatkit_workflow.rbi +134 -0
- data/rbi/openai/models/beta/file_part.rbi +74 -0
- data/rbi/openai/models/beta/image_part.rbi +82 -0
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -0
- data/rbi/openai/models/chat/completion_create_params.rbi +6 -0
- data/rbi/openai/models/comparison_filter.rbi +43 -4
- data/rbi/openai/models/eval_create_response.rbi +4 -4
- data/rbi/openai/models/eval_list_response.rbi +4 -4
- data/rbi/openai/models/eval_retrieve_response.rbi +4 -4
- data/rbi/openai/models/eval_update_response.rbi +4 -4
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +6 -0
- data/rbi/openai/models/evals/run_cancel_response.rbi +12 -0
- data/rbi/openai/models/evals/run_create_params.rbi +12 -0
- data/rbi/openai/models/evals/run_create_response.rbi +12 -0
- data/rbi/openai/models/evals/run_list_response.rbi +12 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +12 -0
- data/rbi/openai/models/graders/score_model_grader.rbi +6 -0
- data/rbi/openai/models/image_edit_params.rbi +6 -3
- data/rbi/openai/models/image_model.rbi +2 -0
- data/rbi/openai/models/realtime/realtime_session.rbi +20 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +20 -0
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +20 -0
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/reasoning_effort.rbi +3 -0
- data/rbi/openai/models/responses/tool.rbi +11 -3
- data/rbi/openai/models/responses_model.rbi +10 -0
- data/rbi/openai/models/vector_stores/vector_store_file.rbi +3 -3
- data/rbi/openai/models/video.rbi +143 -0
- data/rbi/openai/models/video_create_error.rbi +26 -0
- data/rbi/openai/models/video_create_params.rbi +87 -0
- data/rbi/openai/models/video_delete_params.rbi +27 -0
- data/rbi/openai/models/video_delete_response.rbi +46 -0
- data/rbi/openai/models/video_download_content_params.rbi +89 -0
- data/rbi/openai/models/video_list_params.rbi +91 -0
- data/rbi/openai/models/video_model.rbi +19 -0
- data/rbi/openai/models/video_remix_params.rbi +40 -0
- data/rbi/openai/models/video_retrieve_params.rbi +27 -0
- data/rbi/openai/models/video_seconds.rbi +20 -0
- data/rbi/openai/models/video_size.rbi +23 -0
- data/rbi/openai/models.rbi +22 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -0
- data/rbi/openai/resources/beta/chatkit/sessions.rbi +61 -0
- data/rbi/openai/resources/beta/chatkit/threads.rbi +110 -0
- data/rbi/openai/resources/beta/chatkit.rbi +35 -0
- data/rbi/openai/resources/beta/threads/runs.rbi +6 -0
- data/rbi/openai/resources/beta.rbi +3 -0
- data/rbi/openai/resources/chat/completions.rbi +6 -0
- data/rbi/openai/resources/files.rbi +1 -1
- data/rbi/openai/resources/images.rbi +4 -2
- data/rbi/openai/resources/videos.rbi +121 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/models/all_models.rbs +4 -0
- data/sig/openai/models/beta/chatkit/chat_session.rbs +69 -0
- data/sig/openai/models/beta/chatkit/chat_session_automatic_thread_titling.rbs +17 -0
- data/sig/openai/models/beta/chatkit/chat_session_chatkit_configuration.rbs +34 -0
- data/sig/openai/models/beta/chatkit/chat_session_chatkit_configuration_param.rbs +103 -0
- data/sig/openai/models/beta/chatkit/chat_session_expires_after_param.rbs +20 -0
- data/sig/openai/models/beta/chatkit/chat_session_file_upload.rbs +30 -0
- data/sig/openai/models/beta/chatkit/chat_session_history.rbs +19 -0
- data/sig/openai/models/beta/chatkit/chat_session_rate_limits.rbs +18 -0
- data/sig/openai/models/beta/chatkit/chat_session_rate_limits_param.rbs +20 -0
- data/sig/openai/models/beta/chatkit/chat_session_status.rbs +19 -0
- data/sig/openai/models/beta/chatkit/chat_session_workflow_param.rbs +69 -0
- data/sig/openai/models/beta/chatkit/chatkit_attachment.rbs +57 -0
- data/sig/openai/models/beta/chatkit/chatkit_response_output_text.rbs +114 -0
- data/sig/openai/models/beta/chatkit/chatkit_thread.rbs +96 -0
- data/sig/openai/models/beta/chatkit/chatkit_thread_assistant_message_item.rbs +51 -0
- data/sig/openai/models/beta/chatkit/chatkit_thread_item_list.rbs +276 -0
- data/sig/openai/models/beta/chatkit/chatkit_thread_user_message_item.rbs +127 -0
- data/sig/openai/models/beta/chatkit/chatkit_widget_item.rbs +51 -0
- data/sig/openai/models/beta/chatkit/session_cancel_params.rbs +19 -0
- data/sig/openai/models/beta/chatkit/session_create_params.rbs +62 -0
- data/sig/openai/models/beta/chatkit/thread_delete_params.rbs +19 -0
- data/sig/openai/models/beta/chatkit/thread_delete_response.rbs +30 -0
- data/sig/openai/models/beta/chatkit/thread_list_items_params.rbs +66 -0
- data/sig/openai/models/beta/chatkit/thread_list_params.rbs +73 -0
- data/sig/openai/models/beta/chatkit/thread_retrieve_params.rbs +19 -0
- data/sig/openai/models/beta/chatkit_upload_file_params.rbs +26 -0
- data/sig/openai/models/beta/chatkit_upload_file_response.rbs +14 -0
- data/sig/openai/models/beta/chatkit_workflow.rbs +55 -0
- data/sig/openai/models/beta/file_part.rbs +42 -0
- data/sig/openai/models/beta/image_part.rbs +47 -0
- data/sig/openai/models/comparison_filter.rbs +15 -1
- data/sig/openai/models/eval_create_response.rbs +2 -2
- data/sig/openai/models/eval_list_response.rbs +2 -2
- data/sig/openai/models/eval_retrieve_response.rbs +2 -2
- data/sig/openai/models/eval_update_response.rbs +2 -2
- data/sig/openai/models/image_model.rbs +3 -1
- data/sig/openai/models/realtime/realtime_session.rbs +8 -0
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +8 -0
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +8 -0
- data/sig/openai/models/responses/tool.rbs +2 -1
- data/sig/openai/models/responses_model.rbs +4 -0
- data/sig/openai/models/video.rbs +88 -0
- data/sig/openai/models/video_create_error.rbs +15 -0
- data/sig/openai/models/video_create_params.rbs +58 -0
- data/sig/openai/models/video_delete_params.rbs +14 -0
- data/sig/openai/models/video_delete_response.rbs +22 -0
- data/sig/openai/models/video_download_content_params.rbs +40 -0
- data/sig/openai/models/video_list_params.rbs +55 -0
- data/sig/openai/models/video_model.rbs +14 -0
- data/sig/openai/models/video_remix_params.rbs +23 -0
- data/sig/openai/models/video_retrieve_params.rbs +15 -0
- data/sig/openai/models/video_seconds.rbs +15 -0
- data/sig/openai/models/video_size.rbs +16 -0
- data/sig/openai/models.rbs +22 -0
- data/sig/openai/resources/beta/chatkit/sessions.rbs +25 -0
- data/sig/openai/resources/beta/chatkit/threads.rbs +39 -0
- data/sig/openai/resources/beta/chatkit.rbs +18 -0
- data/sig/openai/resources/beta.rbs +2 -0
- data/sig/openai/resources/videos.rbs +45 -0
- metadata +140 -2
```diff
@@ -232,6 +232,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -589,6 +592,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -661,6 +664,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -661,6 +664,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -320,6 +320,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -665,6 +668,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -226,6 +226,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

```
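The same note recurs below for `reasoning.rb` and `reasoning_effort.rb`. As a quick illustration (not part of the diff; the call shape follows the gem's existing chat completions API, and the model string is only an example), reasoning effort is passed per request:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Lower effort trades reasoning depth for latency and fewer reasoning tokens.
# Per the note added above, `gpt-5-pro` only accepts :high.
completion = client.chat.completions.create(
  model: "gpt-5",
  reasoning_effort: :low,
  messages: [{role: :user, content: "Suggest three names for a caching gem."}]
)

puts completion.choices.first.message.content
```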
```diff
@@ -43,7 +43,8 @@ module OpenAI
  # @!attribute input_fidelity
  # Control how much effort the model will exert to match the style and features,
  # especially facial features, of input images. This parameter is only supported
- # for `gpt-image-1`. Supports `high` and
+ # for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+ # `low`. Defaults to `low`.
  #
  # @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil]
  optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true

@@ -206,7 +207,8 @@ module OpenAI

  # Control how much effort the model will exert to match the style and features,
  # especially facial features, of input images. This parameter is only supported
- # for `gpt-image-1`. Supports `high` and
+ # for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+ # `low`. Defaults to `low`.
  module InputFidelity
  extend OpenAI::Internal::Type::Enum

```
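A hedged usage sketch (not part of the diff) of the clarified `input_fidelity` option on the existing `images.edit` helper; the file names and prompt are illustrative:

```ruby
require "base64"
require "pathname"
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# :high asks gpt-image-1 to preserve input details such as faces; the option is
# unsupported for gpt-image-1-mini and defaults to :low.
edited = client.images.edit(
  image: Pathname("portrait.png"),
  prompt: "Put the subject in a rain jacket on a foggy pier",
  model: "gpt-image-1",
  input_fidelity: :high
)

# gpt-image-1 returns base64-encoded image data.
File.binwrite("edited.png", Base64.decode64(edited.data.first.b64_json))
```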
```diff
@@ -316,6 +316,10 @@ module OpenAI
  GPT_4O_REALTIME_PREVIEW_2025_06_03 = :"gpt-4o-realtime-preview-2025-06-03"
  GPT_4O_MINI_REALTIME_PREVIEW = :"gpt-4o-mini-realtime-preview"
  GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
+ GPT_REALTIME_MINI = :"gpt-realtime-mini"
+ GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+ GPT_AUDIO_MINI = :"gpt-audio-mini"
+ GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"

  # @!method self.values
  # @return [Array<Symbol>]

@@ -183,6 +183,14 @@ module OpenAI

  variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 }

+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_10_06 }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI_2025_10_06 }
+
  # @!method self.variants
  # @return [Array(String, Symbol)]

@@ -200,6 +208,10 @@ module OpenAI
  GPT_4O_REALTIME_PREVIEW_2025_06_03 = :"gpt-4o-realtime-preview-2025-06-03"
  GPT_4O_MINI_REALTIME_PREVIEW = :"gpt-4o-mini-realtime-preview"
  GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
+ GPT_REALTIME_MINI = :"gpt-realtime-mini"
+ GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+ GPT_AUDIO_MINI = :"gpt-audio-mini"
+ GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"

  # @!endgroup
  end

@@ -584,6 +584,14 @@ module OpenAI

  variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 }

+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI_2025_10_06 }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI }
+
+ variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI_2025_10_06 }
+
  # @!method self.variants
  # @return [Array(String, Symbol)]

@@ -601,6 +609,10 @@ module OpenAI
  GPT_4O_REALTIME_PREVIEW_2025_06_03 = :"gpt-4o-realtime-preview-2025-06-03"
  GPT_4O_MINI_REALTIME_PREVIEW = :"gpt-4o-mini-realtime-preview"
  GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
+ GPT_REALTIME_MINI = :"gpt-realtime-mini"
+ GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+ GPT_AUDIO_MINI = :"gpt-audio-mini"
+ GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"

  # @!endgroup
  end
```
```diff
@@ -10,6 +10,9 @@ module OpenAI
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
  #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
+ #
  # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
  optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

@@ -7,6 +7,9 @@ module OpenAI
  # supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
  # effort can result in faster responses and fewer tokens used on reasoning in a
  # response.
+ #
+ # Note: The `gpt-5-pro` model defaults to (and only supports) `high` reasoning
+ # effort.
  module ReasoningEffort
  extend OpenAI::Internal::Type::Enum

```
```diff
@@ -427,7 +427,8 @@ module OpenAI
  # @!attribute input_fidelity
  # Control how much effort the model will exert to match the style and features,
  # especially facial features, of input images. This parameter is only supported
- # for `gpt-image-1`. Supports `high` and
+ # for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+ # `low`. Defaults to `low`.
  #
  # @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil]
  optional :input_fidelity,

@@ -534,7 +535,8 @@ module OpenAI

  # Control how much effort the model will exert to match the style and features,
  # especially facial features, of input images. This parameter is only supported
- # for `gpt-image-1`. Supports `high` and
+ # for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
+ # `low`. Defaults to `low`.
  #
  # @see OpenAI::Models::Responses::Tool::ImageGeneration#input_fidelity
  module InputFidelity

@@ -581,6 +583,7 @@ module OpenAI
  extend OpenAI::Internal::Type::Enum

  GPT_IMAGE_1 = :"gpt-image-1"
+ GPT_IMAGE_1_MINI = :"gpt-image-1-mini"

  # @!method self.values
  # @return [Array<Symbol>]
```
```diff
@@ -25,6 +25,8 @@ module OpenAI
  COMPUTER_USE_PREVIEW = :"computer-use-preview"
  COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
  GPT_5_CODEX = :"gpt-5-codex"
+ GPT_5_PRO = :"gpt-5-pro"
+ GPT_5_PRO_2025_10_06 = :"gpt-5-pro-2025-10-06"

  # @!method self.values
  # @return [Array<Symbol>]
```
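With `gpt-5-pro` added to `ResponsesModel`, it can be passed to the Responses API. A minimal sketch (not part of the diff; output handling is kept deliberately simple):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# "gpt-5-pro" (or the dated snapshot "gpt-5-pro-2025-10-06") is now a valid
# ResponsesModel value; per the note above, its reasoning effort is always :high.
response = client.responses.create(
  model: "gpt-5-pro",
  input: "Outline a migration plan from REST polling to webhooks."
)

puts response.id
pp response.output # array of output items (messages, reasoning, etc.)
```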
```diff
@@ -101,7 +101,7 @@ module OpenAI
  # @see OpenAI::Models::VectorStores::VectorStoreFile#last_error
  class LastError < OpenAI::Internal::Type::BaseModel
  # @!attribute code
- # One of `server_error` or `
+ # One of `server_error`, `unsupported_file`, or `invalid_file`.
  #
  # @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code]
  required :code, enum: -> { OpenAI::VectorStores::VectorStoreFile::LastError::Code }

@@ -116,11 +116,11 @@ module OpenAI
  # The last error associated with this vector store file. Will be `null` if there
  # are no errors.
  #
- # @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error` or `
+ # @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error`, `unsupported_file`, or `invalid_file`.
  #
  # @param message [String] A human-readable description of the error.

- # One of `server_error` or `
+ # One of `server_error`, `unsupported_file`, or `invalid_file`.
  #
  # @see OpenAI::Models::VectorStores::VectorStoreFile::LastError#code
  module Code
```
```diff
@@ -0,0 +1,122 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#create
+     class Video < OpenAI::Internal::Type::BaseModel
+       # @!attribute id
+       # Unique identifier for the video job.
+       #
+       # @return [String]
+       required :id, String
+
+       # @!attribute completed_at
+       # Unix timestamp (seconds) for when the job completed, if finished.
+       #
+       # @return [Integer, nil]
+       required :completed_at, Integer, nil?: true
+
+       # @!attribute created_at
+       # Unix timestamp (seconds) for when the job was created.
+       #
+       # @return [Integer]
+       required :created_at, Integer
+
+       # @!attribute error
+       # Error payload that explains why generation failed, if applicable.
+       #
+       # @return [OpenAI::Models::VideoCreateError, nil]
+       required :error, -> { OpenAI::VideoCreateError }, nil?: true
+
+       # @!attribute expires_at
+       # Unix timestamp (seconds) for when the downloadable assets expire, if set.
+       #
+       # @return [Integer, nil]
+       required :expires_at, Integer, nil?: true
+
+       # @!attribute model
+       # The video generation model that produced the job.
+       #
+       # @return [Symbol, OpenAI::Models::VideoModel]
+       required :model, enum: -> { OpenAI::VideoModel }
+
+       # @!attribute object
+       # The object type, which is always `video`.
+       #
+       # @return [Symbol, :video]
+       required :object, const: :video
+
+       # @!attribute progress
+       # Approximate completion percentage for the generation task.
+       #
+       # @return [Integer]
+       required :progress, Integer
+
+       # @!attribute remixed_from_video_id
+       # Identifier of the source video if this video is a remix.
+       #
+       # @return [String, nil]
+       required :remixed_from_video_id, String, nil?: true
+
+       # @!attribute seconds
+       # Duration of the generated clip in seconds.
+       #
+       # @return [Symbol, OpenAI::Models::VideoSeconds]
+       required :seconds, enum: -> { OpenAI::VideoSeconds }
+
+       # @!attribute size
+       # The resolution of the generated video.
+       #
+       # @return [Symbol, OpenAI::Models::VideoSize]
+       required :size, enum: -> { OpenAI::VideoSize }
+
+       # @!attribute status
+       # Current lifecycle status of the video job.
+       #
+       # @return [Symbol, OpenAI::Models::Video::Status]
+       required :status, enum: -> { OpenAI::Video::Status }
+
+       # @!method initialize(id:, completed_at:, created_at:, error:, expires_at:, model:, progress:, remixed_from_video_id:, seconds:, size:, status:, object: :video)
+       # Structured information describing a generated video job.
+       #
+       # @param id [String] Unique identifier for the video job.
+       #
+       # @param completed_at [Integer, nil] Unix timestamp (seconds) for when the job completed, if finished.
+       #
+       # @param created_at [Integer] Unix timestamp (seconds) for when the job was created.
+       #
+       # @param error [OpenAI::Models::VideoCreateError, nil] Error payload that explains why generation failed, if applicable.
+       #
+       # @param expires_at [Integer, nil] Unix timestamp (seconds) for when the downloadable assets expire, if set.
+       #
+       # @param model [Symbol, OpenAI::Models::VideoModel] The video generation model that produced the job.
+       #
+       # @param progress [Integer] Approximate completion percentage for the generation task.
+       #
+       # @param remixed_from_video_id [String, nil] Identifier of the source video if this video is a remix.
+       #
+       # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Duration of the generated clip in seconds.
+       #
+       # @param size [Symbol, OpenAI::Models::VideoSize] The resolution of the generated video.
+       #
+       # @param status [Symbol, OpenAI::Models::Video::Status] Current lifecycle status of the video job.
+       #
+       # @param object [Symbol, :video] The object type, which is always `video`.
+
+       # Current lifecycle status of the video job.
+       #
+       # @see OpenAI::Models::Video#status
+       module Status
+         extend OpenAI::Internal::Type::Enum
+
+         QUEUED = :queued
+         IN_PROGRESS = :in_progress
+         COMPLETED = :completed
+         FAILED = :failed
+
+         # @!method self.values
+         # @return [Array<Symbol>]
+       end
+     end
+   end
+ end
```
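A minimal polling sketch for the new `Video` model (not part of the diff; it assumes the `client.videos` resource added in this release and a job id from an earlier create call):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
video_id = "video_123" # hypothetical id returned by an earlier videos.create

# Poll until the job leaves :queued / :in_progress.
loop do
  video = client.videos.retrieve(video_id)
  puts "#{video.status} (#{video.progress}%)"

  case video.status
  when :completed
    break
  when :failed
    # `error` carries a VideoCreateError with `code` and `message`.
    abort "generation failed: #{video.error&.code} #{video.error&.message}"
  else
    sleep 5
  end
end
```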
```diff
@@ -0,0 +1,21 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     class VideoCreateError < OpenAI::Internal::Type::BaseModel
+       # @!attribute code
+       #
+       # @return [String]
+       required :code, String
+
+       # @!attribute message
+       #
+       # @return [String]
+       required :message, String
+
+       # @!method initialize(code:, message:)
+       # @param code [String]
+       # @param message [String]
+     end
+   end
+ end
```
```diff
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#create
+     class VideoCreateParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!attribute prompt
+       # Text prompt that describes the video to generate.
+       #
+       # @return [String]
+       required :prompt, String
+
+       # @!attribute input_reference
+       # Optional image reference that guides generation.
+       #
+       # @return [Pathname, StringIO, IO, String, OpenAI::FilePart, nil]
+       optional :input_reference, OpenAI::Internal::Type::FileInput
+
+       # @!attribute model
+       # The video generation model to use. Defaults to `sora-2`.
+       #
+       # @return [Symbol, OpenAI::Models::VideoModel, nil]
+       optional :model, enum: -> { OpenAI::VideoModel }
+
+       # @!attribute seconds
+       # Clip duration in seconds. Defaults to 4 seconds.
+       #
+       # @return [Symbol, OpenAI::Models::VideoSeconds, nil]
+       optional :seconds, enum: -> { OpenAI::VideoSeconds }
+
+       # @!attribute size
+       # Output resolution formatted as width x height. Defaults to 720x1280.
+       #
+       # @return [Symbol, OpenAI::Models::VideoSize, nil]
+       optional :size, enum: -> { OpenAI::VideoSize }
+
+       # @!method initialize(prompt:, input_reference: nil, model: nil, seconds: nil, size: nil, request_options: {})
+       # @param prompt [String] Text prompt that describes the video to generate.
+       #
+       # @param input_reference [Pathname, StringIO, IO, String, OpenAI::FilePart] Optional image reference that guides generation.
+       #
+       # @param model [Symbol, OpenAI::Models::VideoModel] The video generation model to use. Defaults to `sora-2`.
+       #
+       # @param seconds [Symbol, OpenAI::Models::VideoSeconds] Clip duration in seconds. Defaults to 4 seconds.
+       #
+       # @param size [Symbol, OpenAI::Models::VideoSize] Output resolution formatted as width x height. Defaults to 720x1280.
+       #
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+     end
+   end
+ end
```
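A hedged creation sketch using these params (not part of the diff; only `prompt` is required, the rest fall back to the documented defaults):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# model defaults to sora-2, duration to 4 seconds, size to 720x1280.
video = client.videos.create(
  prompt: "A paper boat drifting down a rain-soaked gutter, macro shot",
  model: :"sora-2"
)

puts video.id
puts video.status # typically :queued immediately after creation
```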
```diff
@@ -0,0 +1,14 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#delete
+     class VideoDeleteParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!method initialize(request_options: {})
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+     end
+   end
+ end
```
```diff
@@ -0,0 +1,35 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#delete
+     class VideoDeleteResponse < OpenAI::Internal::Type::BaseModel
+       # @!attribute id
+       # Identifier of the deleted video.
+       #
+       # @return [String]
+       required :id, String
+
+       # @!attribute deleted
+       # Indicates that the video resource was deleted.
+       #
+       # @return [Boolean]
+       required :deleted, OpenAI::Internal::Type::Boolean
+
+       # @!attribute object
+       # The object type that signals the deletion response.
+       #
+       # @return [Symbol, :"video.deleted"]
+       required :object, const: :"video.deleted"
+
+       # @!method initialize(id:, deleted:, object: :"video.deleted")
+       # Confirmation payload returned after deleting a video.
+       #
+       # @param id [String] Identifier of the deleted video.
+       #
+       # @param deleted [Boolean] Indicates that the video resource was deleted.
+       #
+       # @param object [Symbol, :"video.deleted"] The object type that signals the deletion response.
+     end
+   end
+ end
```
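Deleting a job is a single call (a sketch, not part of the diff; the id is hypothetical):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

deleted = client.videos.delete("video_123") # hypothetical video id

puts deleted.deleted # => true
puts deleted.object  # => :"video.deleted"
```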
```diff
@@ -0,0 +1,34 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#download_content
+     class VideoDownloadContentParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!attribute variant
+       # Which downloadable asset to return. Defaults to the MP4 video.
+       #
+       # @return [Symbol, OpenAI::Models::VideoDownloadContentParams::Variant, nil]
+       optional :variant, enum: -> { OpenAI::VideoDownloadContentParams::Variant }
+
+       # @!method initialize(variant: nil, request_options: {})
+       # @param variant [Symbol, OpenAI::Models::VideoDownloadContentParams::Variant] Which downloadable asset to return. Defaults to the MP4 video.
+       #
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
+       # Which downloadable asset to return. Defaults to the MP4 video.
+       module Variant
+         extend OpenAI::Internal::Type::Enum
+
+         VIDEO = :video
+         THUMBNAIL = :thumbnail
+         SPRITESHEET = :spritesheet
+
+         # @!method self.values
+         # @return [Array<Symbol>]
+       end
+     end
+   end
+ end
```
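A download sketch (not part of the diff; the return type of the binary endpoint is not shown here, so the write-out step is an assumption):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
video_id = "video_123" # hypothetical id of a completed job

# variant defaults to the MP4 video; :thumbnail and :spritesheet are also valid.
content = client.videos.download_content(video_id, variant: :video)

# Assuming an IO-like return value (not confirmed by this diff), persist it:
File.binwrite("generated.mp4", content.read)
```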
```diff
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#list
+     class VideoListParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!attribute after
+       # Identifier for the last item from the previous pagination request
+       #
+       # @return [String, nil]
+       optional :after, String
+
+       # @!attribute limit
+       # Number of items to retrieve
+       #
+       # @return [Integer, nil]
+       optional :limit, Integer
+
+       # @!attribute order
+       # Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+       # descending order.
+       #
+       # @return [Symbol, OpenAI::Models::VideoListParams::Order, nil]
+       optional :order, enum: -> { OpenAI::VideoListParams::Order }
+
+       # @!method initialize(after: nil, limit: nil, order: nil, request_options: {})
+       # Some parameter documentations has been truncated, see
+       # {OpenAI::Models::VideoListParams} for more details.
+       #
+       # @param after [String] Identifier for the last item from the previous pagination request
+       #
+       # @param limit [Integer] Number of items to retrieve
+       #
+       # @param order [Symbol, OpenAI::Models::VideoListParams::Order] Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+       #
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+
+       # Sort order of results by timestamp. Use `asc` for ascending order or `desc` for
+       # descending order.
+       module Order
+         extend OpenAI::Internal::Type::Enum
+
+         ASC = :asc
+         DESC = :desc
+
+         # @!method self.values
+         # @return [Array<Symbol>]
+       end
+     end
+   end
+ end
```
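A listing sketch (not part of the diff; it assumes the gem's usual page wrapper with a `data` array of `Video` models):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Newest jobs first, ten at a time; pass `after:` with the last id seen to
# fetch the next page.
page = client.videos.list(limit: 10, order: :desc)

page.data.each do |video|
  puts "#{video.id} #{video.status} #{video.progress}%"
end
```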
```diff
@@ -0,0 +1,15 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     module VideoModel
+       extend OpenAI::Internal::Type::Enum
+
+       SORA_2 = :"sora-2"
+       SORA_2_PRO = :"sora-2-pro"
+
+       # @!method self.values
+       # @return [Array<Symbol>]
+     end
+   end
+ end
```
```diff
@@ -0,0 +1,22 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#remix
+     class VideoRemixParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!attribute prompt
+       # Updated text prompt that directs the remix generation.
+       #
+       # @return [String]
+       required :prompt, String
+
+       # @!method initialize(prompt:, request_options: {})
+       # @param prompt [String] Updated text prompt that directs the remix generation.
+       #
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+     end
+   end
+ end
```
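A remix sketch (not part of the diff; the source id is hypothetical and the call shape follows the resource method named in the `@see` above):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Remixing starts from a completed video and applies an updated prompt; the new
# job's remixed_from_video_id points back at the source.
remix = client.videos.remix(
  "video_123", # hypothetical source video id
  prompt: "Same scene, but at golden hour with warmer light"
)

puts remix.id
puts remix.remixed_from_video_id
```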
```diff
@@ -0,0 +1,14 @@
+ # frozen_string_literal: true
+
+ module OpenAI
+   module Models
+     # @see OpenAI::Resources::Videos#retrieve
+     class VideoRetrieveParams < OpenAI::Internal::Type::BaseModel
+       extend OpenAI::Internal::Type::RequestParameters::Converter
+       include OpenAI::Internal::Type::RequestParameters
+
+       # @!method initialize(request_options: {})
+       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
+     end
+   end
+ end
```