openai 0.20.0 → 0.21.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +21 -0
- data/README.md +1 -1
- data/lib/openai/client.rb +4 -0
- data/lib/openai/internal/stream.rb +3 -2
- data/lib/openai/models/audio/speech_create_params.rb +6 -0
- data/lib/openai/models/chat/chat_completion_audio_param.rb +6 -0
- data/lib/openai/models/eval_create_params.rb +10 -6
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +10 -6
- data/lib/openai/models/evals/run_cancel_response.rb +12 -8
- data/lib/openai/models/evals/run_create_params.rb +12 -8
- data/lib/openai/models/evals/run_create_response.rb +12 -8
- data/lib/openai/models/evals/run_list_response.rb +12 -8
- data/lib/openai/models/evals/run_retrieve_response.rb +12 -8
- data/lib/openai/models/graders/label_model_grader.rb +10 -6
- data/lib/openai/models/graders/score_model_grader.rb +10 -6
- data/lib/openai/models/realtime/client_secret_create_params.rb +93 -0
- data/lib/openai/models/realtime/client_secret_create_response.rb +300 -0
- data/lib/openai/models/realtime/conversation_created_event.rb +70 -0
- data/lib/openai/models/realtime/conversation_item.rb +44 -0
- data/lib/openai/models/realtime/conversation_item_added.rb +48 -0
- data/lib/openai/models/realtime/conversation_item_create_event.rb +57 -0
- data/lib/openai/models/realtime/conversation_item_created_event.rb +59 -0
- data/lib/openai/models/realtime/conversation_item_delete_event.rb +39 -0
- data/lib/openai/models/realtime/conversation_item_deleted_event.rb +38 -0
- data/lib/openai/models/realtime/conversation_item_done.rb +48 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rb +189 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rb +63 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_failed_event.rb +96 -0
- data/lib/openai/models/realtime/conversation_item_input_audio_transcription_segment.rb +84 -0
- data/lib/openai/models/realtime/conversation_item_retrieve_event.rb +40 -0
- data/lib/openai/models/realtime/conversation_item_truncate_event.rb +68 -0
- data/lib/openai/models/realtime/conversation_item_truncated_event.rb +60 -0
- data/lib/openai/models/realtime/conversation_item_with_reference.rb +235 -0
- data/lib/openai/models/realtime/input_audio_buffer_append_event.rb +49 -0
- data/lib/openai/models/realtime/input_audio_buffer_clear_event.rb +29 -0
- data/lib/openai/models/realtime/input_audio_buffer_cleared_event.rb +29 -0
- data/lib/openai/models/realtime/input_audio_buffer_commit_event.rb +35 -0
- data/lib/openai/models/realtime/input_audio_buffer_committed_event.rb +51 -0
- data/lib/openai/models/realtime/input_audio_buffer_speech_started_event.rb +59 -0
- data/lib/openai/models/realtime/input_audio_buffer_speech_stopped_event.rb +51 -0
- data/lib/openai/models/realtime/input_audio_buffer_timeout_triggered.rb +52 -0
- data/lib/openai/models/realtime/log_prob_properties.rb +39 -0
- data/lib/openai/models/realtime/mcp_list_tools_completed.rb +36 -0
- data/lib/openai/models/realtime/mcp_list_tools_failed.rb +36 -0
- data/lib/openai/models/realtime/mcp_list_tools_in_progress.rb +36 -0
- data/lib/openai/models/realtime/output_audio_buffer_clear_event.rb +32 -0
- data/lib/openai/models/realtime/rate_limits_updated_event.rb +91 -0
- data/lib/openai/models/realtime/realtime_audio_config.rb +446 -0
- data/lib/openai/models/realtime/realtime_client_event.rb +123 -0
- data/lib/openai/models/realtime/realtime_client_secret_config.rb +64 -0
- data/lib/openai/models/realtime/realtime_conversation_item_assistant_message.rb +118 -0
- data/lib/openai/models/realtime/realtime_conversation_item_function_call.rb +94 -0
- data/lib/openai/models/realtime/realtime_conversation_item_function_call_output.rb +86 -0
- data/lib/openai/models/realtime/realtime_conversation_item_system_message.rb +118 -0
- data/lib/openai/models/realtime/realtime_conversation_item_user_message.rb +135 -0
- data/lib/openai/models/realtime/realtime_error.rb +55 -0
- data/lib/openai/models/realtime/realtime_error_event.rb +38 -0
- data/lib/openai/models/realtime/realtime_mcp_approval_request.rb +52 -0
- data/lib/openai/models/realtime/realtime_mcp_approval_response.rb +52 -0
- data/lib/openai/models/realtime/realtime_mcp_list_tools.rb +84 -0
- data/lib/openai/models/realtime/realtime_mcp_protocol_error.rb +29 -0
- data/lib/openai/models/realtime/realtime_mcp_tool_call.rb +94 -0
- data/lib/openai/models/realtime/realtime_mcp_tool_execution_error.rb +23 -0
- data/lib/openai/models/realtime/realtime_mcphttp_error.rb +29 -0
- data/lib/openai/models/realtime/realtime_response.rb +259 -0
- data/lib/openai/models/realtime/realtime_response_status.rb +103 -0
- data/lib/openai/models/realtime/realtime_response_usage.rb +61 -0
- data/lib/openai/models/realtime/realtime_response_usage_input_token_details.rb +36 -0
- data/lib/openai/models/realtime/realtime_response_usage_output_token_details.rb +28 -0
- data/lib/openai/models/realtime/realtime_server_event.rb +369 -0
- data/lib/openai/models/realtime/realtime_session.rb +696 -0
- data/lib/openai/models/realtime/realtime_session_create_request.rb +234 -0
- data/lib/openai/models/realtime/realtime_session_create_response.rb +579 -0
- data/lib/openai/models/realtime/realtime_tool_choice_config.rb +32 -0
- data/lib/openai/models/realtime/realtime_tools_config.rb +11 -0
- data/lib/openai/models/realtime/realtime_tools_config_union.rb +379 -0
- data/lib/openai/models/realtime/realtime_tracing_config.rb +61 -0
- data/lib/openai/models/realtime/realtime_transcription_session_create_request.rb +312 -0
- data/lib/openai/models/realtime/realtime_truncation.rb +67 -0
- data/lib/openai/models/realtime/response_audio_delta_event.rb +68 -0
- data/lib/openai/models/realtime/response_audio_done_event.rb +61 -0
- data/lib/openai/models/realtime/response_audio_transcript_delta_event.rb +68 -0
- data/lib/openai/models/realtime/response_audio_transcript_done_event.rb +70 -0
- data/lib/openai/models/realtime/response_cancel_event.rb +42 -0
- data/lib/openai/models/realtime/response_content_part_added_event.rb +120 -0
- data/lib/openai/models/realtime/response_content_part_done_event.rb +120 -0
- data/lib/openai/models/realtime/response_create_event.rb +391 -0
- data/lib/openai/models/realtime/response_created_event.rb +37 -0
- data/lib/openai/models/realtime/response_done_event.rb +38 -0
- data/lib/openai/models/realtime/response_function_call_arguments_delta_event.rb +72 -0
- data/lib/openai/models/realtime/response_function_call_arguments_done_event.rb +73 -0
- data/lib/openai/models/realtime/response_mcp_call_arguments_delta.rb +68 -0
- data/lib/openai/models/realtime/response_mcp_call_arguments_done.rb +60 -0
- data/lib/openai/models/realtime/response_mcp_call_completed.rb +44 -0
- data/lib/openai/models/realtime/response_mcp_call_failed.rb +44 -0
- data/lib/openai/models/realtime/response_mcp_call_in_progress.rb +44 -0
- data/lib/openai/models/realtime/response_output_item_added_event.rb +52 -0
- data/lib/openai/models/realtime/response_output_item_done_event.rb +53 -0
- data/lib/openai/models/realtime/response_text_delta_event.rb +68 -0
- data/lib/openai/models/realtime/response_text_done_event.rb +69 -0
- data/lib/openai/models/realtime/session_created_event.rb +38 -0
- data/lib/openai/models/realtime/session_update_event.rb +44 -0
- data/lib/openai/models/realtime/session_updated_event.rb +37 -0
- data/lib/openai/models/realtime/transcription_session_created.rb +278 -0
- data/lib/openai/models/realtime/transcription_session_update.rb +36 -0
- data/lib/openai/models/realtime/transcription_session_updated_event.rb +279 -0
- data/lib/openai/models/responses/easy_input_message.rb +3 -3
- data/lib/openai/models/responses/response.rb +6 -3
- data/lib/openai/models/responses/response_content.rb +4 -1
- data/lib/openai/models/responses/response_create_params.rb +6 -3
- data/lib/openai/models/responses/response_input_audio.rb +39 -23
- data/lib/openai/models/responses/response_input_content.rb +4 -1
- data/lib/openai/models/responses/response_input_item.rb +2 -2
- data/lib/openai/models/responses/response_input_message_item.rb +2 -2
- data/lib/openai/models/responses/tool.rb +3 -156
- data/lib/openai/models/responses/web_search_preview_tool.rb +124 -0
- data/lib/openai/models/responses/web_search_tool.rb +58 -21
- data/lib/openai/models/webhooks/realtime_call_incoming_webhook_event.rb +119 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +4 -1
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/realtime/client_secrets.rb +44 -0
- data/lib/openai/resources/realtime.rb +18 -0
- data/lib/openai/resources/responses.rb +2 -2
- data/lib/openai/resources/webhooks.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +95 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +10 -0
- data/rbi/openai/models/chat/chat_completion_audio_param.rbi +10 -0
- data/rbi/openai/models/eval_create_params.rbi +5 -1
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +5 -1
- data/rbi/openai/models/evals/run_cancel_response.rbi +7 -5
- data/rbi/openai/models/evals/run_create_params.rbi +13 -9
- data/rbi/openai/models/evals/run_create_response.rbi +7 -5
- data/rbi/openai/models/evals/run_list_response.rbi +7 -5
- data/rbi/openai/models/evals/run_retrieve_response.rbi +7 -5
- data/rbi/openai/models/graders/label_model_grader.rbi +5 -1
- data/rbi/openai/models/graders/score_model_grader.rbi +5 -1
- data/rbi/openai/models/realtime/client_secret_create_params.rbi +222 -0
- data/rbi/openai/models/realtime/client_secret_create_response.rbi +676 -0
- data/rbi/openai/models/realtime/conversation_created_event.rbi +164 -0
- data/rbi/openai/models/realtime/conversation_item.rbi +35 -0
- data/rbi/openai/models/realtime/conversation_item_added.rbi +105 -0
- data/rbi/openai/models/realtime/conversation_item_create_event.rbi +123 -0
- data/rbi/openai/models/realtime/conversation_item_created_event.rbi +117 -0
- data/rbi/openai/models/realtime/conversation_item_delete_event.rbi +57 -0
- data/rbi/openai/models/realtime/conversation_item_deleted_event.rbi +53 -0
- data/rbi/openai/models/realtime/conversation_item_done.rbi +105 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rbi +305 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rbi +93 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_failed_event.rbi +158 -0
- data/rbi/openai/models/realtime/conversation_item_input_audio_transcription_segment.rbi +107 -0
- data/rbi/openai/models/realtime/conversation_item_retrieve_event.rbi +58 -0
- data/rbi/openai/models/realtime/conversation_item_truncate_event.rbi +94 -0
- data/rbi/openai/models/realtime/conversation_item_truncated_event.rbi +80 -0
- data/rbi/openai/models/realtime/conversation_item_with_reference.rbi +549 -0
- data/rbi/openai/models/realtime/input_audio_buffer_append_event.rbi +65 -0
- data/rbi/openai/models/realtime/input_audio_buffer_clear_event.rbi +43 -0
- data/rbi/openai/models/realtime/input_audio_buffer_cleared_event.rbi +40 -0
- data/rbi/openai/models/realtime/input_audio_buffer_commit_event.rbi +49 -0
- data/rbi/openai/models/realtime/input_audio_buffer_committed_event.rbi +72 -0
- data/rbi/openai/models/realtime/input_audio_buffer_speech_started_event.rbi +82 -0
- data/rbi/openai/models/realtime/input_audio_buffer_speech_stopped_event.rbi +73 -0
- data/rbi/openai/models/realtime/input_audio_buffer_timeout_triggered.rbi +75 -0
- data/rbi/openai/models/realtime/log_prob_properties.rbi +55 -0
- data/rbi/openai/models/realtime/mcp_list_tools_completed.rbi +51 -0
- data/rbi/openai/models/realtime/mcp_list_tools_failed.rbi +51 -0
- data/rbi/openai/models/realtime/mcp_list_tools_in_progress.rbi +51 -0
- data/rbi/openai/models/realtime/output_audio_buffer_clear_event.rbi +46 -0
- data/rbi/openai/models/realtime/rate_limits_updated_event.rbi +187 -0
- data/rbi/openai/models/realtime/realtime_audio_config.rbi +1004 -0
- data/rbi/openai/models/realtime/realtime_client_event.rbi +38 -0
- data/rbi/openai/models/realtime/realtime_client_secret_config.rbi +147 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_assistant_message.rbi +292 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call.rbi +199 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_function_call_output.rbi +188 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_system_message.rbi +292 -0
- data/rbi/openai/models/realtime/realtime_conversation_item_user_message.rbi +319 -0
- data/rbi/openai/models/realtime/realtime_error.rbi +72 -0
- data/rbi/openai/models/realtime/realtime_error_event.rbi +64 -0
- data/rbi/openai/models/realtime/realtime_mcp_approval_request.rbi +75 -0
- data/rbi/openai/models/realtime/realtime_mcp_approval_response.rbi +75 -0
- data/rbi/openai/models/realtime/realtime_mcp_list_tools.rbi +131 -0
- data/rbi/openai/models/realtime/realtime_mcp_protocol_error.rbi +40 -0
- data/rbi/openai/models/realtime/realtime_mcp_tool_call.rbi +145 -0
- data/rbi/openai/models/realtime/realtime_mcp_tool_execution_error.rbi +31 -0
- data/rbi/openai/models/realtime/realtime_mcphttp_error.rbi +40 -0
- data/rbi/openai/models/realtime/realtime_response.rbi +573 -0
- data/rbi/openai/models/realtime/realtime_response_status.rbi +233 -0
- data/rbi/openai/models/realtime/realtime_response_usage.rbi +121 -0
- data/rbi/openai/models/realtime/realtime_response_usage_input_token_details.rbi +68 -0
- data/rbi/openai/models/realtime/realtime_response_usage_output_token_details.rbi +51 -0
- data/rbi/openai/models/realtime/realtime_server_event.rbi +311 -0
- data/rbi/openai/models/realtime/realtime_session.rbi +1426 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +560 -0
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +1249 -0
- data/rbi/openai/models/realtime/realtime_tool_choice_config.rbi +30 -0
- data/rbi/openai/models/realtime/realtime_tools_config.rbi +15 -0
- data/rbi/openai/models/realtime/realtime_tools_config_union.rbi +755 -0
- data/rbi/openai/models/realtime/realtime_tracing_config.rbi +95 -0
- data/rbi/openai/models/realtime/realtime_transcription_session_create_request.rbi +703 -0
- data/rbi/openai/models/realtime/realtime_truncation.rbi +117 -0
- data/rbi/openai/models/realtime/response_audio_delta_event.rbi +91 -0
- data/rbi/openai/models/realtime/response_audio_done_event.rbi +84 -0
- data/rbi/openai/models/realtime/response_audio_transcript_delta_event.rbi +91 -0
- data/rbi/openai/models/realtime/response_audio_transcript_done_event.rbi +93 -0
- data/rbi/openai/models/realtime/response_cancel_event.rbi +63 -0
- data/rbi/openai/models/realtime/response_content_part_added_event.rbi +219 -0
- data/rbi/openai/models/realtime/response_content_part_done_event.rbi +219 -0
- data/rbi/openai/models/realtime/response_create_event.rbi +863 -0
- data/rbi/openai/models/realtime/response_created_event.rbi +65 -0
- data/rbi/openai/models/realtime/response_done_event.rbi +66 -0
- data/rbi/openai/models/realtime/response_function_call_arguments_delta_event.rbi +91 -0
- data/rbi/openai/models/realtime/response_function_call_arguments_done_event.rbi +92 -0
- data/rbi/openai/models/realtime/response_mcp_call_arguments_delta.rbi +91 -0
- data/rbi/openai/models/realtime/response_mcp_call_arguments_done.rbi +83 -0
- data/rbi/openai/models/realtime/response_mcp_call_completed.rbi +67 -0
- data/rbi/openai/models/realtime/response_mcp_call_failed.rbi +67 -0
- data/rbi/openai/models/realtime/response_mcp_call_in_progress.rbi +67 -0
- data/rbi/openai/models/realtime/response_output_item_added_event.rbi +111 -0
- data/rbi/openai/models/realtime/response_output_item_done_event.rbi +112 -0
- data/rbi/openai/models/realtime/response_text_delta_event.rbi +91 -0
- data/rbi/openai/models/realtime/response_text_done_event.rbi +92 -0
- data/rbi/openai/models/realtime/session_created_event.rbi +64 -0
- data/rbi/openai/models/realtime/session_update_event.rbi +77 -0
- data/rbi/openai/models/realtime/session_updated_event.rbi +63 -0
- data/rbi/openai/models/realtime/transcription_session_created.rbi +653 -0
- data/rbi/openai/models/realtime/transcription_session_update.rbi +74 -0
- data/rbi/openai/models/realtime/transcription_session_updated_event.rbi +657 -0
- data/rbi/openai/models/responses/response.rbi +10 -4
- data/rbi/openai/models/responses/response_content.rbi +1 -0
- data/rbi/openai/models/responses/response_create_params.rbi +16 -10
- data/rbi/openai/models/responses/response_input_audio.rbi +85 -34
- data/rbi/openai/models/responses/response_input_content.rbi +2 -1
- data/rbi/openai/models/responses/response_input_item.rbi +6 -3
- data/rbi/openai/models/responses/response_input_message_item.rbi +2 -1
- data/rbi/openai/models/responses/tool.rbi +2 -348
- data/rbi/openai/models/responses/web_search_preview_tool.rbi +245 -0
- data/rbi/openai/models/responses/web_search_tool.rbi +120 -23
- data/rbi/openai/models/webhooks/realtime_call_incoming_webhook_event.rbi +222 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +1 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/realtime/client_secrets.rbi +38 -0
- data/rbi/openai/resources/realtime.rbi +15 -0
- data/rbi/openai/resources/responses.rbi +12 -6
- data/rbi/openai/resources/webhooks.rbi +1 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/models/audio/speech_create_params.rbs +4 -0
- data/sig/openai/models/chat/chat_completion_audio_param.rbs +4 -0
- data/sig/openai/models/eval_create_params.rbs +2 -1
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +2 -1
- data/sig/openai/models/evals/run_cancel_response.rbs +2 -1
- data/sig/openai/models/evals/run_create_params.rbs +2 -1
- data/sig/openai/models/evals/run_create_response.rbs +2 -1
- data/sig/openai/models/evals/run_list_response.rbs +2 -1
- data/sig/openai/models/evals/run_retrieve_response.rbs +2 -1
- data/sig/openai/models/graders/label_model_grader.rbs +2 -1
- data/sig/openai/models/graders/score_model_grader.rbs +2 -1
- data/sig/openai/models/realtime/client_secret_create_params.rbs +89 -0
- data/sig/openai/models/realtime/client_secret_create_response.rbs +292 -0
- data/sig/openai/models/realtime/conversation_created_event.rbs +70 -0
- data/sig/openai/models/realtime/conversation_item.rbs +22 -0
- data/sig/openai/models/realtime/conversation_item_added.rbs +37 -0
- data/sig/openai/models/realtime/conversation_item_create_event.rbs +41 -0
- data/sig/openai/models/realtime/conversation_item_created_event.rbs +37 -0
- data/sig/openai/models/realtime/conversation_item_delete_event.rbs +30 -0
- data/sig/openai/models/realtime/conversation_item_deleted_event.rbs +32 -0
- data/sig/openai/models/realtime/conversation_item_done.rbs +37 -0
- data/sig/openai/models/realtime/conversation_item_input_audio_transcription_completed_event.rbs +136 -0
- data/sig/openai/models/realtime/conversation_item_input_audio_transcription_delta_event.rbs +51 -0
- data/sig/openai/models/realtime/conversation_item_input_audio_transcription_failed_event.rbs +77 -0
- data/sig/openai/models/realtime/conversation_item_input_audio_transcription_segment.rbs +62 -0
- data/sig/openai/models/realtime/conversation_item_retrieve_event.rbs +34 -0
- data/sig/openai/models/realtime/conversation_item_truncate_event.rbs +44 -0
- data/sig/openai/models/realtime/conversation_item_truncated_event.rbs +42 -0
- data/sig/openai/models/realtime/conversation_item_with_reference.rbs +207 -0
- data/sig/openai/models/realtime/input_audio_buffer_append_event.rbs +30 -0
- data/sig/openai/models/realtime/input_audio_buffer_clear_event.rbs +23 -0
- data/sig/openai/models/realtime/input_audio_buffer_cleared_event.rbs +24 -0
- data/sig/openai/models/realtime/input_audio_buffer_commit_event.rbs +23 -0
- data/sig/openai/models/realtime/input_audio_buffer_committed_event.rbs +37 -0
- data/sig/openai/models/realtime/input_audio_buffer_speech_started_event.rbs +37 -0
- data/sig/openai/models/realtime/input_audio_buffer_speech_stopped_event.rbs +37 -0
- data/sig/openai/models/realtime/input_audio_buffer_timeout_triggered.rbs +42 -0
- data/sig/openai/models/realtime/log_prob_properties.rbs +28 -0
- data/sig/openai/models/realtime/mcp_list_tools_completed.rbs +28 -0
- data/sig/openai/models/realtime/mcp_list_tools_failed.rbs +28 -0
- data/sig/openai/models/realtime/mcp_list_tools_in_progress.rbs +32 -0
- data/sig/openai/models/realtime/output_audio_buffer_clear_event.rbs +23 -0
- data/sig/openai/models/realtime/rate_limits_updated_event.rbs +85 -0
- data/sig/openai/models/realtime/realtime_audio_config.rbs +354 -0
- data/sig/openai/models/realtime/realtime_client_event.rbs +25 -0
- data/sig/openai/models/realtime/realtime_client_secret_config.rbs +60 -0
- data/sig/openai/models/realtime/realtime_conversation_item_assistant_message.rbs +117 -0
- data/sig/openai/models/realtime/realtime_conversation_item_function_call.rbs +86 -0
- data/sig/openai/models/realtime/realtime_conversation_item_function_call_output.rbs +79 -0
- data/sig/openai/models/realtime/realtime_conversation_item_system_message.rbs +117 -0
- data/sig/openai/models/realtime/realtime_conversation_item_user_message.rbs +132 -0
- data/sig/openai/models/realtime/realtime_error.rbs +42 -0
- data/sig/openai/models/realtime/realtime_error_event.rbs +32 -0
- data/sig/openai/models/realtime/realtime_mcp_approval_request.rbs +42 -0
- data/sig/openai/models/realtime/realtime_mcp_approval_response.rbs +42 -0
- data/sig/openai/models/realtime/realtime_mcp_list_tools.rbs +71 -0
- data/sig/openai/models/realtime/realtime_mcp_protocol_error.rbs +28 -0
- data/sig/openai/models/realtime/realtime_mcp_tool_call.rbs +68 -0
- data/sig/openai/models/realtime/realtime_mcp_tool_execution_error.rbs +18 -0
- data/sig/openai/models/realtime/realtime_mcphttp_error.rbs +24 -0
- data/sig/openai/models/realtime/realtime_response.rbs +210 -0
- data/sig/openai/models/realtime/realtime_response_status.rbs +90 -0
- data/sig/openai/models/realtime/realtime_response_usage.rbs +56 -0
- data/sig/openai/models/realtime/realtime_response_usage_input_token_details.rbs +34 -0
- data/sig/openai/models/realtime/realtime_response_usage_output_token_details.rbs +22 -0
- data/sig/openai/models/realtime/realtime_server_event.rbs +168 -0
- data/sig/openai/models/realtime/realtime_session.rbs +521 -0
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +178 -0
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +526 -0
- data/sig/openai/models/realtime/realtime_tool_choice_config.rbs +16 -0
- data/sig/openai/models/realtime/realtime_tools_config.rbs +10 -0
- data/sig/openai/models/realtime/realtime_tools_config_union.rbs +280 -0
- data/sig/openai/models/realtime/realtime_tracing_config.rbs +43 -0
- data/sig/openai/models/realtime/realtime_transcription_session_create_request.rbs +242 -0
- data/sig/openai/models/realtime/realtime_truncation.rbs +53 -0
- data/sig/openai/models/realtime/response_audio_delta_event.rbs +52 -0
- data/sig/openai/models/realtime/response_audio_done_event.rbs +47 -0
- data/sig/openai/models/realtime/response_audio_transcript_delta_event.rbs +52 -0
- data/sig/openai/models/realtime/response_audio_transcript_done_event.rbs +52 -0
- data/sig/openai/models/realtime/response_cancel_event.rbs +32 -0
- data/sig/openai/models/realtime/response_content_part_added_event.rbs +105 -0
- data/sig/openai/models/realtime/response_content_part_done_event.rbs +105 -0
- data/sig/openai/models/realtime/response_create_event.rbs +281 -0
- data/sig/openai/models/realtime/response_created_event.rbs +32 -0
- data/sig/openai/models/realtime/response_done_event.rbs +32 -0
- data/sig/openai/models/realtime/response_function_call_arguments_delta_event.rbs +52 -0
- data/sig/openai/models/realtime/response_function_call_arguments_done_event.rbs +52 -0
- data/sig/openai/models/realtime/response_mcp_call_arguments_delta.rbs +52 -0
- data/sig/openai/models/realtime/response_mcp_call_arguments_done.rbs +47 -0
- data/sig/openai/models/realtime/response_mcp_call_completed.rbs +37 -0
- data/sig/openai/models/realtime/response_mcp_call_failed.rbs +37 -0
- data/sig/openai/models/realtime/response_mcp_call_in_progress.rbs +37 -0
- data/sig/openai/models/realtime/response_output_item_added_event.rbs +42 -0
- data/sig/openai/models/realtime/response_output_item_done_event.rbs +42 -0
- data/sig/openai/models/realtime/response_text_delta_event.rbs +52 -0
- data/sig/openai/models/realtime/response_text_done_event.rbs +52 -0
- data/sig/openai/models/realtime/session_created_event.rbs +32 -0
- data/sig/openai/models/realtime/session_update_event.rbs +34 -0
- data/sig/openai/models/realtime/session_updated_event.rbs +32 -0
- data/sig/openai/models/realtime/transcription_session_created.rbs +282 -0
- data/sig/openai/models/realtime/transcription_session_update.rbs +34 -0
- data/sig/openai/models/realtime/transcription_session_updated_event.rbs +282 -0
- data/sig/openai/models/responses/response_content.rbs +1 -0
- data/sig/openai/models/responses/response_input_audio.rbs +32 -15
- data/sig/openai/models/responses/response_input_content.rbs +1 -0
- data/sig/openai/models/responses/tool.rbs +1 -121
- data/sig/openai/models/responses/web_search_preview_tool.rbs +96 -0
- data/sig/openai/models/responses/web_search_tool.rbs +39 -10
- data/sig/openai/models/webhooks/realtime_call_incoming_webhook_event.rbs +90 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +1 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/realtime/client_secrets.rbs +15 -0
- data/sig/openai/resources/realtime.rbs +9 -0
- data/sig/openai/resources/webhooks.rbs +1 -0
- metadata +287 -2
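
The bulk of the new surface area is the Realtime API: a `client.realtime` resource with a `client_secrets` sub-resource, plus the `OpenAI::Models::Realtime::*` model, RBI, and RBS files listed above. A minimal sketch of minting an ephemeral client secret, assuming the new resource follows the SDK's usual `create` pattern (the accepted keyword arguments live in `ClientSecretCreateParams` and are not reproduced in this diff):

```ruby
require "openai"

# The client reads ENV["OPENAI_API_KEY"] when no key is passed explicitly.
client = OpenAI::Client.new

# Hypothetical call: resource and method names are inferred from
# data/lib/openai/resources/realtime/client_secrets.rb in the file list above;
# the accepted parameters (session configuration, expiry, etc.) are not shown here.
secret = client.realtime.client_secrets.create

puts secret.class # expected: OpenAI::Models::Realtime::ClientSecretCreateResponse
```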
@@ -0,0 +1,279 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Realtime
+      class TranscriptionSessionUpdatedEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute event_id
+        #   The unique ID of the server event.
+        #
+        #   @return [String]
+        required :event_id, String
+
+        # @!attribute session
+        #   A Realtime transcription session configuration object.
+        #
+        #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session]
+        required :session, -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session }
+
+        # @!attribute type
+        #   The event type, must be `transcription_session.updated`.
+        #
+        #   @return [Symbol, :"transcription_session.updated"]
+        required :type, const: :"transcription_session.updated"
+
+        # @!method initialize(event_id:, session:, type: :"transcription_session.updated")
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent} for more details.
+        #
+        #   Returned when a transcription session is updated with a
+        #   `transcription_session.update` event, unless there is an error.
+        #
+        #   @param event_id [String] The unique ID of the server event.
+        #
+        #   @param session [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session] A Realtime transcription session configuration object.
+        #
+        #   @param type [Symbol, :"transcription_session.updated"] The event type, must be `transcription_session.updated`.
+
+        # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent#session
+        class Session < OpenAI::Internal::Type::BaseModel
+          # @!attribute id
+          #   Unique identifier for the session that looks like `sess_1234567890abcdef`.
+          #
+          #   @return [String, nil]
+          optional :id, String
+
+          # @!attribute audio
+          #   Configuration for input audio for the session.
+          #
+          #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio, nil]
+          optional :audio, -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio }
+
+          # @!attribute expires_at
+          #   Expiration timestamp for the session, in seconds since epoch.
+          #
+          #   @return [Integer, nil]
+          optional :expires_at, Integer
+
+          # @!attribute include
+          #   Additional fields to include in server outputs.
+          #
+          #   - `item.input_audio_transcription.logprobs`: Include logprobs for input audio
+          #     transcription.
+          #
+          #   @return [Array<Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Include>, nil]
+          optional :include,
+                   -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Include] }
+
+          # @!attribute object
+          #   The object type. Always `realtime.transcription_session`.
+          #
+          #   @return [String, nil]
+          optional :object, String
+
+          # @!method initialize(id: nil, audio: nil, expires_at: nil, include: nil, object: nil)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session} for more
+          #   details.
+          #
+          #   A Realtime transcription session configuration object.
+          #
+          #   @param id [String] Unique identifier for the session that looks like `sess_1234567890abcdef`.
+          #
+          #   @param audio [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio] Configuration for input audio for the session.
+          #
+          #   @param expires_at [Integer] Expiration timestamp for the session, in seconds since epoch.
+          #
+          #   @param include [Array<Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Include>] Additional fields to include in server outputs.
+          #
+          #   @param object [String] The object type. Always `realtime.transcription_session`.
+
+          # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session#audio
+          class Audio < OpenAI::Internal::Type::BaseModel
+            # @!attribute input
+            #
+            #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input, nil]
+            optional :input, -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input }
+
+            # @!method initialize(input: nil)
+            #   Configuration for input audio for the session.
+            #
+            #   @param input [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input]
+
+            # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio#input
+            class Input < OpenAI::Internal::Type::BaseModel
+              # @!attribute format_
+              #   The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              #
+              #   @return [String, nil]
+              optional :format_, String, api_name: :format
+
+              # @!attribute noise_reduction
+              #   Configuration for input audio noise reduction.
+              #
+              #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction, nil]
+              optional :noise_reduction,
+                       -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction }
+
+              # @!attribute transcription
+              #   Configuration of the transcription model.
+              #
+              #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription, nil]
+              optional :transcription,
+                       -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription }
+
+              # @!attribute turn_detection
+              #   Configuration for turn detection.
+              #
+              #   @return [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::TurnDetection, nil]
+              optional :turn_detection,
+                       -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::TurnDetection }
+
+              # @!method initialize(format_: nil, noise_reduction: nil, transcription: nil, turn_detection: nil)
+              #   Some parameter documentations has been truncated, see
+              #   {OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input}
+              #   for more details.
+              #
+              #   @param format_ [String] The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              #
+              #   @param noise_reduction [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction] Configuration for input audio noise reduction.
+              #
+              #   @param transcription [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription] Configuration of the transcription model.
+              #
+              #   @param turn_detection [OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::TurnDetection] Configuration for turn detection.
+
+              # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input#noise_reduction
+              class NoiseReduction < OpenAI::Internal::Type::BaseModel
+                # @!attribute type
+                #
+                #   @return [Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction::Type, nil]
+                optional :type,
+                         enum: -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction::Type }
+
+                # @!method initialize(type: nil)
+                #   Configuration for input audio noise reduction.
+                #
+                #   @param type [Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction::Type]
+
+                # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::NoiseReduction#type
+                module Type
+                  extend OpenAI::Internal::Type::Enum
+
+                  NEAR_FIELD = :near_field
+                  FAR_FIELD = :far_field
+
+                  # @!method self.values
+                  #   @return [Array<Symbol>]
+                end
+              end
+
+              # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input#transcription
+              class Transcription < OpenAI::Internal::Type::BaseModel
+                # @!attribute language
+                #   The language of the input audio. Supplying the input language in
+                #   [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+                #   format will improve accuracy and latency.
+                #
+                #   @return [String, nil]
+                optional :language, String
+
+                # @!attribute model
+                #   The model to use for transcription. Can be `gpt-4o-transcribe`,
+                #   `gpt-4o-mini-transcribe`, or `whisper-1`.
+                #
+                #   @return [Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription::Model, nil]
+                optional :model,
+                         enum: -> { OpenAI::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription::Model }
+
+                # @!attribute prompt
+                #   An optional text to guide the model's style or continue a previous audio
+                #   segment. The
+                #   [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+                #   should match the audio language.
+                #
+                #   @return [String, nil]
+                optional :prompt, String
+
+                # @!method initialize(language: nil, model: nil, prompt: nil)
+                #   Some parameter documentations has been truncated, see
+                #   {OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription}
+                #   for more details.
+                #
+                #   Configuration of the transcription model.
+                #
+                #   @param language [String] The language of the input audio. Supplying the input language in
+                #
+                #   @param model [Symbol, OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription::Model] The model to use for transcription. Can be `gpt-4o-transcribe`, `gpt-4o-mini-tra
+                #
+                #   @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
+
+                # The model to use for transcription. Can be `gpt-4o-transcribe`,
+                # `gpt-4o-mini-transcribe`, or `whisper-1`.
+                #
+                # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::Transcription#model
+                module Model
+                  extend OpenAI::Internal::Type::Enum
+
+                  GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
+                  GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
+                  WHISPER_1 = :"whisper-1"
+
+                  # @!method self.values
+                  #   @return [Array<Symbol>]
+                end
+              end
+
+              # @see OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input#turn_detection
+              class TurnDetection < OpenAI::Internal::Type::BaseModel
+                # @!attribute prefix_padding_ms
+                #
+                #   @return [Integer, nil]
+                optional :prefix_padding_ms, Integer
+
+                # @!attribute silence_duration_ms
+                #
+                #   @return [Integer, nil]
+                optional :silence_duration_ms, Integer
+
+                # @!attribute threshold
+                #
+                #   @return [Float, nil]
+                optional :threshold, Float
+
+                # @!attribute type
+                #   Type of turn detection, only `server_vad` is currently supported.
+                #
+                #   @return [String, nil]
+                optional :type, String
+
+                # @!method initialize(prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil, type: nil)
+                #   Some parameter documentations has been truncated, see
+                #   {OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent::Session::Audio::Input::TurnDetection}
+                #   for more details.
+                #
+                #   Configuration for turn detection.
+                #
+                #   @param prefix_padding_ms [Integer]
+                #
+                #   @param silence_duration_ms [Integer]
+                #
+                #   @param threshold [Float]
+                #
+                #   @param type [String] Type of turn detection, only `server_vad` is currently supported.
+              end
+            end
+          end
+
+          module Include
+            extend OpenAI::Internal::Type::Enum
+
+            ITEM_INPUT_AUDIO_TRANSCRIPTION_LOGPROBS = :"item.input_audio_transcription.logprobs"
+
+            # @!method self.values
+            #   @return [Array<Symbol>]
+          end
+        end
+      end
+    end
+  end
+end
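
For orientation, a hedged sketch of reading this event's fields once the SDK has parsed a server event into the model above (the `event` variable and how you obtain it are assumptions; only the attribute names come from the class definition):

```ruby
# Sketch only: assumes `event` is an already-parsed
# OpenAI::Models::Realtime::TranscriptionSessionUpdatedEvent.
if event.type == :"transcription_session.updated"
  session = event.session
  # All Session fields are optional, hence the safe navigation.
  model = session.audio&.input&.transcription&.model # e.g. :"gpt-4o-transcribe"
  vad   = session.audio&.input&.turn_detection&.type # e.g. "server_vad"
  puts "#{session.id}: transcription=#{model}, turn_detection=#{vad}"
end
```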
@@ -8,7 +8,7 @@ module OpenAI
       #   Text, image, or audio input to the model, used to generate a response. Can also
       #   contain previous assistant responses.
       #
-      #   @return [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
+      #   @return [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>]
       required :content, union: -> { OpenAI::Responses::EasyInputMessage::Content }
 
       # @!attribute role
@@ -34,7 +34,7 @@ module OpenAI
       #   `assistant` role are presumed to have been generated by the model in previous
       #   interactions.
       #
-      #   @param content [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] Text, image, or audio input to the model, used to generate a response.
+      #   @param content [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>] Text, image, or audio input to the model, used to generate a response.
       #
       #   @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] The role of the message input. One of `user`, `assistant`, `system`, or
       #
@@ -55,7 +55,7 @@ module OpenAI
         variant -> { OpenAI::Responses::ResponseInputMessageContentList }
 
         # @!method self.variants
-        #   @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>)]
+        #   @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>)]
       end
 
       # The role of the message input. One of `user`, `assistant`, `system`, or
@@ -107,7 +107,7 @@ module OpenAI
       #   An array of tools the model may call while generating a response. You can
       #   specify which tool to use by setting the `tool_choice` parameter.
       #
-      #
+      #   We support the following categories of tools:
       #
       #   - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
       #     capabilities, like
@@ -115,13 +115,16 @@ module OpenAI
       #     [file search](https://platform.openai.com/docs/guides/tools-file-search).
       #     Learn more about
       #     [built-in tools](https://platform.openai.com/docs/guides/tools).
+      #   - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+      #     predefined connectors such as Google Drive and SharePoint. Learn more about
+      #     [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
       #   - **Function calls (custom tools)**: Functions that are defined by you, enabling
       #     the model to call your own code with strongly typed arguments and outputs.
       #     Learn more about
       #     [function calling](https://platform.openai.com/docs/guides/function-calling).
       #     You can also use custom tools to call your own code.
       #
-      #   @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::
+      #   @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>]
       required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
 
       # @!attribute top_p
@@ -330,7 +333,7 @@ module OpenAI
       #
       #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
       #
-      #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::
+      #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
       #
       #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
       #
@@ -16,6 +16,9 @@ module OpenAI
         # A file input to the model.
         variant -> { OpenAI::Responses::ResponseInputFile }
 
+        # An audio input to the model.
+        variant -> { OpenAI::Responses::ResponseInputAudio }
+
         # A text output from the model.
         variant -> { OpenAI::Responses::ResponseOutputText }
 
@@ -23,7 +26,7 @@ module OpenAI
         variant -> { OpenAI::Responses::ResponseOutputRefusal }
 
         # @!method self.variants
-        #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
+        #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
       end
     end
   end
@@ -236,7 +236,7 @@ module OpenAI
       #   An array of tools the model may call while generating a response. You can
       #   specify which tool to use by setting the `tool_choice` parameter.
       #
-      #
+      #   We support the following categories of tools:
       #
       #   - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
       #     capabilities, like
@@ -244,13 +244,16 @@ module OpenAI
       #     [file search](https://platform.openai.com/docs/guides/tools-file-search).
       #     Learn more about
       #     [built-in tools](https://platform.openai.com/docs/guides/tools).
+      #   - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
+      #     predefined connectors such as Google Drive and SharePoint. Learn more about
+      #     [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
       #   - **Function calls (custom tools)**: Functions that are defined by you, enabling
       #     the model to call your own code with strongly typed arguments and outputs.
       #     Learn more about
       #     [function calling](https://platform.openai.com/docs/guides/function-calling).
       #     You can also use custom tools to call your own code.
       #
-      #   @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::
+      #   @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
       optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
 
       # @!attribute top_logprobs
@@ -340,7 +343,7 @@ module OpenAI
       #
       #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
       #
-      #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::
+      #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
       #
       #   @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
       #
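
The same documentation block is updated in both `Response` and `ResponseCreateParams`, and the `Tool` union now ends with two distinct web-search variants (`WebSearchTool` plus the new `WebSearchPreviewTool`). A hedged sketch of passing a built-in tool through the updated union; the tool-hash `type` value comes from the OpenAI Responses API rather than this diff, and the model name is a placeholder:

```ruby
require "openai"

client = OpenAI::Client.new # reads ENV["OPENAI_API_KEY"]

# Sketch only: the hash is coerced into one of the Tool union variants above.
response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize today's top Ruby news.",
  tools: [
    {type: :web_search} # expected to coerce to OpenAI::Models::Responses::WebSearchTool
  ]
)

pp response.output
```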
@@ -4,17 +4,10 @@ module OpenAI
   module Models
     module Responses
       class ResponseInputAudio < OpenAI::Internal::Type::BaseModel
-        # @!attribute
-        #   Base64-encoded audio data.
+        # @!attribute input_audio
         #
-        #   @return [
-        required :
-
-        # @!attribute format_
-        #   The format of the audio data. Currently supported formats are `mp3` and `wav`.
-        #
-        #   @return [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format]
-        required :format_, enum: -> { OpenAI::Responses::ResponseInputAudio::Format }, api_name: :format
+        #   @return [OpenAI::Models::Responses::ResponseInputAudio::InputAudio]
+        required :input_audio, -> { OpenAI::Responses::ResponseInputAudio::InputAudio }
 
         # @!attribute type
         #   The type of the input item. Always `input_audio`.
@@ -22,29 +15,52 @@ module OpenAI
         #   @return [Symbol, :input_audio]
         required :type, const: :input_audio
 
-        # @!method initialize(
+        # @!method initialize(input_audio:, type: :input_audio)
         #   Some parameter documentations has been truncated, see
         #   {OpenAI::Models::Responses::ResponseInputAudio} for more details.
         #
         #   An audio input to the model.
         #
-        #   @param
-        #
-        #   @param format_ [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format] The format of the audio data. Currently supported formats are `mp3` and
+        #   @param input_audio [OpenAI::Models::Responses::ResponseInputAudio::InputAudio]
         #
         #   @param type [Symbol, :input_audio] The type of the input item. Always `input_audio`.
 
-        #
-
-
-
-
+        # @see OpenAI::Models::Responses::ResponseInputAudio#input_audio
+        class InputAudio < OpenAI::Internal::Type::BaseModel
+          # @!attribute data
+          #   Base64-encoded audio data.
+          #
+          #   @return [String]
+          required :data, String
+
+          # @!attribute format_
+          #   The format of the audio data. Currently supported formats are `mp3` and `wav`.
+          #
+          #   @return [Symbol, OpenAI::Models::Responses::ResponseInputAudio::InputAudio::Format]
+          required :format_,
+                   enum: -> { OpenAI::Responses::ResponseInputAudio::InputAudio::Format },
+                   api_name: :format
+
+          # @!method initialize(data:, format_:)
+          #   Some parameter documentations has been truncated, see
+          #   {OpenAI::Models::Responses::ResponseInputAudio::InputAudio} for more details.
+          #
+          #   @param data [String] Base64-encoded audio data.
+          #
+          #   @param format_ [Symbol, OpenAI::Models::Responses::ResponseInputAudio::InputAudio::Format] The format of the audio data. Currently supported formats are `mp3` and
+
+          # The format of the audio data. Currently supported formats are `mp3` and `wav`.
+          #
+          # @see OpenAI::Models::Responses::ResponseInputAudio::InputAudio#format_
+          module Format
+            extend OpenAI::Internal::Type::Enum
 
-
-
+            MP3 = :mp3
+            WAV = :wav
 
-
-
+            # @!method self.values
+            #   @return [Array<Symbol>]
+          end
         end
       end
     end
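
`ResponseInputAudio` is reshaped here: the Base64 payload and format move from top-level attributes into a required nested `input_audio` object with `data` and `format`, as the new `InputAudio` model above shows. A hedged sketch of sending audio as input content under the new shape (the model name and file are placeholders, and whether a given model accepts audio input is outside this diff):

```ruby
require "base64"
require "openai"

client = OpenAI::Client.new # reads ENV["OPENAI_API_KEY"]

audio_b64 = Base64.strict_encode64(File.binread("question.wav"))

response = client.responses.create(
  model: "gpt-4o-audio-preview", # placeholder; pick an audio-capable model
  input: [
    {
      role: :user,
      content: [
        # The nested input_audio hash mirrors ResponseInputAudio::InputAudio (data + format).
        {type: :input_audio, input_audio: {data: audio_b64, format: :wav}}
      ]
    }
  ]
)

pp response
```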
@@ -18,8 +18,11 @@ module OpenAI
         # A file input to the model.
         variant :input_file, -> { OpenAI::Responses::ResponseInputFile }
 
+        # An audio input to the model.
+        variant :input_audio, -> { OpenAI::Responses::ResponseInputAudio }
+
         # @!method self.variants
-        #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
+        #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio)]
       end
     end
   end
@@ -94,7 +94,7 @@ module OpenAI
           #   A list of one or many input items to the model, containing different content
           #   types.
           #
-          #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
+          #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>]
           required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
 
           # @!attribute role
@@ -124,7 +124,7 @@ module OpenAI
           #   hierarchy. Instructions given with the `developer` or `system` role take
           #   precedence over instructions given with the `user` role.
           #
-          #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
+          #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>] A list of one or many input items to the model, containing different content
           #
           #   @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] The role of the message input. One of `user`, `system`, or `developer`.
           #
@@ -14,7 +14,7 @@ module OpenAI
       #   A list of one or many input items to the model, containing different content
       #   types.
       #
-      #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
+      #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>]
       required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }
 
       # @!attribute role
@@ -42,7 +42,7 @@ module OpenAI
       #
       #   @param id [String] The unique ID of the message input.
       #
-      #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
+      #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseInputAudio>] A list of one or many input items to the model, containing different content
      #
       #   @param role [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] The role of the message input. One of `user`, `system`, or `developer`.
       #