aimlapi-sdk-python 2.8.1b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
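For readers who want to verify or reproduce a per-file listing like the one below, the wheel's contents can be inspected locally with nothing beyond the Python standard library. The sketch below is illustrative only: it assumes the wheel has already been downloaded (for example with `pip download aimlapi-sdk-python==2.8.1b0 --no-deps`) and sits in the current directory under the canonical filename shown; it is not part of the package itself.

```python
# Minimal sketch: list every file inside the downloaded wheel and count its lines,
# analogous to the "+N" column in the diff below. Uses only the standard library.
import zipfile

# Assumed local filename of the downloaded wheel (adjust if yours differs).
WHEEL = "aimlapi_sdk_python-2.8.1b0-py3-none-any.whl"

with zipfile.ZipFile(WHEEL) as wheel:
    for name in wheel.namelist():
        with wheel.open(name) as member:
            line_count = sum(1 for _ in member)  # count lines in the archived file
        print(f"- {name} +{line_count}")
```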
- aimlapi/__init__.py +243 -0
- aimlapi/__main__.py +3 -0
- aimlapi/_client.py +368 -0
- aimlapi/_utils/__init__.py +3 -0
- aimlapi/_utils/_compat.py +3 -0
- aimlapi/_utils/_datetime_parse.py +3 -0
- aimlapi/_utils/_logs.py +3 -0
- aimlapi/_utils/_proxy.py +3 -0
- aimlapi/_utils/_reflection.py +3 -0
- aimlapi/_utils/_resources_proxy.py +3 -0
- aimlapi/_utils/_streams.py +3 -0
- aimlapi/_utils/_sync.py +3 -0
- aimlapi/_utils/_transform.py +3 -0
- aimlapi/_utils/_typing.py +3 -0
- aimlapi/_utils/_utils.py +3 -0
- aimlapi/_version.py +9 -0
- aimlapi/cli/__init__.py +3 -0
- aimlapi/cli/_api/__init__.py +3 -0
- aimlapi/cli/_api/_main.py +3 -0
- aimlapi/cli/_api/audio.py +3 -0
- aimlapi/cli/_api/chat/__init__.py +3 -0
- aimlapi/cli/_api/chat/completions.py +3 -0
- aimlapi/cli/_api/completions.py +3 -0
- aimlapi/cli/_api/files.py +3 -0
- aimlapi/cli/_api/fine_tuning/__init__.py +3 -0
- aimlapi/cli/_api/fine_tuning/jobs.py +3 -0
- aimlapi/cli/_api/image.py +3 -0
- aimlapi/cli/_api/models.py +3 -0
- aimlapi/cli/_cli.py +3 -0
- aimlapi/cli/_errors.py +3 -0
- aimlapi/cli/_models.py +3 -0
- aimlapi/cli/_progress.py +3 -0
- aimlapi/cli/_tools/__init__.py +3 -0
- aimlapi/cli/_tools/_main.py +3 -0
- aimlapi/cli/_tools/fine_tunes.py +3 -0
- aimlapi/cli/_tools/migrate.py +3 -0
- aimlapi/cli/_utils.py +3 -0
- aimlapi/helpers/__init__.py +3 -0
- aimlapi/helpers/local_audio_player.py +3 -0
- aimlapi/helpers/microphone.py +3 -0
- aimlapi/lib/__init__.py +3 -0
- aimlapi/lib/_old_api.py +3 -0
- aimlapi/lib/_parsing/__init__.py +3 -0
- aimlapi/lib/_parsing/_completions.py +3 -0
- aimlapi/lib/_parsing/_responses.py +3 -0
- aimlapi/lib/_pydantic.py +3 -0
- aimlapi/lib/_realtime.py +3 -0
- aimlapi/lib/_tools.py +3 -0
- aimlapi/lib/_validators.py +3 -0
- aimlapi/lib/azure.py +3 -0
- aimlapi/lib/streaming/__init__.py +3 -0
- aimlapi/lib/streaming/_assistants.py +3 -0
- aimlapi/lib/streaming/_deltas.py +3 -0
- aimlapi/lib/streaming/chat/__init__.py +3 -0
- aimlapi/lib/streaming/chat/_completions.py +3 -0
- aimlapi/lib/streaming/chat/_events.py +3 -0
- aimlapi/lib/streaming/chat/_types.py +3 -0
- aimlapi/lib/streaming/responses/__init__.py +3 -0
- aimlapi/lib/streaming/responses/_events.py +3 -0
- aimlapi/lib/streaming/responses/_responses.py +3 -0
- aimlapi/lib/streaming/responses/_types.py +3 -0
- aimlapi/pagination.py +3 -0
- aimlapi/resources/__init__.py +3 -0
- aimlapi/resources/audio/__init__.py +47 -0
- aimlapi/resources/audio/_polling.py +129 -0
- aimlapi/resources/audio/audio.py +56 -0
- aimlapi/resources/audio/speech.py +428 -0
- aimlapi/resources/audio/transcriptions.py +219 -0
- aimlapi/resources/audio/translations.py +3 -0
- aimlapi/resources/batches.py +3 -0
- aimlapi/resources/beta/__init__.py +3 -0
- aimlapi/resources/beta/assistants.py +3 -0
- aimlapi/resources/beta/beta.py +3 -0
- aimlapi/resources/beta/chatkit/__init__.py +3 -0
- aimlapi/resources/beta/chatkit/chatkit.py +3 -0
- aimlapi/resources/beta/chatkit/sessions.py +3 -0
- aimlapi/resources/beta/chatkit/threads.py +3 -0
- aimlapi/resources/beta/realtime/__init__.py +3 -0
- aimlapi/resources/beta/realtime/realtime.py +3 -0
- aimlapi/resources/beta/realtime/sessions.py +3 -0
- aimlapi/resources/beta/realtime/transcription_sessions.py +3 -0
- aimlapi/resources/beta/threads/__init__.py +3 -0
- aimlapi/resources/beta/threads/messages.py +3 -0
- aimlapi/resources/beta/threads/runs/__init__.py +3 -0
- aimlapi/resources/beta/threads/runs/runs.py +3 -0
- aimlapi/resources/beta/threads/runs/steps.py +3 -0
- aimlapi/resources/beta/threads/threads.py +3 -0
- aimlapi/resources/chat/__init__.py +3 -0
- aimlapi/resources/chat/chat.py +86 -0
- aimlapi/resources/chat/completions/__init__.py +4 -0
- aimlapi/resources/chat/completions/completions.py +452 -0
- aimlapi/resources/chat/completions/messages.py +3 -0
- aimlapi/resources/completions.py +3 -0
- aimlapi/resources/containers/__init__.py +3 -0
- aimlapi/resources/containers/containers.py +3 -0
- aimlapi/resources/containers/files/__init__.py +3 -0
- aimlapi/resources/containers/files/content.py +3 -0
- aimlapi/resources/containers/files/files.py +3 -0
- aimlapi/resources/conversations/__init__.py +3 -0
- aimlapi/resources/conversations/conversations.py +3 -0
- aimlapi/resources/conversations/items.py +3 -0
- aimlapi/resources/embeddings.py +3 -0
- aimlapi/resources/evals/__init__.py +3 -0
- aimlapi/resources/evals/evals.py +3 -0
- aimlapi/resources/evals/runs/__init__.py +3 -0
- aimlapi/resources/evals/runs/output_items.py +3 -0
- aimlapi/resources/evals/runs/runs.py +3 -0
- aimlapi/resources/files.py +3 -0
- aimlapi/resources/fine_tuning/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/alpha.py +3 -0
- aimlapi/resources/fine_tuning/alpha/graders.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/permissions.py +3 -0
- aimlapi/resources/fine_tuning/fine_tuning.py +3 -0
- aimlapi/resources/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/resources/fine_tuning/jobs/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/jobs/jobs.py +3 -0
- aimlapi/resources/images.py +184 -0
- aimlapi/resources/models.py +3 -0
- aimlapi/resources/moderations.py +3 -0
- aimlapi/resources/realtime/__init__.py +3 -0
- aimlapi/resources/realtime/calls.py +3 -0
- aimlapi/resources/realtime/client_secrets.py +3 -0
- aimlapi/resources/realtime/realtime.py +3 -0
- aimlapi/resources/responses/__init__.py +4 -0
- aimlapi/resources/responses/input_items.py +3 -0
- aimlapi/resources/responses/input_tokens.py +3 -0
- aimlapi/resources/responses/responses.py +229 -0
- aimlapi/resources/uploads/__init__.py +19 -0
- aimlapi/resources/uploads/parts.py +3 -0
- aimlapi/resources/uploads/uploads.py +99 -0
- aimlapi/resources/vector_stores/__init__.py +3 -0
- aimlapi/resources/vector_stores/file_batches.py +3 -0
- aimlapi/resources/vector_stores/files.py +3 -0
- aimlapi/resources/vector_stores/vector_stores.py +3 -0
- aimlapi/resources/videos.py +267 -0
- aimlapi/resources/webhooks.py +3 -0
- aimlapi/types/__init__.py +3 -0
- aimlapi/types/audio/__init__.py +3 -0
- aimlapi/types/audio/speech_create_params.py +3 -0
- aimlapi/types/audio/speech_model.py +3 -0
- aimlapi/types/audio/transcription.py +3 -0
- aimlapi/types/audio/transcription_create_params.py +3 -0
- aimlapi/types/audio/transcription_create_response.py +3 -0
- aimlapi/types/audio/transcription_diarized.py +3 -0
- aimlapi/types/audio/transcription_diarized_segment.py +3 -0
- aimlapi/types/audio/transcription_include.py +3 -0
- aimlapi/types/audio/transcription_segment.py +3 -0
- aimlapi/types/audio/transcription_stream_event.py +3 -0
- aimlapi/types/audio/transcription_text_delta_event.py +3 -0
- aimlapi/types/audio/transcription_text_done_event.py +3 -0
- aimlapi/types/audio/transcription_text_segment_event.py +3 -0
- aimlapi/types/audio/transcription_verbose.py +3 -0
- aimlapi/types/audio/transcription_word.py +3 -0
- aimlapi/types/audio/translation.py +3 -0
- aimlapi/types/audio/translation_create_params.py +3 -0
- aimlapi/types/audio/translation_create_response.py +3 -0
- aimlapi/types/audio/translation_verbose.py +3 -0
- aimlapi/types/audio_model.py +3 -0
- aimlapi/types/audio_response_format.py +3 -0
- aimlapi/types/auto_file_chunking_strategy_param.py +3 -0
- aimlapi/types/batch.py +3 -0
- aimlapi/types/batch_create_params.py +3 -0
- aimlapi/types/batch_error.py +3 -0
- aimlapi/types/batch_list_params.py +3 -0
- aimlapi/types/batch_request_counts.py +3 -0
- aimlapi/types/batch_usage.py +3 -0
- aimlapi/types/beta/__init__.py +3 -0
- aimlapi/types/beta/assistant.py +3 -0
- aimlapi/types/beta/assistant_create_params.py +3 -0
- aimlapi/types/beta/assistant_deleted.py +3 -0
- aimlapi/types/beta/assistant_list_params.py +3 -0
- aimlapi/types/beta/assistant_response_format_option.py +3 -0
- aimlapi/types/beta/assistant_response_format_option_param.py +3 -0
- aimlapi/types/beta/assistant_stream_event.py +3 -0
- aimlapi/types/beta/assistant_tool.py +3 -0
- aimlapi/types/beta/assistant_tool_choice.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_param.py +3 -0
- aimlapi/types/beta/assistant_tool_param.py +3 -0
- aimlapi/types/beta/assistant_update_params.py +3 -0
- aimlapi/types/beta/chat/__init__.py +3 -0
- aimlapi/types/beta/chatkit/__init__.py +3 -0
- aimlapi/types/beta/chatkit/chat_session.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_automatic_thread_titling.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_expires_after_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_file_upload.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_history.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_status.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_workflow_param.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_attachment.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_response_output_text.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_assistant_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_item_list.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_user_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_widget_item.py +3 -0
- aimlapi/types/beta/chatkit/session_create_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_delete_response.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_items_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_params.py +3 -0
- aimlapi/types/beta/chatkit_workflow.py +3 -0
- aimlapi/types/beta/code_interpreter_tool.py +3 -0
- aimlapi/types/beta/code_interpreter_tool_param.py +3 -0
- aimlapi/types/beta/file_search_tool.py +3 -0
- aimlapi/types/beta/file_search_tool_param.py +3 -0
- aimlapi/types/beta/function_tool.py +3 -0
- aimlapi/types/beta/function_tool_param.py +3 -0
- aimlapi/types/beta/realtime/__init__.py +3 -0
- aimlapi/types/beta/realtime/conversation_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference_param.py +3 -0
- aimlapi/types/beta/realtime/error_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/beta/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/beta/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/beta/realtime/realtime_response.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_status.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/beta/realtime/realtime_server_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_created_event.py +3 -0
- aimlapi/types/beta/realtime/response_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_done_event.py +3 -0
- aimlapi/types/beta/realtime/session.py +3 -0
- aimlapi/types/beta/realtime/session_create_params.py +3 -0
- aimlapi/types/beta/realtime/session_create_response.py +3 -0
- aimlapi/types/beta/realtime/session_created_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event_param.py +3 -0
- aimlapi/types/beta/realtime/session_updated_event.py +3 -0
- aimlapi/types/beta/realtime/transcription_session.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_create_params.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update_param.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_updated_event.py +3 -0
- aimlapi/types/beta/thread.py +3 -0
- aimlapi/types/beta/thread_create_and_run_params.py +3 -0
- aimlapi/types/beta/thread_create_params.py +3 -0
- aimlapi/types/beta/thread_deleted.py +3 -0
- aimlapi/types/beta/thread_update_params.py +3 -0
- aimlapi/types/beta/threads/__init__.py +3 -0
- aimlapi/types/beta/threads/annotation.py +3 -0
- aimlapi/types/beta/threads/annotation_delta.py +3 -0
- aimlapi/types/beta/threads/file_citation_annotation.py +3 -0
- aimlapi/types/beta/threads/file_citation_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/image_file.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_file_delta.py +3 -0
- aimlapi/types/beta/threads/image_file_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_file_param.py +3 -0
- aimlapi/types/beta/threads/image_url.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_url_delta.py +3 -0
- aimlapi/types/beta/threads/image_url_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_url_param.py +3 -0
- aimlapi/types/beta/threads/message.py +3 -0
- aimlapi/types/beta/threads/message_content.py +3 -0
- aimlapi/types/beta/threads/message_content_delta.py +3 -0
- aimlapi/types/beta/threads/message_content_part_param.py +3 -0
- aimlapi/types/beta/threads/message_create_params.py +3 -0
- aimlapi/types/beta/threads/message_deleted.py +3 -0
- aimlapi/types/beta/threads/message_delta.py +3 -0
- aimlapi/types/beta/threads/message_delta_event.py +3 -0
- aimlapi/types/beta/threads/message_list_params.py +3 -0
- aimlapi/types/beta/threads/message_update_params.py +3 -0
- aimlapi/types/beta/threads/refusal_content_block.py +3 -0
- aimlapi/types/beta/threads/refusal_delta_block.py +3 -0
- aimlapi/types/beta/threads/required_action_function_tool_call.py +3 -0
- aimlapi/types/beta/threads/run.py +3 -0
- aimlapi/types/beta/threads/run_create_params.py +3 -0
- aimlapi/types/beta/threads/run_list_params.py +3 -0
- aimlapi/types/beta/threads/run_status.py +3 -0
- aimlapi/types/beta/threads/run_submit_tool_outputs_params.py +3 -0
- aimlapi/types/beta/threads/run_update_params.py +3 -0
- aimlapi/types/beta/threads/runs/__init__.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_logs.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_output_image.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/message_creation_step_details.py +3 -0
- aimlapi/types/beta/threads/runs/run_step.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_event.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_message_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_include.py +3 -0
- aimlapi/types/beta/threads/runs/step_list_params.py +3 -0
- aimlapi/types/beta/threads/runs/step_retrieve_params.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta_object.py +3 -0
- aimlapi/types/beta/threads/runs/tool_calls_step_details.py +3 -0
- aimlapi/types/beta/threads/text.py +3 -0
- aimlapi/types/beta/threads/text_content_block.py +3 -0
- aimlapi/types/beta/threads/text_content_block_param.py +3 -0
- aimlapi/types/beta/threads/text_delta.py +3 -0
- aimlapi/types/beta/threads/text_delta_block.py +3 -0
- aimlapi/types/chat/__init__.py +3 -0
- aimlapi/types/chat/chat_completion.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tools_param.py +3 -0
- aimlapi/types/chat/chat_completion_assistant_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_audio.py +3 -0
- aimlapi/types/chat/chat_completion_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_chunk.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_input_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_refusal_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text_param.py +3 -0
- aimlapi/types/chat/chat_completion_custom_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_deleted.py +3 -0
- aimlapi/types/chat/chat_completion_developer_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_call_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_message.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_modality.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_custom_param.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_prediction_content_param.py +3 -0
- aimlapi/types/chat/chat_completion_reasoning_effort.py +3 -0
- aimlapi/types/chat/chat_completion_role.py +3 -0
- aimlapi/types/chat/chat_completion_store_message.py +3 -0
- aimlapi/types/chat/chat_completion_stream_options_param.py +3 -0
- aimlapi/types/chat/chat_completion_system_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_token_logprob.py +3 -0
- aimlapi/types/chat/chat_completion_tool_choice_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_user_message_param.py +3 -0
- aimlapi/types/chat/completion_create_params.py +3 -0
- aimlapi/types/chat/completion_list_params.py +3 -0
- aimlapi/types/chat/completion_update_params.py +3 -0
- aimlapi/types/chat/completions/__init__.py +3 -0
- aimlapi/types/chat/completions/message_list_params.py +3 -0
- aimlapi/types/chat/parsed_chat_completion.py +3 -0
- aimlapi/types/chat/parsed_function_tool_call.py +3 -0
- aimlapi/types/chat_model.py +3 -0
- aimlapi/types/completion.py +3 -0
- aimlapi/types/completion_choice.py +3 -0
- aimlapi/types/completion_create_params.py +3 -0
- aimlapi/types/completion_usage.py +3 -0
- aimlapi/types/container_create_params.py +3 -0
- aimlapi/types/container_create_response.py +3 -0
- aimlapi/types/container_list_params.py +3 -0
- aimlapi/types/container_list_response.py +3 -0
- aimlapi/types/container_retrieve_response.py +3 -0
- aimlapi/types/containers/__init__.py +3 -0
- aimlapi/types/containers/file_create_params.py +3 -0
- aimlapi/types/containers/file_create_response.py +3 -0
- aimlapi/types/containers/file_list_params.py +3 -0
- aimlapi/types/containers/file_list_response.py +3 -0
- aimlapi/types/containers/file_retrieve_response.py +3 -0
- aimlapi/types/containers/files/__init__.py +3 -0
- aimlapi/types/conversations/__init__.py +3 -0
- aimlapi/types/conversations/computer_screenshot_content.py +3 -0
- aimlapi/types/conversations/conversation.py +3 -0
- aimlapi/types/conversations/conversation_create_params.py +3 -0
- aimlapi/types/conversations/conversation_deleted_resource.py +3 -0
- aimlapi/types/conversations/conversation_item.py +3 -0
- aimlapi/types/conversations/conversation_item_list.py +3 -0
- aimlapi/types/conversations/conversation_update_params.py +3 -0
- aimlapi/types/conversations/input_file_content.py +3 -0
- aimlapi/types/conversations/input_file_content_param.py +3 -0
- aimlapi/types/conversations/input_image_content.py +3 -0
- aimlapi/types/conversations/input_image_content_param.py +3 -0
- aimlapi/types/conversations/input_text_content.py +3 -0
- aimlapi/types/conversations/input_text_content_param.py +3 -0
- aimlapi/types/conversations/item_create_params.py +3 -0
- aimlapi/types/conversations/item_list_params.py +3 -0
- aimlapi/types/conversations/item_retrieve_params.py +3 -0
- aimlapi/types/conversations/message.py +3 -0
- aimlapi/types/conversations/output_text_content.py +3 -0
- aimlapi/types/conversations/output_text_content_param.py +3 -0
- aimlapi/types/conversations/refusal_content.py +3 -0
- aimlapi/types/conversations/refusal_content_param.py +3 -0
- aimlapi/types/conversations/summary_text_content.py +3 -0
- aimlapi/types/conversations/text_content.py +3 -0
- aimlapi/types/create_embedding_response.py +3 -0
- aimlapi/types/embedding.py +3 -0
- aimlapi/types/embedding_create_params.py +3 -0
- aimlapi/types/embedding_model.py +3 -0
- aimlapi/types/eval_create_params.py +3 -0
- aimlapi/types/eval_create_response.py +3 -0
- aimlapi/types/eval_custom_data_source_config.py +3 -0
- aimlapi/types/eval_delete_response.py +3 -0
- aimlapi/types/eval_list_params.py +3 -0
- aimlapi/types/eval_list_response.py +3 -0
- aimlapi/types/eval_retrieve_response.py +3 -0
- aimlapi/types/eval_stored_completions_data_source_config.py +3 -0
- aimlapi/types/eval_update_params.py +3 -0
- aimlapi/types/eval_update_response.py +3 -0
- aimlapi/types/evals/__init__.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source_param.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source_param.py +3 -0
- aimlapi/types/evals/eval_api_error.py +3 -0
- aimlapi/types/evals/run_cancel_response.py +3 -0
- aimlapi/types/evals/run_create_params.py +3 -0
- aimlapi/types/evals/run_create_response.py +3 -0
- aimlapi/types/evals/run_delete_response.py +3 -0
- aimlapi/types/evals/run_list_params.py +3 -0
- aimlapi/types/evals/run_list_response.py +3 -0
- aimlapi/types/evals/run_retrieve_response.py +3 -0
- aimlapi/types/evals/runs/__init__.py +3 -0
- aimlapi/types/evals/runs/output_item_list_params.py +3 -0
- aimlapi/types/evals/runs/output_item_list_response.py +3 -0
- aimlapi/types/evals/runs/output_item_retrieve_response.py +3 -0
- aimlapi/types/file_chunking_strategy.py +3 -0
- aimlapi/types/file_chunking_strategy_param.py +3 -0
- aimlapi/types/file_content.py +3 -0
- aimlapi/types/file_create_params.py +3 -0
- aimlapi/types/file_deleted.py +3 -0
- aimlapi/types/file_list_params.py +3 -0
- aimlapi/types/file_object.py +3 -0
- aimlapi/types/file_purpose.py +3 -0
- aimlapi/types/fine_tuning/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_response.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_delete_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_response.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/dpo_method.py +3 -0
- aimlapi/types/fine_tuning/dpo_method_param.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_event.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +3 -0
- aimlapi/types/fine_tuning/job_create_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_events_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/types/fine_tuning/jobs/checkpoint_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_method.py +3 -0
- aimlapi/types/fine_tuning/supervised_method_param.py +3 -0
- aimlapi/types/graders/__init__.py +3 -0
- aimlapi/types/graders/label_model_grader.py +3 -0
- aimlapi/types/graders/label_model_grader_param.py +3 -0
- aimlapi/types/graders/multi_grader.py +3 -0
- aimlapi/types/graders/multi_grader_param.py +3 -0
- aimlapi/types/graders/python_grader.py +3 -0
- aimlapi/types/graders/python_grader_param.py +3 -0
- aimlapi/types/graders/score_model_grader.py +3 -0
- aimlapi/types/graders/score_model_grader_param.py +3 -0
- aimlapi/types/graders/string_check_grader.py +3 -0
- aimlapi/types/graders/string_check_grader_param.py +3 -0
- aimlapi/types/graders/text_similarity_grader.py +3 -0
- aimlapi/types/graders/text_similarity_grader_param.py +3 -0
- aimlapi/types/image.py +3 -0
- aimlapi/types/image_create_variation_params.py +3 -0
- aimlapi/types/image_edit_completed_event.py +3 -0
- aimlapi/types/image_edit_params.py +3 -0
- aimlapi/types/image_edit_partial_image_event.py +3 -0
- aimlapi/types/image_edit_stream_event.py +3 -0
- aimlapi/types/image_gen_completed_event.py +3 -0
- aimlapi/types/image_gen_partial_image_event.py +3 -0
- aimlapi/types/image_gen_stream_event.py +3 -0
- aimlapi/types/image_generate_params.py +3 -0
- aimlapi/types/image_model.py +3 -0
- aimlapi/types/images_response.py +3 -0
- aimlapi/types/model.py +3 -0
- aimlapi/types/model_deleted.py +3 -0
- aimlapi/types/moderation.py +3 -0
- aimlapi/types/moderation_create_params.py +3 -0
- aimlapi/types/moderation_create_response.py +3 -0
- aimlapi/types/moderation_image_url_input_param.py +3 -0
- aimlapi/types/moderation_model.py +3 -0
- aimlapi/types/moderation_multi_modal_input_param.py +3 -0
- aimlapi/types/moderation_text_input_param.py +3 -0
- aimlapi/types/other_file_chunking_strategy_object.py +3 -0
- aimlapi/types/realtime/__init__.py +3 -0
- aimlapi/types/realtime/audio_transcription.py +3 -0
- aimlapi/types/realtime/audio_transcription_param.py +3 -0
- aimlapi/types/realtime/call_accept_params.py +3 -0
- aimlapi/types/realtime/call_create_params.py +3 -0
- aimlapi/types/realtime/call_refer_params.py +3 -0
- aimlapi/types/realtime/call_reject_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_response.py +3 -0
- aimlapi/types/realtime/conversation_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item.py +3 -0
- aimlapi/types/realtime/conversation_item_added.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/realtime/conversation_item_done.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_segment.py +3 -0
- aimlapi/types/realtime/conversation_item_param.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_timeout_triggered.py +3 -0
- aimlapi/types/realtime/log_prob_properties.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_completed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_failed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_in_progress.py +3 -0
- aimlapi/types/realtime/noise_reduction_type.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/realtime/realtime_audio_config.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_client_event.py +3 -0
- aimlapi/types/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message_param.py +3 -0
- aimlapi/types/realtime/realtime_error.py +3 -0
- aimlapi/types/realtime/realtime_error_event.py +3 -0
- aimlapi/types/realtime/realtime_function_tool.py +3 -0
- aimlapi/types/realtime/realtime_function_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error_param.py +3 -0
- aimlapi/types/realtime/realtime_response.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params_param.py +3 -0
- aimlapi/types/realtime/realtime_response_status.py +3 -0
- aimlapi/types/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_input_token_details.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_output_token_details.py +3 -0
- aimlapi/types/realtime/realtime_server_event.py +3 -0
- aimlapi/types/realtime/realtime_session_client_secret.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union_param.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_truncation.py +3 -0
- aimlapi/types/realtime/realtime_truncation_param.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio_param.py +3 -0
- aimlapi/types/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/realtime/response_create_event.py +3 -0
- aimlapi/types/realtime/response_create_event_param.py +3 -0
- aimlapi/types/realtime/response_created_event.py +3 -0
- aimlapi/types/realtime/response_done_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_delta.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_done.py +3 -0
- aimlapi/types/realtime/response_mcp_call_completed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_failed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_in_progress.py +3 -0
- aimlapi/types/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/realtime/response_text_done_event.py +3 -0
- aimlapi/types/realtime/session_created_event.py +3 -0
- aimlapi/types/realtime/session_update_event.py +3 -0
- aimlapi/types/realtime/session_update_event_param.py +3 -0
- aimlapi/types/realtime/session_updated_event.py +3 -0
- aimlapi/types/responses/__init__.py +3 -0
- aimlapi/types/responses/computer_tool.py +3 -0
- aimlapi/types/responses/computer_tool_param.py +3 -0
- aimlapi/types/responses/custom_tool.py +3 -0
- aimlapi/types/responses/custom_tool_param.py +3 -0
- aimlapi/types/responses/easy_input_message.py +3 -0
- aimlapi/types/responses/easy_input_message_param.py +3 -0
- aimlapi/types/responses/file_search_tool.py +3 -0
- aimlapi/types/responses/file_search_tool_param.py +3 -0
- aimlapi/types/responses/function_tool.py +3 -0
- aimlapi/types/responses/function_tool_param.py +3 -0
- aimlapi/types/responses/input_item_list_params.py +3 -0
- aimlapi/types/responses/input_token_count_params.py +3 -0
- aimlapi/types/responses/input_token_count_response.py +3 -0
- aimlapi/types/responses/parsed_response.py +3 -0
- aimlapi/types/responses/response.py +3 -0
- aimlapi/types/responses/response_audio_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_done_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_delta_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_completed_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_interpreting_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call_param.py +3 -0
- aimlapi/types/responses/response_completed_event.py +3 -0
- aimlapi/types/responses/response_computer_tool_call.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot_param.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_param.py +3 -0
- aimlapi/types/responses/response_content_part_added_event.py +3 -0
- aimlapi/types/responses/response_content_part_done_event.py +3 -0
- aimlapi/types/responses/response_conversation_param.py +3 -0
- aimlapi/types/responses/response_create_params.py +3 -0
- aimlapi/types/responses/response_created_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_delta_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_done_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output_param.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_param.py +3 -0
- aimlapi/types/responses/response_error.py +3 -0
- aimlapi/types/responses/response_error_event.py +3 -0
- aimlapi/types/responses/response_failed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_searching_event.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call_param.py +3 -0
- aimlapi/types/responses/response_format_text_config.py +3 -0
- aimlapi/types/responses/response_format_text_config_param.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config_param.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_function_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list_param.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_param.py +3 -0
- aimlapi/types/responses/response_function_tool_call.py +3 -0
- aimlapi/types/responses/response_function_tool_call_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_param.py +3 -0
- aimlapi/types/responses/response_function_web_search.py +3 -0
- aimlapi/types/responses/response_function_web_search_param.py +3 -0
- aimlapi/types/responses/response_image_gen_call_completed_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_generating_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_partial_image_event.py +3 -0
- aimlapi/types/responses/response_in_progress_event.py +3 -0
- aimlapi/types/responses/response_includable.py +3 -0
- aimlapi/types/responses/response_incomplete_event.py +3 -0
- aimlapi/types/responses/response_input_audio.py +3 -0
- aimlapi/types/responses/response_input_audio_param.py +3 -0
- aimlapi/types/responses/response_input_content.py +3 -0
- aimlapi/types/responses/response_input_content_param.py +3 -0
- aimlapi/types/responses/response_input_file.py +3 -0
- aimlapi/types/responses/response_input_file_content.py +3 -0
- aimlapi/types/responses/response_input_file_content_param.py +3 -0
- aimlapi/types/responses/response_input_file_param.py +3 -0
- aimlapi/types/responses/response_input_image.py +3 -0
- aimlapi/types/responses/response_input_image_content.py +3 -0
- aimlapi/types/responses/response_input_image_content_param.py +3 -0
- aimlapi/types/responses/response_input_image_param.py +3 -0
- aimlapi/types/responses/response_input_item.py +3 -0
- aimlapi/types/responses/response_input_item_param.py +3 -0
- aimlapi/types/responses/response_input_message_content_list.py +3 -0
- aimlapi/types/responses/response_input_message_content_list_param.py +3 -0
- aimlapi/types/responses/response_input_message_item.py +3 -0
- aimlapi/types/responses/response_input_param.py +3 -0
- aimlapi/types/responses/response_input_text.py +3 -0
- aimlapi/types/responses/response_input_text_content.py +3 -0
- aimlapi/types/responses/response_input_text_content_param.py +3 -0
- aimlapi/types/responses/response_input_text_param.py +3 -0
- aimlapi/types/responses/response_item.py +3 -0
- aimlapi/types/responses/response_item_list.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_in_progress_event.py +3 -0
- aimlapi/types/responses/response_output_item.py +3 -0
- aimlapi/types/responses/response_output_item_added_event.py +3 -0
- aimlapi/types/responses/response_output_item_done_event.py +3 -0
- aimlapi/types/responses/response_output_message.py +3 -0
- aimlapi/types/responses/response_output_message_param.py +3 -0
- aimlapi/types/responses/response_output_refusal.py +3 -0
- aimlapi/types/responses/response_output_refusal_param.py +3 -0
- aimlapi/types/responses/response_output_text.py +3 -0
- aimlapi/types/responses/response_output_text_annotation_added_event.py +3 -0
- aimlapi/types/responses/response_output_text_param.py +3 -0
- aimlapi/types/responses/response_prompt.py +3 -0
- aimlapi/types/responses/response_prompt_param.py +3 -0
- aimlapi/types/responses/response_queued_event.py +3 -0
- aimlapi/types/responses/response_reasoning_item.py +3 -0
- aimlapi/types/responses/response_reasoning_item_param.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_added_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_done_event.py +3 -0
- aimlapi/types/responses/response_refusal_delta_event.py +3 -0
- aimlapi/types/responses/response_refusal_done_event.py +3 -0
- aimlapi/types/responses/response_retrieve_params.py +3 -0
- aimlapi/types/responses/response_status.py +3 -0
- aimlapi/types/responses/response_stream_event.py +3 -0
- aimlapi/types/responses/response_text_config.py +3 -0
- aimlapi/types/responses/response_text_config_param.py +3 -0
- aimlapi/types/responses/response_text_delta_event.py +3 -0
- aimlapi/types/responses/response_text_done_event.py +3 -0
- aimlapi/types/responses/response_usage.py +3 -0
- aimlapi/types/responses/response_web_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_searching_event.py +3 -0
- aimlapi/types/responses/tool.py +3 -0
- aimlapi/types/responses/tool_choice_allowed.py +3 -0
- aimlapi/types/responses/tool_choice_allowed_param.py +3 -0
- aimlapi/types/responses/tool_choice_custom.py +3 -0
- aimlapi/types/responses/tool_choice_custom_param.py +3 -0
- aimlapi/types/responses/tool_choice_function.py +3 -0
- aimlapi/types/responses/tool_choice_function_param.py +3 -0
- aimlapi/types/responses/tool_choice_mcp.py +3 -0
- aimlapi/types/responses/tool_choice_mcp_param.py +3 -0
- aimlapi/types/responses/tool_choice_options.py +3 -0
- aimlapi/types/responses/tool_choice_types.py +3 -0
- aimlapi/types/responses/tool_choice_types_param.py +3 -0
- aimlapi/types/responses/tool_param.py +3 -0
- aimlapi/types/responses/web_search_preview_tool.py +3 -0
- aimlapi/types/responses/web_search_preview_tool_param.py +3 -0
- aimlapi/types/responses/web_search_tool.py +3 -0
- aimlapi/types/responses/web_search_tool_param.py +3 -0
- aimlapi/types/shared/__init__.py +3 -0
- aimlapi/types/shared/all_models.py +3 -0
- aimlapi/types/shared/chat_model.py +3 -0
- aimlapi/types/shared/comparison_filter.py +3 -0
- aimlapi/types/shared/compound_filter.py +3 -0
- aimlapi/types/shared/custom_tool_input_format.py +3 -0
- aimlapi/types/shared/error_object.py +3 -0
- aimlapi/types/shared/function_definition.py +3 -0
- aimlapi/types/shared/function_parameters.py +3 -0
- aimlapi/types/shared/metadata.py +3 -0
- aimlapi/types/shared/reasoning.py +3 -0
- aimlapi/types/shared/reasoning_effort.py +3 -0
- aimlapi/types/shared/response_format_json_object.py +3 -0
- aimlapi/types/shared/response_format_json_schema.py +3 -0
- aimlapi/types/shared/response_format_text.py +3 -0
- aimlapi/types/shared/response_format_text_grammar.py +3 -0
- aimlapi/types/shared/response_format_text_python.py +3 -0
- aimlapi/types/shared/responses_model.py +3 -0
- aimlapi/types/shared_params/__init__.py +3 -0
- aimlapi/types/shared_params/chat_model.py +3 -0
- aimlapi/types/shared_params/comparison_filter.py +3 -0
- aimlapi/types/shared_params/compound_filter.py +3 -0
- aimlapi/types/shared_params/custom_tool_input_format.py +3 -0
- aimlapi/types/shared_params/function_definition.py +3 -0
- aimlapi/types/shared_params/function_parameters.py +3 -0
- aimlapi/types/shared_params/metadata.py +3 -0
- aimlapi/types/shared_params/reasoning.py +3 -0
- aimlapi/types/shared_params/reasoning_effort.py +3 -0
- aimlapi/types/shared_params/response_format_json_object.py +3 -0
- aimlapi/types/shared_params/response_format_json_schema.py +3 -0
- aimlapi/types/shared_params/response_format_text.py +3 -0
- aimlapi/types/shared_params/responses_model.py +3 -0
- aimlapi/types/static_file_chunking_strategy.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object_param.py +3 -0
- aimlapi/types/static_file_chunking_strategy_param.py +3 -0
- aimlapi/types/upload.py +3 -0
- aimlapi/types/upload_complete_params.py +3 -0
- aimlapi/types/upload_create_params.py +3 -0
- aimlapi/types/uploads/__init__.py +3 -0
- aimlapi/types/uploads/part_create_params.py +3 -0
- aimlapi/types/uploads/upload_part.py +3 -0
- aimlapi/types/vector_store.py +3 -0
- aimlapi/types/vector_store_create_params.py +3 -0
- aimlapi/types/vector_store_deleted.py +3 -0
- aimlapi/types/vector_store_list_params.py +3 -0
- aimlapi/types/vector_store_search_params.py +3 -0
- aimlapi/types/vector_store_search_response.py +3 -0
- aimlapi/types/vector_store_update_params.py +3 -0
- aimlapi/types/vector_stores/__init__.py +3 -0
- aimlapi/types/vector_stores/file_batch_create_params.py +3 -0
- aimlapi/types/vector_stores/file_batch_list_files_params.py +3 -0
- aimlapi/types/vector_stores/file_content_response.py +3 -0
- aimlapi/types/vector_stores/file_create_params.py +3 -0
- aimlapi/types/vector_stores/file_list_params.py +3 -0
- aimlapi/types/vector_stores/file_update_params.py +3 -0
- aimlapi/types/vector_stores/vector_store_file.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_batch.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_deleted.py +3 -0
- aimlapi/types/video.py +3 -0
- aimlapi/types/video_create_error.py +3 -0
- aimlapi/types/video_create_params.py +3 -0
- aimlapi/types/video_delete_response.py +3 -0
- aimlapi/types/video_download_content_params.py +3 -0
- aimlapi/types/video_list_params.py +3 -0
- aimlapi/types/video_model.py +3 -0
- aimlapi/types/video_remix_params.py +3 -0
- aimlapi/types/video_seconds.py +3 -0
- aimlapi/types/video_size.py +3 -0
- aimlapi/types/webhooks/__init__.py +3 -0
- aimlapi/types/webhooks/batch_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_expired_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_canceled_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/realtime_call_incoming_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_incomplete_webhook_event.py +3 -0
- aimlapi/types/webhooks/unwrap_webhook_event.py +3 -0
- aimlapi/types/websocket_connection_options.py +3 -0
- aimlapi/version.py +3 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/METADATA +886 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/RECORD +1958 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/WHEEL +4 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/entry_points.txt +2 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/licenses/LICENSE +201 -0
- openai/__init__.py +395 -0
- openai/__main__.py +3 -0
- openai/_base_client.py +2027 -0
- openai/_client.py +1272 -0
- openai/_compat.py +231 -0
- openai/_constants.py +14 -0
- openai/_exceptions.py +161 -0
- openai/_extras/__init__.py +3 -0
- openai/_extras/_common.py +21 -0
- openai/_extras/numpy_proxy.py +37 -0
- openai/_extras/pandas_proxy.py +28 -0
- openai/_extras/sounddevice_proxy.py +28 -0
- openai/_files.py +123 -0
- openai/_legacy_response.py +488 -0
- openai/_models.py +897 -0
- openai/_module_client.py +173 -0
- openai/_qs.py +150 -0
- openai/_resource.py +43 -0
- openai/_response.py +848 -0
- openai/_streaming.py +408 -0
- openai/_types.py +264 -0
- openai/_utils/__init__.py +67 -0
- openai/_utils/_compat.py +45 -0
- openai/_utils/_datetime_parse.py +136 -0
- openai/_utils/_logs.py +42 -0
- openai/_utils/_proxy.py +65 -0
- openai/_utils/_reflection.py +45 -0
- openai/_utils/_resources_proxy.py +24 -0
- openai/_utils/_streams.py +12 -0
- openai/_utils/_sync.py +58 -0
- openai/_utils/_transform.py +457 -0
- openai/_utils/_typing.py +156 -0
- openai/_utils/_utils.py +437 -0
- openai/_version.py +4 -0
- openai/cli/__init__.py +1 -0
- openai/cli/_api/__init__.py +1 -0
- openai/cli/_api/_main.py +17 -0
- openai/cli/_api/audio.py +108 -0
- openai/cli/_api/chat/__init__.py +13 -0
- openai/cli/_api/chat/completions.py +160 -0
- openai/cli/_api/completions.py +173 -0
- openai/cli/_api/files.py +80 -0
- openai/cli/_api/fine_tuning/__init__.py +13 -0
- openai/cli/_api/fine_tuning/jobs.py +170 -0
- openai/cli/_api/image.py +139 -0
- openai/cli/_api/models.py +45 -0
- openai/cli/_cli.py +233 -0
- openai/cli/_errors.py +21 -0
- openai/cli/_models.py +17 -0
- openai/cli/_progress.py +59 -0
- openai/cli/_tools/__init__.py +1 -0
- openai/cli/_tools/_main.py +17 -0
- openai/cli/_tools/fine_tunes.py +63 -0
- openai/cli/_tools/migrate.py +164 -0
- openai/cli/_utils.py +45 -0
- openai/helpers/__init__.py +4 -0
- openai/helpers/local_audio_player.py +165 -0
- openai/helpers/microphone.py +100 -0
- openai/lib/.keep +4 -0
- openai/lib/__init__.py +2 -0
- openai/lib/_old_api.py +72 -0
- openai/lib/_parsing/__init__.py +12 -0
- openai/lib/_parsing/_completions.py +305 -0
- openai/lib/_parsing/_responses.py +180 -0
- openai/lib/_pydantic.py +155 -0
- openai/lib/_realtime.py +92 -0
- openai/lib/_tools.py +66 -0
- openai/lib/_validators.py +809 -0
- openai/lib/azure.py +647 -0
- openai/lib/streaming/__init__.py +8 -0
- openai/lib/streaming/_assistants.py +1038 -0
- openai/lib/streaming/_deltas.py +64 -0
- openai/lib/streaming/chat/__init__.py +27 -0
- openai/lib/streaming/chat/_completions.py +770 -0
- openai/lib/streaming/chat/_events.py +123 -0
- openai/lib/streaming/chat/_types.py +20 -0
- openai/lib/streaming/responses/__init__.py +13 -0
- openai/lib/streaming/responses/_events.py +148 -0
- openai/lib/streaming/responses/_responses.py +372 -0
- openai/lib/streaming/responses/_types.py +10 -0
- openai/pagination.py +190 -0
- openai/py.typed +0 -0
- openai/resources/__init__.py +229 -0
- openai/resources/audio/__init__.py +61 -0
- openai/resources/audio/audio.py +166 -0
- openai/resources/audio/speech.py +255 -0
- openai/resources/audio/transcriptions.py +980 -0
- openai/resources/audio/translations.py +367 -0
- openai/resources/batches.py +530 -0
- openai/resources/beta/__init__.py +61 -0
- openai/resources/beta/assistants.py +1049 -0
- openai/resources/beta/beta.py +187 -0
- openai/resources/beta/chatkit/__init__.py +47 -0
- openai/resources/beta/chatkit/chatkit.py +134 -0
- openai/resources/beta/chatkit/sessions.py +301 -0
- openai/resources/beta/chatkit/threads.py +521 -0
- openai/resources/beta/realtime/__init__.py +47 -0
- openai/resources/beta/realtime/realtime.py +1094 -0
- openai/resources/beta/realtime/sessions.py +424 -0
- openai/resources/beta/realtime/transcription_sessions.py +282 -0
- openai/resources/beta/threads/__init__.py +47 -0
- openai/resources/beta/threads/messages.py +718 -0
- openai/resources/beta/threads/runs/__init__.py +33 -0
- openai/resources/beta/threads/runs/runs.py +3122 -0
- openai/resources/beta/threads/runs/steps.py +399 -0
- openai/resources/beta/threads/threads.py +1935 -0
- openai/resources/chat/__init__.py +33 -0
- openai/resources/chat/chat.py +102 -0
- openai/resources/chat/completions/__init__.py +33 -0
- openai/resources/chat/completions/completions.py +3143 -0
- openai/resources/chat/completions/messages.py +212 -0
- openai/resources/completions.py +1160 -0
- openai/resources/containers/__init__.py +33 -0
- openai/resources/containers/containers.py +510 -0
- openai/resources/containers/files/__init__.py +33 -0
- openai/resources/containers/files/content.py +173 -0
- openai/resources/containers/files/files.py +545 -0
- openai/resources/conversations/__init__.py +33 -0
- openai/resources/conversations/conversations.py +486 -0
- openai/resources/conversations/items.py +557 -0
- openai/resources/embeddings.py +298 -0
- openai/resources/evals/__init__.py +33 -0
- openai/resources/evals/evals.py +662 -0
- openai/resources/evals/runs/__init__.py +33 -0
- openai/resources/evals/runs/output_items.py +315 -0
- openai/resources/evals/runs/runs.py +634 -0
- openai/resources/files.py +770 -0
- openai/resources/fine_tuning/__init__.py +61 -0
- openai/resources/fine_tuning/alpha/__init__.py +33 -0
- openai/resources/fine_tuning/alpha/alpha.py +102 -0
- openai/resources/fine_tuning/alpha/graders.py +282 -0
- openai/resources/fine_tuning/checkpoints/__init__.py +33 -0
- openai/resources/fine_tuning/checkpoints/checkpoints.py +102 -0
- openai/resources/fine_tuning/checkpoints/permissions.py +418 -0
- openai/resources/fine_tuning/fine_tuning.py +166 -0
- openai/resources/fine_tuning/jobs/__init__.py +33 -0
- openai/resources/fine_tuning/jobs/checkpoints.py +199 -0
- openai/resources/fine_tuning/jobs/jobs.py +918 -0
- openai/resources/images.py +1858 -0
- openai/resources/models.py +306 -0
- openai/resources/moderations.py +197 -0
- openai/resources/realtime/__init__.py +47 -0
- openai/resources/realtime/calls.py +764 -0
- openai/resources/realtime/client_secrets.py +189 -0
- openai/resources/realtime/realtime.py +1079 -0
- openai/resources/responses/__init__.py +47 -0
- openai/resources/responses/input_items.py +226 -0
- openai/resources/responses/input_tokens.py +309 -0
- openai/resources/responses/responses.py +3130 -0
- openai/resources/uploads/__init__.py +33 -0
- openai/resources/uploads/parts.py +205 -0
- openai/resources/uploads/uploads.py +719 -0
- openai/resources/vector_stores/__init__.py +47 -0
- openai/resources/vector_stores/file_batches.py +813 -0
- openai/resources/vector_stores/files.py +939 -0
- openai/resources/vector_stores/vector_stores.py +875 -0
- openai/resources/videos.py +847 -0
- openai/resources/webhooks.py +210 -0
- openai/types/__init__.py +115 -0
- openai/types/audio/__init__.py +23 -0
- openai/types/audio/speech_create_params.py +57 -0
- openai/types/audio/speech_model.py +7 -0
- openai/types/audio/transcription.py +71 -0
- openai/types/audio/transcription_create_params.py +172 -0
- openai/types/audio/transcription_create_response.py +12 -0
- openai/types/audio/transcription_diarized.py +63 -0
- openai/types/audio/transcription_diarized_segment.py +32 -0
- openai/types/audio/transcription_include.py +7 -0
- openai/types/audio/transcription_segment.py +49 -0
- openai/types/audio/transcription_stream_event.py +16 -0
- openai/types/audio/transcription_text_delta_event.py +41 -0
- openai/types/audio/transcription_text_done_event.py +63 -0
- openai/types/audio/transcription_text_segment_event.py +27 -0
- openai/types/audio/transcription_verbose.py +38 -0
- openai/types/audio/transcription_word.py +16 -0
- openai/types/audio/translation.py +9 -0
- openai/types/audio/translation_create_params.py +49 -0
- openai/types/audio/translation_create_response.py +11 -0
- openai/types/audio/translation_verbose.py +22 -0
- openai/types/audio_model.py +7 -0
- openai/types/audio_response_format.py +7 -0
- openai/types/auto_file_chunking_strategy_param.py +12 -0
- openai/types/batch.py +104 -0
- openai/types/batch_create_params.py +72 -0
- openai/types/batch_error.py +21 -0
- openai/types/batch_list_params.py +24 -0
- openai/types/batch_request_counts.py +16 -0
- openai/types/batch_usage.py +35 -0
- openai/types/beta/__init__.py +34 -0
- openai/types/beta/assistant.py +134 -0
- openai/types/beta/assistant_create_params.py +220 -0
- openai/types/beta/assistant_deleted.py +15 -0
- openai/types/beta/assistant_list_params.py +39 -0
- openai/types/beta/assistant_response_format_option.py +14 -0
- openai/types/beta/assistant_response_format_option_param.py +16 -0
- openai/types/beta/assistant_stream_event.py +294 -0
- openai/types/beta/assistant_tool.py +15 -0
- openai/types/beta/assistant_tool_choice.py +16 -0
- openai/types/beta/assistant_tool_choice_function.py +10 -0
- openai/types/beta/assistant_tool_choice_function_param.py +12 -0
- openai/types/beta/assistant_tool_choice_option.py +10 -0
- openai/types/beta/assistant_tool_choice_option_param.py +12 -0
- openai/types/beta/assistant_tool_choice_param.py +16 -0
- openai/types/beta/assistant_tool_param.py +14 -0
- openai/types/beta/assistant_update_params.py +191 -0
- openai/types/beta/chat/__init__.py +3 -0
- openai/types/beta/chatkit/__init__.py +32 -0
- openai/types/beta/chatkit/chat_session.py +43 -0
- openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +10 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration.py +19 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +59 -0
- openai/types/beta/chatkit/chat_session_expires_after_param.py +15 -0
- openai/types/beta/chatkit/chat_session_file_upload.py +18 -0
- openai/types/beta/chatkit/chat_session_history.py +18 -0
- openai/types/beta/chatkit/chat_session_rate_limits.py +10 -0
- openai/types/beta/chatkit/chat_session_rate_limits_param.py +12 -0
- openai/types/beta/chatkit/chat_session_status.py +7 -0
- openai/types/beta/chatkit/chat_session_workflow_param.py +34 -0
- openai/types/beta/chatkit/chatkit_attachment.py +25 -0
- openai/types/beta/chatkit/chatkit_response_output_text.py +62 -0
- openai/types/beta/chatkit/chatkit_thread.py +56 -0
- openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +29 -0
- openai/types/beta/chatkit/chatkit_thread_item_list.py +144 -0
- openai/types/beta/chatkit/chatkit_thread_user_message_item.py +77 -0
- openai/types/beta/chatkit/chatkit_widget_item.py +27 -0
- openai/types/beta/chatkit/session_create_params.py +35 -0
- openai/types/beta/chatkit/thread_delete_response.py +18 -0
- openai/types/beta/chatkit/thread_list_items_params.py +27 -0
- openai/types/beta/chatkit/thread_list_params.py +33 -0
- openai/types/beta/chatkit_workflow.py +32 -0
- openai/types/beta/code_interpreter_tool.py +12 -0
- openai/types/beta/code_interpreter_tool_param.py +12 -0
- openai/types/beta/file_search_tool.py +55 -0
- openai/types/beta/file_search_tool_param.py +54 -0
- openai/types/beta/function_tool.py +15 -0
- openai/types/beta/function_tool_param.py +16 -0
- openai/types/beta/realtime/__init__.py +96 -0
- openai/types/beta/realtime/conversation_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item.py +61 -0
- openai/types/beta/realtime/conversation_item_content.py +32 -0
- openai/types/beta/realtime/conversation_item_content_param.py +31 -0
- openai/types/beta/realtime/conversation_item_create_event.py +29 -0
- openai/types/beta/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/beta/realtime/conversation_item_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item_delete_event.py +19 -0
- openai/types/beta/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +87 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +39 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/beta/realtime/conversation_item_param.py +62 -0
- openai/types/beta/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/beta/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/beta/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/beta/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/beta/realtime/conversation_item_with_reference.py +87 -0
- openai/types/beta/realtime/conversation_item_with_reference_param.py +87 -0
- openai/types/beta/realtime/error_event.py +36 -0
- openai/types/beta/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/beta/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/beta/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/beta/realtime/rate_limits_updated_event.py +33 -0
- openai/types/beta/realtime/realtime_client_event.py +47 -0
- openai/types/beta/realtime/realtime_client_event_param.py +44 -0
- openai/types/beta/realtime/realtime_connect_params.py +11 -0
- openai/types/beta/realtime/realtime_response.py +87 -0
- openai/types/beta/realtime/realtime_response_status.py +39 -0
- openai/types/beta/realtime/realtime_response_usage.py +52 -0
- openai/types/beta/realtime/realtime_server_event.py +133 -0
- openai/types/beta/realtime/response_audio_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_done_event.py +27 -0
- openai/types/beta/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/beta/realtime/response_cancel_event.py +22 -0
- openai/types/beta/realtime/response_cancel_event_param.py +21 -0
- openai/types/beta/realtime/response_content_part_added_event.py +45 -0
- openai/types/beta/realtime/response_content_part_done_event.py +45 -0
- openai/types/beta/realtime/response_create_event.py +121 -0
- openai/types/beta/realtime/response_create_event_param.py +122 -0
- openai/types/beta/realtime/response_created_event.py +19 -0
- openai/types/beta/realtime/response_done_event.py +19 -0
- openai/types/beta/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/beta/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/beta/realtime/response_output_item_added_event.py +25 -0
- openai/types/beta/realtime/response_output_item_done_event.py +25 -0
- openai/types/beta/realtime/response_text_delta_event.py +30 -0
- openai/types/beta/realtime/response_text_done_event.py +30 -0
- openai/types/beta/realtime/session.py +279 -0
- openai/types/beta/realtime/session_create_params.py +298 -0
- openai/types/beta/realtime/session_create_response.py +196 -0
- openai/types/beta/realtime/session_created_event.py +19 -0
- openai/types/beta/realtime/session_update_event.py +312 -0
- openai/types/beta/realtime/session_update_event_param.py +310 -0
- openai/types/beta/realtime/session_updated_event.py +19 -0
- openai/types/beta/realtime/transcription_session.py +100 -0
- openai/types/beta/realtime/transcription_session_create_params.py +173 -0
- openai/types/beta/realtime/transcription_session_update.py +185 -0
- openai/types/beta/realtime/transcription_session_update_param.py +185 -0
- openai/types/beta/realtime/transcription_session_updated_event.py +24 -0
- openai/types/beta/thread.py +63 -0
- openai/types/beta/thread_create_and_run_params.py +397 -0
- openai/types/beta/thread_create_params.py +186 -0
- openai/types/beta/thread_deleted.py +15 -0
- openai/types/beta/thread_update_params.py +56 -0
- openai/types/beta/threads/__init__.py +46 -0
- openai/types/beta/threads/annotation.py +12 -0
- openai/types/beta/threads/annotation_delta.py +14 -0
- openai/types/beta/threads/file_citation_annotation.py +26 -0
- openai/types/beta/threads/file_citation_delta_annotation.py +33 -0
- openai/types/beta/threads/file_path_annotation.py +26 -0
- openai/types/beta/threads/file_path_delta_annotation.py +30 -0
- openai/types/beta/threads/image_file.py +23 -0
- openai/types/beta/threads/image_file_content_block.py +15 -0
- openai/types/beta/threads/image_file_content_block_param.py +16 -0
- openai/types/beta/threads/image_file_delta.py +23 -0
- openai/types/beta/threads/image_file_delta_block.py +19 -0
- openai/types/beta/threads/image_file_param.py +22 -0
- openai/types/beta/threads/image_url.py +23 -0
- openai/types/beta/threads/image_url_content_block.py +15 -0
- openai/types/beta/threads/image_url_content_block_param.py +16 -0
- openai/types/beta/threads/image_url_delta.py +22 -0
- openai/types/beta/threads/image_url_delta_block.py +19 -0
- openai/types/beta/threads/image_url_param.py +22 -0
- openai/types/beta/threads/message.py +103 -0
- openai/types/beta/threads/message_content.py +18 -0
- openai/types/beta/threads/message_content_delta.py +17 -0
- openai/types/beta/threads/message_content_part_param.py +14 -0
- openai/types/beta/threads/message_create_params.py +55 -0
- openai/types/beta/threads/message_deleted.py +15 -0
- openai/types/beta/threads/message_delta.py +17 -0
- openai/types/beta/threads/message_delta_event.py +19 -0
- openai/types/beta/threads/message_list_params.py +42 -0
- openai/types/beta/threads/message_update_params.py +24 -0
- openai/types/beta/threads/refusal_content_block.py +14 -0
- openai/types/beta/threads/refusal_delta_block.py +18 -0
- openai/types/beta/threads/required_action_function_tool_call.py +34 -0
- openai/types/beta/threads/run.py +245 -0
- openai/types/beta/threads/run_create_params.py +268 -0
- openai/types/beta/threads/run_list_params.py +39 -0
- openai/types/beta/threads/run_status.py +17 -0
- openai/types/beta/threads/run_submit_tool_outputs_params.py +52 -0
- openai/types/beta/threads/run_update_params.py +24 -0
- openai/types/beta/threads/runs/__init__.py +24 -0
- openai/types/beta/threads/runs/code_interpreter_logs.py +19 -0
- openai/types/beta/threads/runs/code_interpreter_output_image.py +26 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call.py +70 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +44 -0
- openai/types/beta/threads/runs/file_search_tool_call.py +78 -0
- openai/types/beta/threads/runs/file_search_tool_call_delta.py +25 -0
- openai/types/beta/threads/runs/function_tool_call.py +38 -0
- openai/types/beta/threads/runs/function_tool_call_delta.py +41 -0
- openai/types/beta/threads/runs/message_creation_step_details.py +19 -0
- openai/types/beta/threads/runs/run_step.py +115 -0
- openai/types/beta/threads/runs/run_step_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_delta_event.py +19 -0
- openai/types/beta/threads/runs/run_step_delta_message_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_include.py +7 -0
- openai/types/beta/threads/runs/step_list_params.py +56 -0
- openai/types/beta/threads/runs/step_retrieve_params.py +28 -0
- openai/types/beta/threads/runs/tool_call.py +15 -0
- openai/types/beta/threads/runs/tool_call_delta.py +16 -0
- openai/types/beta/threads/runs/tool_call_delta_object.py +21 -0
- openai/types/beta/threads/runs/tool_calls_step_details.py +21 -0
- openai/types/beta/threads/text.py +15 -0
- openai/types/beta/threads/text_content_block.py +15 -0
- openai/types/beta/threads/text_content_block_param.py +15 -0
- openai/types/beta/threads/text_delta.py +15 -0
- openai/types/beta/threads/text_delta_block.py +19 -0
- openai/types/chat/__init__.py +102 -0
- openai/types/chat/chat_completion.py +89 -0
- openai/types/chat/chat_completion_allowed_tool_choice_param.py +17 -0
- openai/types/chat/chat_completion_allowed_tools_param.py +32 -0
- openai/types/chat/chat_completion_assistant_message_param.py +70 -0
- openai/types/chat/chat_completion_audio.py +25 -0
- openai/types/chat/chat_completion_audio_param.py +25 -0
- openai/types/chat/chat_completion_chunk.py +166 -0
- openai/types/chat/chat_completion_content_part_image.py +27 -0
- openai/types/chat/chat_completion_content_part_image_param.py +26 -0
- openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
- openai/types/chat/chat_completion_content_part_param.py +41 -0
- openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
- openai/types/chat/chat_completion_content_part_text.py +15 -0
- openai/types/chat/chat_completion_content_part_text_param.py +15 -0
- openai/types/chat/chat_completion_custom_tool_param.py +58 -0
- openai/types/chat/chat_completion_deleted.py +18 -0
- openai/types/chat/chat_completion_developer_message_param.py +25 -0
- openai/types/chat/chat_completion_function_call_option_param.py +12 -0
- openai/types/chat/chat_completion_function_message_param.py +19 -0
- openai/types/chat/chat_completion_function_tool.py +15 -0
- openai/types/chat/chat_completion_function_tool_param.py +16 -0
- openai/types/chat/chat_completion_message.py +79 -0
- openai/types/chat/chat_completion_message_custom_tool_call.py +26 -0
- openai/types/chat/chat_completion_message_custom_tool_call_param.py +26 -0
- openai/types/chat/chat_completion_message_function_tool_call.py +31 -0
- openai/types/chat/chat_completion_message_function_tool_call_param.py +31 -0
- openai/types/chat/chat_completion_message_param.py +24 -0
- openai/types/chat/chat_completion_message_tool_call.py +17 -0
- openai/types/chat/chat_completion_message_tool_call_param.py +14 -0
- openai/types/chat/chat_completion_message_tool_call_union_param.py +15 -0
- openai/types/chat/chat_completion_modality.py +7 -0
- openai/types/chat/chat_completion_named_tool_choice_custom_param.py +19 -0
- openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
- openai/types/chat/chat_completion_prediction_content_param.py +25 -0
- openai/types/chat/chat_completion_reasoning_effort.py +7 -0
- openai/types/chat/chat_completion_role.py +7 -0
- openai/types/chat/chat_completion_store_message.py +23 -0
- openai/types/chat/chat_completion_stream_options_param.py +31 -0
- openai/types/chat/chat_completion_system_message_param.py +25 -0
- openai/types/chat/chat_completion_token_logprob.py +57 -0
- openai/types/chat/chat_completion_tool_choice_option_param.py +19 -0
- openai/types/chat/chat_completion_tool_message_param.py +21 -0
- openai/types/chat/chat_completion_tool_param.py +14 -0
- openai/types/chat/chat_completion_tool_union_param.py +13 -0
- openai/types/chat/chat_completion_user_message_param.py +25 -0
- openai/types/chat/completion_create_params.py +450 -0
- openai/types/chat/completion_list_params.py +37 -0
- openai/types/chat/completion_update_params.py +22 -0
- openai/types/chat/completions/__init__.py +5 -0
- openai/types/chat/completions/message_list_params.py +21 -0
- openai/types/chat/parsed_chat_completion.py +40 -0
- openai/types/chat/parsed_function_tool_call.py +29 -0
- openai/types/chat_model.py +7 -0
- openai/types/completion.py +37 -0
- openai/types/completion_choice.py +35 -0
- openai/types/completion_create_params.py +189 -0
- openai/types/completion_usage.py +54 -0
- openai/types/container_create_params.py +30 -0
- openai/types/container_create_response.py +40 -0
- openai/types/container_list_params.py +30 -0
- openai/types/container_list_response.py +40 -0
- openai/types/container_retrieve_response.py +40 -0
- openai/types/containers/__init__.py +9 -0
- openai/types/containers/file_create_params.py +17 -0
- openai/types/containers/file_create_response.py +30 -0
- openai/types/containers/file_list_params.py +30 -0
- openai/types/containers/file_list_response.py +30 -0
- openai/types/containers/file_retrieve_response.py +30 -0
- openai/types/containers/files/__init__.py +3 -0
- openai/types/conversations/__init__.py +27 -0
- openai/types/conversations/computer_screenshot_content.py +22 -0
- openai/types/conversations/conversation.py +30 -0
- openai/types/conversations/conversation_create_params.py +29 -0
- openai/types/conversations/conversation_deleted_resource.py +15 -0
- openai/types/conversations/conversation_item.py +230 -0
- openai/types/conversations/conversation_item_list.py +26 -0
- openai/types/conversations/conversation_update_params.py +22 -0
- openai/types/conversations/input_file_content.py +7 -0
- openai/types/conversations/input_file_content_param.py +7 -0
- openai/types/conversations/input_image_content.py +7 -0
- openai/types/conversations/input_image_content_param.py +7 -0
- openai/types/conversations/input_text_content.py +7 -0
- openai/types/conversations/input_text_content_param.py +7 -0
- openai/types/conversations/item_create_params.py +24 -0
- openai/types/conversations/item_list_params.py +50 -0
- openai/types/conversations/item_retrieve_params.py +22 -0
- openai/types/conversations/message.py +66 -0
- openai/types/conversations/output_text_content.py +7 -0
- openai/types/conversations/output_text_content_param.py +7 -0
- openai/types/conversations/refusal_content.py +7 -0
- openai/types/conversations/refusal_content_param.py +7 -0
- openai/types/conversations/summary_text_content.py +15 -0
- openai/types/conversations/text_content.py +13 -0
- openai/types/create_embedding_response.py +31 -0
- openai/types/embedding.py +23 -0
- openai/types/embedding_create_params.py +55 -0
- openai/types/embedding_model.py +7 -0
- openai/types/eval_create_params.py +202 -0
- openai/types/eval_create_response.py +111 -0
- openai/types/eval_custom_data_source_config.py +21 -0
- openai/types/eval_delete_response.py +13 -0
- openai/types/eval_list_params.py +27 -0
- openai/types/eval_list_response.py +111 -0
- openai/types/eval_retrieve_response.py +111 -0
- openai/types/eval_stored_completions_data_source_config.py +32 -0
- openai/types/eval_update_params.py +25 -0
- openai/types/eval_update_response.py +111 -0
- openai/types/evals/__init__.py +22 -0
- openai/types/evals/create_eval_completions_run_data_source.py +236 -0
- openai/types/evals/create_eval_completions_run_data_source_param.py +232 -0
- openai/types/evals/create_eval_jsonl_run_data_source.py +42 -0
- openai/types/evals/create_eval_jsonl_run_data_source_param.py +47 -0
- openai/types/evals/eval_api_error.py +13 -0
- openai/types/evals/run_cancel_response.py +417 -0
- openai/types/evals/run_create_params.py +340 -0
- openai/types/evals/run_create_response.py +417 -0
- openai/types/evals/run_delete_response.py +15 -0
- openai/types/evals/run_list_params.py +27 -0
- openai/types/evals/run_list_response.py +417 -0
- openai/types/evals/run_retrieve_response.py +417 -0
- openai/types/evals/runs/__init__.py +7 -0
- openai/types/evals/runs/output_item_list_params.py +30 -0
- openai/types/evals/runs/output_item_list_response.py +134 -0
- openai/types/evals/runs/output_item_retrieve_response.py +134 -0
- openai/types/file_chunking_strategy.py +14 -0
- openai/types/file_chunking_strategy_param.py +13 -0
- openai/types/file_content.py +7 -0
- openai/types/file_create_params.py +45 -0
- openai/types/file_deleted.py +15 -0
- openai/types/file_list_params.py +33 -0
- openai/types/file_object.py +58 -0
- openai/types/file_purpose.py +7 -0
- openai/types/fine_tuning/__init__.py +26 -0
- openai/types/fine_tuning/alpha/__init__.py +8 -0
- openai/types/fine_tuning/alpha/grader_run_params.py +40 -0
- openai/types/fine_tuning/alpha/grader_run_response.py +67 -0
- openai/types/fine_tuning/alpha/grader_validate_params.py +24 -0
- openai/types/fine_tuning/alpha/grader_validate_response.py +20 -0
- openai/types/fine_tuning/checkpoints/__init__.py +9 -0
- openai/types/fine_tuning/checkpoints/permission_create_params.py +14 -0
- openai/types/fine_tuning/checkpoints/permission_create_response.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_delete_response.py +18 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_params.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +34 -0
- openai/types/fine_tuning/dpo_hyperparameters.py +36 -0
- openai/types/fine_tuning/dpo_hyperparameters_param.py +36 -0
- openai/types/fine_tuning/dpo_method.py +13 -0
- openai/types/fine_tuning/dpo_method_param.py +14 -0
- openai/types/fine_tuning/fine_tuning_job.py +161 -0
- openai/types/fine_tuning/fine_tuning_job_event.py +32 -0
- openai/types/fine_tuning/fine_tuning_job_integration.py +5 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +33 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +21 -0
- openai/types/fine_tuning/job_create_params.py +176 -0
- openai/types/fine_tuning/job_list_events_params.py +15 -0
- openai/types/fine_tuning/job_list_params.py +23 -0
- openai/types/fine_tuning/jobs/__init__.py +6 -0
- openai/types/fine_tuning/jobs/checkpoint_list_params.py +15 -0
- openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
- openai/types/fine_tuning/reinforcement_hyperparameters.py +43 -0
- openai/types/fine_tuning/reinforcement_hyperparameters_param.py +43 -0
- openai/types/fine_tuning/reinforcement_method.py +24 -0
- openai/types/fine_tuning/reinforcement_method_param.py +27 -0
- openai/types/fine_tuning/supervised_hyperparameters.py +29 -0
- openai/types/fine_tuning/supervised_hyperparameters_param.py +29 -0
- openai/types/fine_tuning/supervised_method.py +13 -0
- openai/types/fine_tuning/supervised_method_param.py +14 -0
- openai/types/graders/__init__.py +16 -0
- openai/types/graders/label_model_grader.py +70 -0
- openai/types/graders/label_model_grader_param.py +77 -0
- openai/types/graders/multi_grader.py +32 -0
- openai/types/graders/multi_grader_param.py +35 -0
- openai/types/graders/python_grader.py +22 -0
- openai/types/graders/python_grader_param.py +21 -0
- openai/types/graders/score_model_grader.py +109 -0
- openai/types/graders/score_model_grader_param.py +115 -0
- openai/types/graders/string_check_grader.py +24 -0
- openai/types/graders/string_check_grader_param.py +24 -0
- openai/types/graders/text_similarity_grader.py +40 -0
- openai/types/graders/text_similarity_grader_param.py +42 -0
- openai/types/image.py +26 -0
- openai/types/image_create_variation_params.py +48 -0
- openai/types/image_edit_completed_event.py +55 -0
- openai/types/image_edit_params.py +145 -0
- openai/types/image_edit_partial_image_event.py +33 -0
- openai/types/image_edit_stream_event.py +14 -0
- openai/types/image_gen_completed_event.py +55 -0
- openai/types/image_gen_partial_image_event.py +33 -0
- openai/types/image_gen_stream_event.py +14 -0
- openai/types/image_generate_params.py +143 -0
- openai/types/image_model.py +7 -0
- openai/types/images_response.py +60 -0
- openai/types/model.py +21 -0
- openai/types/model_deleted.py +13 -0
- openai/types/moderation.py +186 -0
- openai/types/moderation_create_params.py +30 -0
- openai/types/moderation_create_response.py +19 -0
- openai/types/moderation_image_url_input_param.py +20 -0
- openai/types/moderation_model.py +9 -0
- openai/types/moderation_multi_modal_input_param.py +13 -0
- openai/types/moderation_text_input_param.py +15 -0
- openai/types/other_file_chunking_strategy_object.py +12 -0
- openai/types/realtime/__init__.py +237 -0
- openai/types/realtime/audio_transcription.py +37 -0
- openai/types/realtime/audio_transcription_param.py +34 -0
- openai/types/realtime/call_accept_params.py +122 -0
- openai/types/realtime/call_create_params.py +17 -0
- openai/types/realtime/call_refer_params.py +15 -0
- openai/types/realtime/call_reject_params.py +15 -0
- openai/types/realtime/client_secret_create_params.py +46 -0
- openai/types/realtime/client_secret_create_response.py +26 -0
- openai/types/realtime/conversation_created_event.py +27 -0
- openai/types/realtime/conversation_item.py +32 -0
- openai/types/realtime/conversation_item_added.py +26 -0
- openai/types/realtime/conversation_item_create_event.py +29 -0
- openai/types/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/realtime/conversation_item_created_event.py +27 -0
- openai/types/realtime/conversation_item_delete_event.py +19 -0
- openai/types/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/realtime/conversation_item_done.py +26 -0
- openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +79 -0
- openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +36 -0
- openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/realtime/conversation_item_input_audio_transcription_segment.py +36 -0
- openai/types/realtime/conversation_item_param.py +30 -0
- openai/types/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/realtime/input_audio_buffer_timeout_triggered.py +30 -0
- openai/types/realtime/log_prob_properties.py +18 -0
- openai/types/realtime/mcp_list_tools_completed.py +18 -0
- openai/types/realtime/mcp_list_tools_failed.py +18 -0
- openai/types/realtime/mcp_list_tools_in_progress.py +18 -0
- openai/types/realtime/noise_reduction_type.py +7 -0
- openai/types/realtime/output_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/output_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/rate_limits_updated_event.py +33 -0
- openai/types/realtime/realtime_audio_config.py +15 -0
- openai/types/realtime/realtime_audio_config_input.py +63 -0
- openai/types/realtime/realtime_audio_config_input_param.py +65 -0
- openai/types/realtime/realtime_audio_config_output.py +36 -0
- openai/types/realtime/realtime_audio_config_output_param.py +35 -0
- openai/types/realtime/realtime_audio_config_param.py +16 -0
- openai/types/realtime/realtime_audio_formats.py +30 -0
- openai/types/realtime/realtime_audio_formats_param.py +29 -0
- openai/types/realtime/realtime_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_client_event.py +36 -0
- openai/types/realtime/realtime_client_event_param.py +34 -0
- openai/types/realtime/realtime_connect_params.py +13 -0
- openai/types/realtime/realtime_conversation_item_assistant_message.py +58 -0
- openai/types/realtime/realtime_conversation_item_assistant_message_param.py +58 -0
- openai/types/realtime/realtime_conversation_item_function_call.py +41 -0
- openai/types/realtime/realtime_conversation_item_function_call_output.py +37 -0
- openai/types/realtime/realtime_conversation_item_function_call_output_param.py +36 -0
- openai/types/realtime/realtime_conversation_item_function_call_param.py +40 -0
- openai/types/realtime/realtime_conversation_item_system_message.py +42 -0
- openai/types/realtime/realtime_conversation_item_system_message_param.py +42 -0
- openai/types/realtime/realtime_conversation_item_user_message.py +69 -0
- openai/types/realtime/realtime_conversation_item_user_message_param.py +69 -0
- openai/types/realtime/realtime_error.py +24 -0
- openai/types/realtime/realtime_error_event.py +19 -0
- openai/types/realtime/realtime_function_tool.py +25 -0
- openai/types/realtime/realtime_function_tool_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_response.py +25 -0
- openai/types/realtime/realtime_mcp_approval_response_param.py +25 -0
- openai/types/realtime/realtime_mcp_list_tools.py +36 -0
- openai/types/realtime/realtime_mcp_list_tools_param.py +36 -0
- openai/types/realtime/realtime_mcp_protocol_error.py +15 -0
- openai/types/realtime/realtime_mcp_protocol_error_param.py +15 -0
- openai/types/realtime/realtime_mcp_tool_call.py +43 -0
- openai/types/realtime/realtime_mcp_tool_call_param.py +40 -0
- openai/types/realtime/realtime_mcp_tool_execution_error.py +13 -0
- openai/types/realtime/realtime_mcp_tool_execution_error_param.py +13 -0
- openai/types/realtime/realtime_mcphttp_error.py +15 -0
- openai/types/realtime/realtime_mcphttp_error_param.py +15 -0
- openai/types/realtime/realtime_response.py +98 -0
- openai/types/realtime/realtime_response_create_audio_output.py +29 -0
- openai/types/realtime/realtime_response_create_audio_output_param.py +28 -0
- openai/types/realtime/realtime_response_create_mcp_tool.py +135 -0
- openai/types/realtime/realtime_response_create_mcp_tool_param.py +135 -0
- openai/types/realtime/realtime_response_create_params.py +98 -0
- openai/types/realtime/realtime_response_create_params_param.py +99 -0
- openai/types/realtime/realtime_response_status.py +39 -0
- openai/types/realtime/realtime_response_usage.py +41 -0
- openai/types/realtime/realtime_response_usage_input_token_details.py +35 -0
- openai/types/realtime/realtime_response_usage_output_token_details.py +15 -0
- openai/types/realtime/realtime_server_event.py +155 -0
- openai/types/realtime/realtime_session_client_secret.py +20 -0
- openai/types/realtime/realtime_session_create_request.py +122 -0
- openai/types/realtime/realtime_session_create_request_param.py +122 -0
- openai/types/realtime/realtime_session_create_response.py +475 -0
- openai/types/realtime/realtime_tool_choice_config.py +12 -0
- openai/types/realtime/realtime_tool_choice_config_param.py +14 -0
- openai/types/realtime/realtime_tools_config.py +10 -0
- openai/types/realtime/realtime_tools_config_param.py +143 -0
- openai/types/realtime/realtime_tools_config_union.py +141 -0
- openai/types/realtime/realtime_tools_config_union_param.py +140 -0
- openai/types/realtime/realtime_tracing_config.py +31 -0
- openai/types/realtime/realtime_tracing_config_param.py +31 -0
- openai/types/realtime/realtime_transcription_session_audio.py +12 -0
- openai/types/realtime/realtime_transcription_session_audio_input.py +65 -0
- openai/types/realtime/realtime_transcription_session_audio_input_param.py +67 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_transcription_session_audio_param.py +13 -0
- openai/types/realtime/realtime_transcription_session_create_request.py +27 -0
- openai/types/realtime/realtime_transcription_session_create_request_param.py +28 -0
- openai/types/realtime/realtime_transcription_session_create_response.py +68 -0
- openai/types/realtime/realtime_transcription_session_turn_detection.py +32 -0
- openai/types/realtime/realtime_truncation.py +10 -0
- openai/types/realtime/realtime_truncation_param.py +12 -0
- openai/types/realtime/realtime_truncation_retention_ratio.py +38 -0
- openai/types/realtime/realtime_truncation_retention_ratio_param.py +37 -0
- openai/types/realtime/response_audio_delta_event.py +30 -0
- openai/types/realtime/response_audio_done_event.py +27 -0
- openai/types/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/realtime/response_cancel_event.py +22 -0
- openai/types/realtime/response_cancel_event_param.py +21 -0
- openai/types/realtime/response_content_part_added_event.py +45 -0
- openai/types/realtime/response_content_part_done_event.py +45 -0
- openai/types/realtime/response_create_event.py +20 -0
- openai/types/realtime/response_create_event_param.py +20 -0
- openai/types/realtime/response_created_event.py +19 -0
- openai/types/realtime/response_done_event.py +19 -0
- openai/types/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/realtime/response_mcp_call_arguments_delta.py +31 -0
- openai/types/realtime/response_mcp_call_arguments_done.py +27 -0
- openai/types/realtime/response_mcp_call_completed.py +21 -0
- openai/types/realtime/response_mcp_call_failed.py +21 -0
- openai/types/realtime/response_mcp_call_in_progress.py +21 -0
- openai/types/realtime/response_output_item_added_event.py +25 -0
- openai/types/realtime/response_output_item_done_event.py +25 -0
- openai/types/realtime/response_text_delta_event.py +30 -0
- openai/types/realtime/response_text_done_event.py +30 -0
- openai/types/realtime/session_created_event.py +23 -0
- openai/types/realtime/session_update_event.py +31 -0
- openai/types/realtime/session_update_event_param.py +32 -0
- openai/types/realtime/session_updated_event.py +23 -0
- openai/types/responses/__init__.py +270 -0
- openai/types/responses/apply_patch_tool.py +12 -0
- openai/types/responses/apply_patch_tool_param.py +12 -0
- openai/types/responses/computer_tool.py +21 -0
- openai/types/responses/computer_tool_param.py +21 -0
- openai/types/responses/custom_tool.py +23 -0
- openai/types/responses/custom_tool_param.py +23 -0
- openai/types/responses/easy_input_message.py +26 -0
- openai/types/responses/easy_input_message_param.py +27 -0
- openai/types/responses/file_search_tool.py +58 -0
- openai/types/responses/file_search_tool_param.py +60 -0
- openai/types/responses/function_shell_tool.py +12 -0
- openai/types/responses/function_shell_tool_param.py +12 -0
- openai/types/responses/function_tool.py +28 -0
- openai/types/responses/function_tool_param.py +28 -0
- openai/types/responses/input_item_list_params.py +34 -0
- openai/types/responses/input_token_count_params.py +142 -0
- openai/types/responses/input_token_count_response.py +13 -0
- openai/types/responses/parsed_response.py +105 -0
- openai/types/responses/response.py +307 -0
- openai/types/responses/response_apply_patch_tool_call.py +76 -0
- openai/types/responses/response_apply_patch_tool_call_output.py +31 -0
- openai/types/responses/response_audio_delta_event.py +18 -0
- openai/types/responses/response_audio_done_event.py +15 -0
- openai/types/responses/response_audio_transcript_delta_event.py +18 -0
- openai/types/responses/response_audio_transcript_done_event.py +15 -0
- openai/types/responses/response_code_interpreter_call_code_delta_event.py +27 -0
- openai/types/responses/response_code_interpreter_call_code_done_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_completed_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_in_progress_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_interpreting_event.py +24 -0
- openai/types/responses/response_code_interpreter_tool_call.py +55 -0
- openai/types/responses/response_code_interpreter_tool_call_param.py +54 -0
- openai/types/responses/response_completed_event.py +19 -0
- openai/types/responses/response_computer_tool_call.py +209 -0
- openai/types/responses/response_computer_tool_call_output_item.py +47 -0
- openai/types/responses/response_computer_tool_call_output_screenshot.py +22 -0
- openai/types/responses/response_computer_tool_call_output_screenshot_param.py +21 -0
- openai/types/responses/response_computer_tool_call_param.py +207 -0
- openai/types/responses/response_content_part_added_event.py +44 -0
- openai/types/responses/response_content_part_done_event.py +44 -0
- openai/types/responses/response_conversation_param.py +12 -0
- openai/types/responses/response_create_params.py +334 -0
- openai/types/responses/response_created_event.py +19 -0
- openai/types/responses/response_custom_tool_call.py +25 -0
- openai/types/responses/response_custom_tool_call_input_delta_event.py +24 -0
- openai/types/responses/response_custom_tool_call_input_done_event.py +24 -0
- openai/types/responses/response_custom_tool_call_output.py +33 -0
- openai/types/responses/response_custom_tool_call_output_param.py +31 -0
- openai/types/responses/response_custom_tool_call_param.py +24 -0
- openai/types/responses/response_error.py +34 -0
- openai/types/responses/response_error_event.py +25 -0
- openai/types/responses/response_failed_event.py +19 -0
- openai/types/responses/response_file_search_call_completed_event.py +21 -0
- openai/types/responses/response_file_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_file_search_call_searching_event.py +21 -0
- openai/types/responses/response_file_search_tool_call.py +51 -0
- openai/types/responses/response_file_search_tool_call_param.py +53 -0
- openai/types/responses/response_format_text_config.py +16 -0
- openai/types/responses/response_format_text_config_param.py +16 -0
- openai/types/responses/response_format_text_json_schema_config.py +43 -0
- openai/types/responses/response_format_text_json_schema_config_param.py +41 -0
- openai/types/responses/response_function_call_arguments_delta_event.py +26 -0
- openai/types/responses/response_function_call_arguments_done_event.py +26 -0
- openai/types/responses/response_function_call_output_item.py +16 -0
- openai/types/responses/response_function_call_output_item_list.py +10 -0
- openai/types/responses/response_function_call_output_item_list_param.py +18 -0
- openai/types/responses/response_function_call_output_item_param.py +16 -0
- openai/types/responses/response_function_shell_call_output_content.py +36 -0
- openai/types/responses/response_function_shell_call_output_content_param.py +35 -0
- openai/types/responses/response_function_shell_tool_call.py +44 -0
- openai/types/responses/response_function_shell_tool_call_output.py +70 -0
- openai/types/responses/response_function_tool_call.py +32 -0
- openai/types/responses/response_function_tool_call_item.py +10 -0
- openai/types/responses/response_function_tool_call_output_item.py +40 -0
- openai/types/responses/response_function_tool_call_param.py +31 -0
- openai/types/responses/response_function_web_search.py +67 -0
- openai/types/responses/response_function_web_search_param.py +73 -0
- openai/types/responses/response_image_gen_call_completed_event.py +21 -0
- openai/types/responses/response_image_gen_call_generating_event.py +21 -0
- openai/types/responses/response_image_gen_call_in_progress_event.py +21 -0
- openai/types/responses/response_image_gen_call_partial_image_event.py +30 -0
- openai/types/responses/response_in_progress_event.py +19 -0
- openai/types/responses/response_includable.py +16 -0
- openai/types/responses/response_incomplete_event.py +19 -0
- openai/types/responses/response_input_audio.py +22 -0
- openai/types/responses/response_input_audio_param.py +22 -0
- openai/types/responses/response_input_content.py +15 -0
- openai/types/responses/response_input_content_param.py +14 -0
- openai/types/responses/response_input_file.py +25 -0
- openai/types/responses/response_input_file_content.py +25 -0
- openai/types/responses/response_input_file_content_param.py +25 -0
- openai/types/responses/response_input_file_param.py +25 -0
- openai/types/responses/response_input_image.py +28 -0
- openai/types/responses/response_input_image_content.py +28 -0
- openai/types/responses/response_input_image_content_param.py +28 -0
- openai/types/responses/response_input_image_param.py +28 -0
- openai/types/responses/response_input_item.py +482 -0
- openai/types/responses/response_input_item_param.py +479 -0
- openai/types/responses/response_input_message_content_list.py +10 -0
- openai/types/responses/response_input_message_content_list_param.py +16 -0
- openai/types/responses/response_input_message_item.py +33 -0
- openai/types/responses/response_input_param.py +482 -0
- openai/types/responses/response_input_text.py +15 -0
- openai/types/responses/response_input_text_content.py +15 -0
- openai/types/responses/response_input_text_content_param.py +15 -0
- openai/types/responses/response_input_text_param.py +15 -0
- openai/types/responses/response_item.py +226 -0
- openai/types/responses/response_item_list.py +26 -0
- openai/types/responses/response_mcp_call_arguments_delta_event.py +27 -0
- openai/types/responses/response_mcp_call_arguments_done_event.py +24 -0
- openai/types/responses/response_mcp_call_completed_event.py +21 -0
- openai/types/responses/response_mcp_call_failed_event.py +21 -0
- openai/types/responses/response_mcp_call_in_progress_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_completed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_failed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_in_progress_event.py +21 -0
- openai/types/responses/response_output_item.py +189 -0
- openai/types/responses/response_output_item_added_event.py +22 -0
- openai/types/responses/response_output_item_done_event.py +22 -0
- openai/types/responses/response_output_message.py +34 -0
- openai/types/responses/response_output_message_param.py +34 -0
- openai/types/responses/response_output_refusal.py +15 -0
- openai/types/responses/response_output_refusal_param.py +15 -0
- openai/types/responses/response_output_text.py +117 -0
- openai/types/responses/response_output_text_annotation_added_event.py +30 -0
- openai/types/responses/response_output_text_param.py +115 -0
- openai/types/responses/response_prompt.py +28 -0
- openai/types/responses/response_prompt_param.py +29 -0
- openai/types/responses/response_queued_event.py +19 -0
- openai/types/responses/response_reasoning_item.py +51 -0
- openai/types/responses/response_reasoning_item_param.py +51 -0
- openai/types/responses/response_reasoning_summary_part_added_event.py +35 -0
- openai/types/responses/response_reasoning_summary_part_done_event.py +35 -0
- openai/types/responses/response_reasoning_summary_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_summary_text_done_event.py +27 -0
- openai/types/responses/response_reasoning_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_text_done_event.py +27 -0
- openai/types/responses/response_refusal_delta_event.py +27 -0
- openai/types/responses/response_refusal_done_event.py +27 -0
- openai/types/responses/response_retrieve_params.py +59 -0
- openai/types/responses/response_status.py +7 -0
- openai/types/responses/response_stream_event.py +120 -0
- openai/types/responses/response_text_config.py +35 -0
- openai/types/responses/response_text_config_param.py +36 -0
- openai/types/responses/response_text_delta_event.py +50 -0
- openai/types/responses/response_text_done_event.py +50 -0
- openai/types/responses/response_usage.py +35 -0
- openai/types/responses/response_web_search_call_completed_event.py +21 -0
- openai/types/responses/response_web_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_web_search_call_searching_event.py +21 -0
- openai/types/responses/tool.py +271 -0
- openai/types/responses/tool_choice_allowed.py +36 -0
- openai/types/responses/tool_choice_allowed_param.py +36 -0
- openai/types/responses/tool_choice_apply_patch.py +12 -0
- openai/types/responses/tool_choice_apply_patch_param.py +12 -0
- openai/types/responses/tool_choice_custom.py +15 -0
- openai/types/responses/tool_choice_custom_param.py +15 -0
- openai/types/responses/tool_choice_function.py +15 -0
- openai/types/responses/tool_choice_function_param.py +15 -0
- openai/types/responses/tool_choice_mcp.py +19 -0
- openai/types/responses/tool_choice_mcp_param.py +19 -0
- openai/types/responses/tool_choice_options.py +7 -0
- openai/types/responses/tool_choice_shell.py +12 -0
- openai/types/responses/tool_choice_shell_param.py +12 -0
- openai/types/responses/tool_choice_types.py +31 -0
- openai/types/responses/tool_choice_types_param.py +33 -0
- openai/types/responses/tool_param.py +271 -0
- openai/types/responses/web_search_preview_tool.py +49 -0
- openai/types/responses/web_search_preview_tool_param.py +49 -0
- openai/types/responses/web_search_tool.py +63 -0
- openai/types/responses/web_search_tool_param.py +65 -0
- openai/types/shared/__init__.py +19 -0
- openai/types/shared/all_models.py +28 -0
- openai/types/shared/chat_model.py +75 -0
- openai/types/shared/comparison_filter.py +34 -0
- openai/types/shared/compound_filter.py +22 -0
- openai/types/shared/custom_tool_input_format.py +28 -0
- openai/types/shared/error_object.py +17 -0
- openai/types/shared/function_definition.py +43 -0
- openai/types/shared/function_parameters.py +8 -0
- openai/types/shared/metadata.py +8 -0
- openai/types/shared/reasoning.py +44 -0
- openai/types/shared/reasoning_effort.py +8 -0
- openai/types/shared/response_format_json_object.py +12 -0
- openai/types/shared/response_format_json_schema.py +48 -0
- openai/types/shared/response_format_text.py +12 -0
- openai/types/shared/response_format_text_grammar.py +15 -0
- openai/types/shared/response_format_text_python.py +12 -0
- openai/types/shared/responses_model.py +28 -0
- openai/types/shared_params/__init__.py +15 -0
- openai/types/shared_params/chat_model.py +77 -0
- openai/types/shared_params/comparison_filter.py +36 -0
- openai/types/shared_params/compound_filter.py +23 -0
- openai/types/shared_params/custom_tool_input_format.py +27 -0
- openai/types/shared_params/function_definition.py +45 -0
- openai/types/shared_params/function_parameters.py +10 -0
- openai/types/shared_params/metadata.py +10 -0
- openai/types/shared_params/reasoning.py +45 -0
- openai/types/shared_params/reasoning_effort.py +10 -0
- openai/types/shared_params/response_format_json_object.py +12 -0
- openai/types/shared_params/response_format_json_schema.py +46 -0
- openai/types/shared_params/response_format_text.py +12 -0
- openai/types/shared_params/responses_model.py +30 -0
- openai/types/static_file_chunking_strategy.py +20 -0
- openai/types/static_file_chunking_strategy_object.py +15 -0
- openai/types/static_file_chunking_strategy_object_param.py +16 -0
- openai/types/static_file_chunking_strategy_param.py +22 -0
- openai/types/upload.py +42 -0
- openai/types/upload_complete_params.py +20 -0
- openai/types/upload_create_params.py +52 -0
- openai/types/uploads/__init__.py +6 -0
- openai/types/uploads/part_create_params.py +14 -0
- openai/types/uploads/upload_part.py +21 -0
- openai/types/vector_store.py +82 -0
- openai/types/vector_store_create_params.py +61 -0
- openai/types/vector_store_deleted.py +15 -0
- openai/types/vector_store_list_params.py +39 -0
- openai/types/vector_store_search_params.py +42 -0
- openai/types/vector_store_search_response.py +39 -0
- openai/types/vector_store_update_params.py +39 -0
- openai/types/vector_stores/__init__.py +13 -0
- openai/types/vector_stores/file_batch_create_params.py +70 -0
- openai/types/vector_stores/file_batch_list_files_params.py +47 -0
- openai/types/vector_stores/file_content_response.py +15 -0
- openai/types/vector_stores/file_create_params.py +35 -0
- openai/types/vector_stores/file_list_params.py +45 -0
- openai/types/vector_stores/file_update_params.py +21 -0
- openai/types/vector_stores/vector_store_file.py +67 -0
- openai/types/vector_stores/vector_store_file_batch.py +54 -0
- openai/types/vector_stores/vector_store_file_deleted.py +15 -0
- openai/types/video.py +53 -0
- openai/types/video_create_error.py +11 -0
- openai/types/video_create_params.py +29 -0
- openai/types/video_delete_response.py +18 -0
- openai/types/video_download_content_params.py +12 -0
- openai/types/video_list_params.py +21 -0
- openai/types/video_model.py +7 -0
- openai/types/video_remix_params.py +12 -0
- openai/types/video_seconds.py +7 -0
- openai/types/video_size.py +7 -0
- openai/types/webhooks/__init__.py +24 -0
- openai/types/webhooks/batch_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/batch_completed_webhook_event.py +30 -0
- openai/types/webhooks/batch_expired_webhook_event.py +30 -0
- openai/types/webhooks/batch_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_canceled_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/realtime_call_incoming_webhook_event.py +41 -0
- openai/types/webhooks/response_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/response_completed_webhook_event.py +30 -0
- openai/types/webhooks/response_failed_webhook_event.py +30 -0
- openai/types/webhooks/response_incomplete_webhook_event.py +30 -0
- openai/types/webhooks/unwrap_webhook_event.py +44 -0
- openai/types/websocket_connection_options.py +36 -0
- openai/version.py +3 -0
@@ -0,0 +1,3143 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import inspect
+from typing import Dict, List, Type, Union, Iterable, Optional, cast
+from functools import partial
+from typing_extensions import Literal, overload
+
+import httpx
+import pydantic
+
+from .... import _legacy_response
+from .messages import (
+    Messages,
+    AsyncMessages,
+    MessagesWithRawResponse,
+    AsyncMessagesWithRawResponse,
+    MessagesWithStreamingResponse,
+    AsyncMessagesWithStreamingResponse,
+)
+from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...._utils import required_args, maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...._streaming import Stream, AsyncStream
+from ....pagination import SyncCursorPage, AsyncCursorPage
+from ....types.chat import (
+    ChatCompletionAudioParam,
+    completion_list_params,
+    completion_create_params,
+    completion_update_params,
+)
+from ...._base_client import AsyncPaginator, make_request_options
+from ....lib._parsing import (
+    ResponseFormatT,
+    validate_input_tools as _validate_input_tools,
+    parse_chat_completion as _parse_chat_completion,
+    type_to_response_format_param as _type_to_response_format,
+)
+from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
+from ....types.shared.chat_model import ChatModel
+from ....types.chat.chat_completion import ChatCompletion
+from ....types.shared_params.metadata import Metadata
+from ....types.shared.reasoning_effort import ReasoningEffort
+from ....types.chat.chat_completion_chunk import ChatCompletionChunk
+from ....types.chat.parsed_chat_completion import ParsedChatCompletion
+from ....types.chat.chat_completion_deleted import ChatCompletionDeleted
+from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
+from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_tool_union_param import ChatCompletionToolUnionParam
+from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+
+__all__ = ["Completions", "AsyncCompletions"]
+
+
+class Completions(SyncAPIResource):
+    @cached_property
+    def messages(self) -> Messages:
+        return Messages(self._client)
+
+    @cached_property
+    def with_raw_response(self) -> CompletionsWithRawResponse:
+        """
+        This property can be used as a prefix for any HTTP method call to return
+        the raw response object instead of the parsed content.
+
+        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+        """
+        return CompletionsWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
+        """
+        An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+        """
+        return CompletionsWithStreamingResponse(self)
+
+    def parse(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
+        response_format: type[ResponseFormatT] | Omit = omit,
+        frequency_penalty: Optional[float] | Omit = omit,
+        function_call: completion_create_params.FunctionCall | Omit = omit,
+        functions: Iterable[completion_create_params.Function] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Metadata] | Omit = omit,
+        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        parallel_tool_calls: bool | Omit = omit,
+        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        prompt_cache_key: str | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
+        safety_identifier: str | Omit = omit,
+        seed: Optional[int] | Omit = omit,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        store: Optional[bool] | Omit = omit,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
+        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
+        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ParsedChatCompletion[ResponseFormatT]:
+        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
+        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
+
+        You can pass a pydantic model to this method and it will automatically convert the model
+        into a JSON schema, send it to the API and parse the response content back into the given model.
+
+        This method will also automatically parse `function` tool calls if:
+        - You use the `openai.pydantic_function_tool()` helper method
+        - You mark your tool schema with `"strict": True`
+
+        Example usage:
+        ```py
+        from pydantic import BaseModel
+        from openai import OpenAI
+
+
+        class Step(BaseModel):
+            explanation: str
+            output: str
+
+
+        class MathResponse(BaseModel):
+            steps: List[Step]
+            final_answer: str
+
+
+        client = OpenAI()
+        completion = client.chat.completions.parse(
+            model="gpt-4o-2024-08-06",
+            messages=[
+                {"role": "system", "content": "You are a helpful math tutor."},
+                {"role": "user", "content": "solve 8x + 31 = 2"},
+            ],
+            response_format=MathResponse,
+        )
+
+        message = completion.choices[0].message
+        if message.parsed:
+            print(message.parsed.steps)
+            print("answer: ", message.parsed.final_answer)
+        ```
+        """
+        chat_completion_tools = _validate_input_tools(tools)
+
+        extra_headers = {
+            "X-Stainless-Helper-Method": "chat.completions.parse",
+            **(extra_headers or {}),
+        }
+
+        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
+            return _parse_chat_completion(
+                response_format=response_format,
+                chat_completion=raw_completion,
+                input_tools=chat_completion_tools,
+            )
+
+        return self._post(
+            "/chat/completions",
+            body=maybe_transform(
+                {
+                    "messages": messages,
+                    "model": model,
+                    "audio": audio,
+                    "frequency_penalty": frequency_penalty,
+                    "function_call": function_call,
+                    "functions": functions,
+                    "logit_bias": logit_bias,
+                    "logprobs": logprobs,
+                    "max_completion_tokens": max_completion_tokens,
+                    "max_tokens": max_tokens,
+                    "metadata": metadata,
+                    "modalities": modalities,
+                    "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
+                    "prediction": prediction,
+                    "presence_penalty": presence_penalty,
+                    "prompt_cache_key": prompt_cache_key,
+                    "prompt_cache_retention": prompt_cache_retention,
+                    "reasoning_effort": reasoning_effort,
+                    "response_format": _type_to_response_format(response_format),
+                    "safety_identifier": safety_identifier,
+                    "seed": seed,
+                    "service_tier": service_tier,
+                    "stop": stop,
+                    "store": store,
+                    "stream": False,
+                    "stream_options": stream_options,
+                    "temperature": temperature,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_logprobs": top_logprobs,
+                    "top_p": top_p,
+                    "user": user,
+                    "verbosity": verbosity,
+                    "web_search_options": web_search_options,
+                },
+                completion_create_params.CompletionCreateParams,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
+            # in the `parser` function above
+            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
+            stream=False,
+        )
+
+    @overload
+    def create(
+        self,
+        *,
+        messages: Iterable[ChatCompletionMessageParam],
+        model: Union[str, ChatModel],
+        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
+        frequency_penalty: Optional[float] | Omit = omit,
+        function_call: completion_create_params.FunctionCall | Omit = omit,
+        functions: Iterable[completion_create_params.Function] | Omit = omit,
+        logit_bias: Optional[Dict[str, int]] | Omit = omit,
+        logprobs: Optional[bool] | Omit = omit,
+        max_completion_tokens: Optional[int] | Omit = omit,
+        max_tokens: Optional[int] | Omit = omit,
+        metadata: Optional[Metadata] | Omit = omit,
+        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
+        n: Optional[int] | Omit = omit,
+        parallel_tool_calls: bool | Omit = omit,
+        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
+        presence_penalty: Optional[float] | Omit = omit,
+        prompt_cache_key: str | Omit = omit,
+        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
+        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
+        response_format: completion_create_params.ResponseFormat | Omit = omit,
+        safety_identifier: str | Omit = omit,
+        seed: Optional[int] | Omit = omit,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
+        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
+        store: Optional[bool] | Omit = omit,
+        stream: Optional[Literal[False]] | Omit = omit,
+        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
+        temperature: Optional[float] | Omit = omit,
+        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
+        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
+        top_logprobs: Optional[int] | Omit = omit,
+        top_p: Optional[float] | Omit = omit,
+        user: str | Omit = omit,
+        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
+        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = not_given,
+    ) -> ChatCompletion:
+        """
+        **Starting a new project?** We recommend trying
+        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
+        advantage of the latest OpenAI platform features. Compare
+        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
+
+        ---
+
+        Creates a model response for the given chat conversation. Learn more in the
+        [text generation](https://platform.openai.com/docs/guides/text-generation),
+        [vision](https://platform.openai.com/docs/guides/vision), and
+        [audio](https://platform.openai.com/docs/guides/audio) guides.
+
+        Parameter support can differ depending on the model used to generate the
+        response, particularly for newer reasoning models. Parameters that are only
+        supported for reasoning models are noted below. For the current state of
+        unsupported parameters in reasoning models,
+        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
+
+        Args:
+          messages: A list of messages comprising the conversation so far. Depending on the
+              [model](https://platform.openai.com/docs/models) you use, different message
+              types (modalities) are supported, like
+              [text](https://platform.openai.com/docs/guides/text-generation),
+              [images](https://platform.openai.com/docs/guides/vision), and
+              [audio](https://platform.openai.com/docs/guides/audio).
+
+          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+              wide range of models with different capabilities, performance characteristics,
+              and price points. Refer to the
+              [model guide](https://platform.openai.com/docs/models) to browse and compare
+              available models.
+
+          audio: Parameters for audio output. Required when audio output is requested with
+              `modalities: ["audio"]`.
+              [Learn more](https://platform.openai.com/docs/guides/audio).
+
+          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
+              existing frequency in the text so far, decreasing the model's likelihood to
+              repeat the same line verbatim.
+
+          function_call: Deprecated in favor of `tool_choice`.
+
+              Controls which (if any) function is called by the model.
+
+              `none` means the model will not call a function and instead generates a message.
+
+              `auto` means the model can pick between generating a message or calling a
+              function.
+
+              Specifying a particular function via `{"name": "my_function"}` forces the model
+              to call that function.
+
+              `none` is the default when no functions are present. `auto` is the default if
+              functions are present.
+
+          functions: Deprecated in favor of `tools`.
+
+              A list of functions the model may generate JSON inputs for.
+
+          logit_bias: Modify the likelihood of specified tokens appearing in the completion.
+
+              Accepts a JSON object that maps tokens (specified by their token ID in the
+              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+              bias is added to the logits generated by the model prior to sampling. The exact
+              effect will vary per model, but values between -1 and 1 should decrease or
+              increase likelihood of selection; values like -100 or 100 should result in a ban
+              or exclusive selection of the relevant token.
+
+          logprobs: Whether to return log probabilities of the output tokens or not. If true,
+              returns the log probabilities of each output token returned in the `content` of
+              `message`.
+
+          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
+              including visible output tokens and
+              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+
+          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
+              completion. This value can be used to control
+              [costs](https://openai.com/api/pricing/) for text generated via API.
+
+              This value is now deprecated in favor of `max_completion_tokens`, and is not
+              compatible with
+              [o-series models](https://platform.openai.com/docs/guides/reasoning).
+
+          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+              for storing additional information about the object in a structured format, and
+              querying for objects via API or the dashboard.
+
+              Keys are strings with a maximum length of 64 characters. Values are strings with
+              a maximum length of 512 characters.
+
+          modalities: Output types that you would like the model to generate. Most models are capable
+              of generating text, which is the default:
+
+              `["text"]`
+
+              The `gpt-4o-audio-preview` model can also be used to
+              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
+              this model generate both text and audio responses, you can use:
+
+              `["text", "audio"]`
+
+          n: How many chat completion choices to generate for each input message. Note that
+              you will be charged based on the number of generated tokens across all of the
+              choices. Keep `n` as `1` to minimize costs.
+
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
+              during tool use.
+
+          prediction: Static predicted output content, such as the content of a text file that is
+              being regenerated.
+
+          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
+              whether they appear in the text so far, increasing the model's likelihood to
+              talk about new topics.
+
+          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
+              hit rates. Replaces the `user` field.
+              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+
+          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
+              prompt caching, which keeps cached prefixes active for longer, up to a maximum
+              of 24 hours.
+              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+
+          reasoning_effort: Constrains effort on reasoning for
+              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
+              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+              reasoning effort can result in faster responses and fewer tokens used on
+              reasoning in a response.
+
+              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+                calls are supported for all reasoning values in gpt-5.1.
+              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+                support `none`.
+              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
+
+          response_format: An object specifying the format that the model must output.
+
+              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+              Outputs which ensures the model will match your supplied JSON schema. Learn more
+              in the
+              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+
+              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+              ensures the message the model generates is valid JSON. Using `json_schema` is
+              preferred for models that support it.
+
+          safety_identifier: A stable identifier used to help detect users of your application that may be
+              violating OpenAI's usage policies. The IDs should be a string that uniquely
+              identifies each user. We recommend hashing their username or email address, in
+              order to avoid sending us any identifying information.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+          seed: This feature is in Beta. If specified, our system will make a best effort to
+              sample deterministically, such that repeated requests with the same `seed` and
+              parameters should return the same result. Determinism is not guaranteed, and you
+              should refer to the `system_fingerprint` response parameter to monitor changes
+              in the backend.
+
+          service_tier: Specifies the processing type used for serving the request.
+
+              - If set to 'auto', then the request will be processed with the service tier
+                configured in the Project settings. Unless otherwise configured, the Project
+                will use 'default'.
+              - If set to 'default', then the request will be processed with the standard
+                pricing and performance for the selected model.
+              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+                '[priority](https://openai.com/api-priority-processing/)', then the request
+                will be processed with the corresponding service tier.
+              - When not set, the default behavior is 'auto'.
+
+              When the `service_tier` parameter is set, the response body will include the
+              `service_tier` value based on the processing mode actually used to serve the
+              request. This response value may be different from the value set in the
+              parameter.
+
+          stop: Not supported with latest reasoning models `o3` and `o4-mini`.
+
+              Up to 4 sequences where the API will stop generating further tokens. The
+              returned text will not contain the stop sequence.
+
+          store: Whether or not to store the output of this chat completion request for use in
+              our [model distillation](https://platform.openai.com/docs/guides/distillation)
+              or [evals](https://platform.openai.com/docs/guides/evals) products.
+
+              Supports text and image inputs. Note: image inputs over 8MB will be dropped.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
+              for more information, along with the
+              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
+              guide for more information on how to handle the streaming events.
+
+          stream_options: Options for streaming response. Only set this when you set `stream: true`.
+
+          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+              make the output more random, while lower values like 0.2 will make it more
+              focused and deterministic. We generally recommend altering this or `top_p` but
+              not both.
+
+          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
+              not call any tool and instead generates a message. `auto` means the model can
+              pick between generating a message or calling one or more tools. `required` means
+              the model must call one or more tools. Specifying a particular tool via
+              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
+              call that tool.
+
+              `none` is the default when no tools are present. `auto` is the default if tools
+              are present.
+
+          tools: A list of tools the model may call. You can provide either
+              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+              or [function tools](https://platform.openai.com/docs/guides/function-calling).
+
+          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
+              return at each token position, each with an associated log probability.
+              `logprobs` must be set to `true` if this parameter is used.
+
+          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
+              model considers the results of the tokens with top_p probability mass. So 0.1
+              means only the tokens comprising the top 10% probability mass are considered.
+
+              We generally recommend altering this or `temperature` but not both.
+
+          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+              `prompt_cache_key` instead to maintain caching optimizations. A stable
+              identifier for your end-users. Used to boost cache hit rates by better bucketing
+              similar requests and to help OpenAI detect and prevent abuse.
+              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+
+          verbosity: Constrains the verbosity of the model's response. Lower values will result in
+              more concise responses, while higher values will result in more verbose
+              responses. Currently supported values are `low`, `medium`, and `high`.
+
+          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
+              about the
+              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
+
+          extra_headers: Send extra headers
+
+          extra_query: Add additional query parameters to the request
+
+          extra_body: Add additional JSON properties to the request
+
+          timeout: Override the client-level default timeout for this request, in seconds
+        """
+        ...
|
|
539
|
+
|
|
540
|
+
@overload
|
|
541
|
+
def create(
|
|
542
|
+
self,
|
|
543
|
+
*,
|
|
544
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
545
|
+
model: Union[str, ChatModel],
|
|
546
|
+
stream: Literal[True],
|
|
547
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
548
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
549
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
550
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
551
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
552
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
553
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
554
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
555
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
556
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
557
|
+
n: Optional[int] | Omit = omit,
|
|
558
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
559
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
560
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
561
|
+
prompt_cache_key: str | Omit = omit,
|
|
562
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
563
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
564
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
565
|
+
safety_identifier: str | Omit = omit,
|
|
566
|
+
seed: Optional[int] | Omit = omit,
|
|
567
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
568
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
569
|
+
store: Optional[bool] | Omit = omit,
|
|
570
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
571
|
+
temperature: Optional[float] | Omit = omit,
|
|
572
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
573
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
574
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
575
|
+
top_p: Optional[float] | Omit = omit,
|
|
576
|
+
user: str | Omit = omit,
|
|
577
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
578
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
579
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
580
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
581
|
+
extra_headers: Headers | None = None,
|
|
582
|
+
extra_query: Query | None = None,
|
|
583
|
+
extra_body: Body | None = None,
|
|
584
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
585
|
+
) -> Stream[ChatCompletionChunk]:
|
|
586
|
+
"""
|
|
587
|
+
**Starting a new project?** We recommend trying
|
|
588
|
+
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
|
|
589
|
+
advantage of the latest OpenAI platform features. Compare
|
|
590
|
+
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
|
|
591
|
+
|
|
592
|
+
---
|
|
593
|
+
|
|
594
|
+
Creates a model response for the given chat conversation. Learn more in the
|
|
595
|
+
[text generation](https://platform.openai.com/docs/guides/text-generation),
|
|
596
|
+
[vision](https://platform.openai.com/docs/guides/vision), and
|
|
597
|
+
[audio](https://platform.openai.com/docs/guides/audio) guides.
|
|
598
|
+
|
|
599
|
+
Parameter support can differ depending on the model used to generate the
|
|
600
|
+
response, particularly for newer reasoning models. Parameters that are only
|
|
601
|
+
supported for reasoning models are noted below. For the current state of
|
|
602
|
+
unsupported parameters in reasoning models,
|
|
603
|
+
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
|
|
604
|
+
|
|
605
|
+
Args:
|
|
606
|
+
messages: A list of messages comprising the conversation so far. Depending on the
|
|
607
|
+
[model](https://platform.openai.com/docs/models) you use, different message
|
|
608
|
+
types (modalities) are supported, like
|
|
609
|
+
[text](https://platform.openai.com/docs/guides/text-generation),
|
|
610
|
+
[images](https://platform.openai.com/docs/guides/vision), and
|
|
611
|
+
[audio](https://platform.openai.com/docs/guides/audio).
|
|
612
|
+
|
|
613
|
+
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
|
614
|
+
wide range of models with different capabilities, performance characteristics,
|
|
615
|
+
and price points. Refer to the
|
|
616
|
+
[model guide](https://platform.openai.com/docs/models) to browse and compare
|
|
617
|
+
available models.
|
|
618
|
+
|
|
619
|
+
stream: If set to true, the model response data will be streamed to the client as it is
|
|
620
|
+
generated using
|
|
621
|
+
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
|
|
622
|
+
See the
|
|
623
|
+
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
|
|
624
|
+
for more information, along with the
|
|
625
|
+
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
|
|
626
|
+
guide for more information on how to handle the streaming events.
|
|
627
|
+
|
|
628
|
+
audio: Parameters for audio output. Required when audio output is requested with
|
|
629
|
+
`modalities: ["audio"]`.
|
|
630
|
+
[Learn more](https://platform.openai.com/docs/guides/audio).
|
|
631
|
+
|
|
632
|
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
|
|
633
|
+
existing frequency in the text so far, decreasing the model's likelihood to
|
|
634
|
+
repeat the same line verbatim.
|
|
635
|
+
|
|
636
|
+
function_call: Deprecated in favor of `tool_choice`.
|
|
637
|
+
|
|
638
|
+
Controls which (if any) function is called by the model.
|
|
639
|
+
|
|
640
|
+
`none` means the model will not call a function and instead generates a message.
|
|
641
|
+
|
|
642
|
+
`auto` means the model can pick between generating a message or calling a
|
|
643
|
+
function.
|
|
644
|
+
|
|
645
|
+
Specifying a particular function via `{"name": "my_function"}` forces the model
|
|
646
|
+
to call that function.
|
|
647
|
+
|
|
648
|
+
`none` is the default when no functions are present. `auto` is the default if
|
|
649
|
+
functions are present.
|
|
650
|
+
|
|
651
|
+
functions: Deprecated in favor of `tools`.
|
|
652
|
+
|
|
653
|
+
A list of functions the model may generate JSON inputs for.
|
|
654
|
+
|
|
655
|
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
|
|
656
|
+
|
|
657
|
+
Accepts a JSON object that maps tokens (specified by their token ID in the
|
|
658
|
+
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
|
|
659
|
+
bias is added to the logits generated by the model prior to sampling. The exact
|
|
660
|
+
effect will vary per model, but values between -1 and 1 should decrease or
|
|
661
|
+
increase likelihood of selection; values like -100 or 100 should result in a ban
|
|
662
|
+
or exclusive selection of the relevant token.
|
|
663
|
+
|
|
664
|
+
logprobs: Whether to return log probabilities of the output tokens or not. If true,
|
|
665
|
+
returns the log probabilities of each output token returned in the `content` of
|
|
666
|
+
`message`.
|
|
667
|
+
|
|
668
|
+
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
|
|
669
|
+
including visible output tokens and
|
|
670
|
+
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
|
671
|
+
|
|
672
|
+
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
|
|
673
|
+
completion. This value can be used to control
|
|
674
|
+
[costs](https://openai.com/api/pricing/) for text generated via API.
|
|
675
|
+
|
|
676
|
+
This value is now deprecated in favor of `max_completion_tokens`, and is not
|
|
677
|
+
compatible with
|
|
678
|
+
[o-series models](https://platform.openai.com/docs/guides/reasoning).
|
|
679
|
+
|
|
680
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
681
|
+
for storing additional information about the object in a structured format, and
|
|
682
|
+
querying for objects via API or the dashboard.
|
|
683
|
+
|
|
684
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
685
|
+
a maximum length of 512 characters.
|
|
686
|
+
|
|
687
|
+
modalities: Output types that you would like the model to generate. Most models are capable
|
|
688
|
+
of generating text, which is the default:
|
|
689
|
+
|
|
690
|
+
`["text"]`
|
|
691
|
+
|
|
692
|
+
The `gpt-4o-audio-preview` model can also be used to
|
|
693
|
+
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
|
|
694
|
+
this model generate both text and audio responses, you can use:
|
|
695
|
+
|
|
696
|
+
`["text", "audio"]`
|
|
697
|
+
|
|
698
|
+
n: How many chat completion choices to generate for each input message. Note that
|
|
699
|
+
you will be charged based on the number of generated tokens across all of the
|
|
700
|
+
choices. Keep `n` as `1` to minimize costs.
|
|
701
|
+
|
|
702
|
+
parallel_tool_calls: Whether to enable
|
|
703
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
704
|
+
during tool use.
|
|
705
|
+
|
|
706
|
+
prediction: Static predicted output content, such as the content of a text file that is
|
|
707
|
+
being regenerated.
|
|
708
|
+
|
|
709
|
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
|
|
710
|
+
whether they appear in the text so far, increasing the model's likelihood to
|
|
711
|
+
talk about new topics.
|
|
712
|
+
|
|
713
|
+
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
|
|
714
|
+
hit rates. Replaces the `user` field.
|
|
715
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
|
|
716
|
+
|
|
717
|
+
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
|
|
718
|
+
prompt caching, which keeps cached prefixes active for longer, up to a maximum
|
|
719
|
+
of 24 hours.
|
|
720
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
|
|
721
|
+
|
|
722
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
723
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
724
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
725
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
726
|
+
reasoning in a response.
|
|
727
|
+
|
|
728
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
729
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
730
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
731
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
732
|
+
support `none`.
|
|
733
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
734
|
+
|
|
735
|
+
response_format: An object specifying the format that the model must output.
|
|
736
|
+
|
|
737
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
738
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
739
|
+
in the
|
|
740
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
741
|
+
|
|
742
|
+
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
|
|
743
|
+
ensures the message the model generates is valid JSON. Using `json_schema` is
|
|
744
|
+
preferred for models that support it.
|
|
745
|
+
|
|
746
|
+
safety_identifier: A stable identifier used to help detect users of your application that may be
|
|
747
|
+
violating OpenAI's usage policies. The IDs should be a string that uniquely
|
|
748
|
+
identifies each user. We recommend hashing their username or email address, in
|
|
749
|
+
order to avoid sending us any identifying information.
|
|
750
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
751
|
+
|
|
752
|
+
seed: This feature is in Beta. If specified, our system will make a best effort to
|
|
753
|
+
sample deterministically, such that repeated requests with the same `seed` and
|
|
754
|
+
parameters should return the same result. Determinism is not guaranteed, and you
|
|
755
|
+
should refer to the `system_fingerprint` response parameter to monitor changes
|
|
756
|
+
in the backend.
|
|
757
|
+
|
|
758
|
+
service_tier: Specifies the processing type used for serving the request.
|
|
759
|
+
|
|
760
|
+
- If set to 'auto', then the request will be processed with the service tier
|
|
761
|
+
configured in the Project settings. Unless otherwise configured, the Project
|
|
762
|
+
will use 'default'.
|
|
763
|
+
- If set to 'default', then the request will be processed with the standard
|
|
764
|
+
pricing and performance for the selected model.
|
|
765
|
+
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
|
766
|
+
'[priority](https://openai.com/api-priority-processing/)', then the request
|
|
767
|
+
will be processed with the corresponding service tier.
|
|
768
|
+
- When not set, the default behavior is 'auto'.
|
|
769
|
+
|
|
770
|
+
When the `service_tier` parameter is set, the response body will include the
|
|
771
|
+
`service_tier` value based on the processing mode actually used to serve the
|
|
772
|
+
request. This response value may be different from the value set in the
|
|
773
|
+
parameter.
|
|
774
|
+
|
|
775
|
+
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
|
|
776
|
+
|
|
777
|
+
Up to 4 sequences where the API will stop generating further tokens. The
|
|
778
|
+
returned text will not contain the stop sequence.
|
|
779
|
+
|
|
780
|
+
store: Whether or not to store the output of this chat completion request for use in
|
|
781
|
+
our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
|
782
|
+
or [evals](https://platform.openai.com/docs/guides/evals) products.
|
|
783
|
+
|
|
784
|
+
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
|
|
785
|
+
|
|
786
|
+
stream_options: Options for streaming response. Only set this when you set `stream: true`.
|
|
787
|
+
|
|
788
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
789
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
790
|
+
focused and deterministic. We generally recommend altering this or `top_p` but
|
|
791
|
+
not both.
|
|
792
|
+
|
|
793
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
794
|
+
not call any tool and instead generates a message. `auto` means the model can
|
|
795
|
+
pick between generating a message or calling one or more tools. `required` means
|
|
796
|
+
the model must call one or more tools. Specifying a particular tool via
|
|
797
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
798
|
+
call that tool.
|
|
799
|
+
|
|
800
|
+
`none` is the default when no tools are present. `auto` is the default if tools
|
|
801
|
+
are present.
|
|
802
|
+
|
|
803
|
+
tools: A list of tools the model may call. You can provide either
|
|
804
|
+
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
|
|
805
|
+
or [function tools](https://platform.openai.com/docs/guides/function-calling).
|
|
806
|
+
|
|
807
|
+
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
|
|
808
|
+
return at each token position, each with an associated log probability.
|
|
809
|
+
`logprobs` must be set to `true` if this parameter is used.
|
|
810
|
+
|
|
811
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
812
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
813
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
814
|
+
|
|
815
|
+
We generally recommend altering this or `temperature` but not both.
|
|
816
|
+
|
|
817
|
+
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
|
818
|
+
`prompt_cache_key` instead to maintain caching optimizations. A stable
|
|
819
|
+
identifier for your end-users. Used to boost cache hit rates by better bucketing
|
|
820
|
+
similar requests and to help OpenAI detect and prevent abuse.
|
|
821
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
822
|
+
|
|
823
|
+
verbosity: Constrains the verbosity of the model's response. Lower values will result in
|
|
824
|
+
more concise responses, while higher values will result in more verbose
|
|
825
|
+
responses. Currently supported values are `low`, `medium`, and `high`.
|
|
826
|
+
|
|
827
|
+
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
|
|
828
|
+
about the
|
|
829
|
+
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
|
|
830
|
+
|
|
831
|
+
extra_headers: Send extra headers
|
|
832
|
+
|
|
833
|
+
extra_query: Add additional query parameters to the request
|
|
834
|
+
|
|
835
|
+
extra_body: Add additional JSON properties to the request
|
|
836
|
+
|
|
837
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
838
|
+
"""
|
|
839
|
+
...
|
|
840
|
+
|
|
841
|
+
@overload
|
|
842
|
+
def create(
|
|
843
|
+
self,
|
|
844
|
+
*,
|
|
845
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
846
|
+
model: Union[str, ChatModel],
|
|
847
|
+
stream: bool,
|
|
848
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
849
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
850
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
851
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
852
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
853
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
854
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
855
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
856
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
857
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
858
|
+
n: Optional[int] | Omit = omit,
|
|
859
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
860
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
861
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
862
|
+
prompt_cache_key: str | Omit = omit,
|
|
863
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
864
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
865
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
866
|
+
safety_identifier: str | Omit = omit,
|
|
867
|
+
seed: Optional[int] | Omit = omit,
|
|
868
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
869
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
870
|
+
store: Optional[bool] | Omit = omit,
|
|
871
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
872
|
+
temperature: Optional[float] | Omit = omit,
|
|
873
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
874
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
875
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
876
|
+
top_p: Optional[float] | Omit = omit,
|
|
877
|
+
user: str | Omit = omit,
|
|
878
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
879
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
880
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
881
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
882
|
+
extra_headers: Headers | None = None,
|
|
883
|
+
extra_query: Query | None = None,
|
|
884
|
+
extra_body: Body | None = None,
|
|
885
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
886
|
+
) -> ChatCompletion | Stream[ChatCompletionChunk]:
|
|
887
|
+
"""
|
|
888
|
+
**Starting a new project?** We recommend trying
|
|
889
|
+
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
|
|
890
|
+
advantage of the latest OpenAI platform features. Compare
|
|
891
|
+
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
|
|
892
|
+
|
|
893
|
+
---
|
|
894
|
+
|
|
895
|
+
Creates a model response for the given chat conversation. Learn more in the
|
|
896
|
+
[text generation](https://platform.openai.com/docs/guides/text-generation),
|
|
897
|
+
[vision](https://platform.openai.com/docs/guides/vision), and
|
|
898
|
+
[audio](https://platform.openai.com/docs/guides/audio) guides.
|
|
899
|
+
|
|
900
|
+
Parameter support can differ depending on the model used to generate the
|
|
901
|
+
response, particularly for newer reasoning models. Parameters that are only
|
|
902
|
+
supported for reasoning models are noted below. For the current state of
|
|
903
|
+
unsupported parameters in reasoning models,
|
|
904
|
+
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
|
|
905
|
+
|
|
906
|
+
Args:
|
|
907
|
+
messages: A list of messages comprising the conversation so far. Depending on the
|
|
908
|
+
[model](https://platform.openai.com/docs/models) you use, different message
|
|
909
|
+
types (modalities) are supported, like
|
|
910
|
+
[text](https://platform.openai.com/docs/guides/text-generation),
|
|
911
|
+
[images](https://platform.openai.com/docs/guides/vision), and
|
|
912
|
+
[audio](https://platform.openai.com/docs/guides/audio).
|
|
913
|
+
|
|
914
|
+
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
|
915
|
+
wide range of models with different capabilities, performance characteristics,
|
|
916
|
+
and price points. Refer to the
|
|
917
|
+
[model guide](https://platform.openai.com/docs/models) to browse and compare
|
|
918
|
+
available models.
|
|
919
|
+
|
|
920
|
+
stream: If set to true, the model response data will be streamed to the client as it is
|
|
921
|
+
generated using
|
|
922
|
+
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
|
|
923
|
+
See the
|
|
924
|
+
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
|
|
925
|
+
for more information, along with the
|
|
926
|
+
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
|
|
927
|
+
guide for more information on how to handle the streaming events.
|
|
928
|
+
|
|
929
|
+
audio: Parameters for audio output. Required when audio output is requested with
|
|
930
|
+
`modalities: ["audio"]`.
|
|
931
|
+
[Learn more](https://platform.openai.com/docs/guides/audio).
|
|
932
|
+
|
|
933
|
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
|
|
934
|
+
existing frequency in the text so far, decreasing the model's likelihood to
|
|
935
|
+
repeat the same line verbatim.
|
|
936
|
+
|
|
937
|
+
function_call: Deprecated in favor of `tool_choice`.
|
|
938
|
+
|
|
939
|
+
Controls which (if any) function is called by the model.
|
|
940
|
+
|
|
941
|
+
`none` means the model will not call a function and instead generates a message.
|
|
942
|
+
|
|
943
|
+
`auto` means the model can pick between generating a message or calling a
|
|
944
|
+
function.
|
|
945
|
+
|
|
946
|
+
Specifying a particular function via `{"name": "my_function"}` forces the model
|
|
947
|
+
to call that function.
|
|
948
|
+
|
|
949
|
+
`none` is the default when no functions are present. `auto` is the default if
|
|
950
|
+
functions are present.
|
|
951
|
+
|
|
952
|
+
functions: Deprecated in favor of `tools`.
|
|
953
|
+
|
|
954
|
+
A list of functions the model may generate JSON inputs for.
|
|
955
|
+
|
|
956
|
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
|
|
957
|
+
|
|
958
|
+
Accepts a JSON object that maps tokens (specified by their token ID in the
|
|
959
|
+
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
|
|
960
|
+
bias is added to the logits generated by the model prior to sampling. The exact
|
|
961
|
+
effect will vary per model, but values between -1 and 1 should decrease or
|
|
962
|
+
increase likelihood of selection; values like -100 or 100 should result in a ban
|
|
963
|
+
or exclusive selection of the relevant token.
|
|
964
|
+
|
|
965
|
+
logprobs: Whether to return log probabilities of the output tokens or not. If true,
|
|
966
|
+
returns the log probabilities of each output token returned in the `content` of
|
|
967
|
+
`message`.
|
|
968
|
+
|
|
969
|
+
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
|
|
970
|
+
including visible output tokens and
|
|
971
|
+
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
|
972
|
+
|
|
973
|
+
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
|
|
974
|
+
completion. This value can be used to control
|
|
975
|
+
[costs](https://openai.com/api/pricing/) for text generated via API.
|
|
976
|
+
|
|
977
|
+
This value is now deprecated in favor of `max_completion_tokens`, and is not
|
|
978
|
+
compatible with
|
|
979
|
+
[o-series models](https://platform.openai.com/docs/guides/reasoning).
|
|
980
|
+
|
|
981
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
982
|
+
for storing additional information about the object in a structured format, and
|
|
983
|
+
querying for objects via API or the dashboard.
|
|
984
|
+
|
|
985
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
986
|
+
a maximum length of 512 characters.
|
|
987
|
+
|
|
988
|
+
modalities: Output types that you would like the model to generate. Most models are capable
|
|
989
|
+
of generating text, which is the default:
|
|
990
|
+
|
|
991
|
+
`["text"]`
|
|
992
|
+
|
|
993
|
+
The `gpt-4o-audio-preview` model can also be used to
|
|
994
|
+
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
|
|
995
|
+
this model generate both text and audio responses, you can use:
|
|
996
|
+
|
|
997
|
+
`["text", "audio"]`
|
|
998
|
+
|
|
999
|
+
n: How many chat completion choices to generate for each input message. Note that
|
|
1000
|
+
you will be charged based on the number of generated tokens across all of the
|
|
1001
|
+
choices. Keep `n` as `1` to minimize costs.
|
|
1002
|
+
|
|
1003
|
+
parallel_tool_calls: Whether to enable
|
|
1004
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
1005
|
+
during tool use.
|
|
1006
|
+
|
|
1007
|
+
prediction: Static predicted output content, such as the content of a text file that is
|
|
1008
|
+
being regenerated.
|
|
1009
|
+
|
|
1010
|
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
|
|
1011
|
+
whether they appear in the text so far, increasing the model's likelihood to
|
|
1012
|
+
talk about new topics.
|
|
1013
|
+
|
|
1014
|
+
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
|
|
1015
|
+
hit rates. Replaces the `user` field.
|
|
1016
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
|
|
1017
|
+
|
|
1018
|
+
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
|
|
1019
|
+
prompt caching, which keeps cached prefixes active for longer, up to a maximum
|
|
1020
|
+
of 24 hours.
|
|
1021
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
|
|
1022
|
+
|
|
1023
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
1024
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1025
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1026
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
1027
|
+
reasoning in a response.
|
|
1028
|
+
|
|
1029
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1030
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
1031
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
1032
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1033
|
+
support `none`.
|
|
1034
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1035
|
+
|
|
1036
|
+
response_format: An object specifying the format that the model must output.
|
|
1037
|
+
|
|
1038
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
1039
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
1040
|
+
in the
|
|
1041
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
1042
|
+
|
|
1043
|
+
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
|
|
1044
|
+
ensures the message the model generates is valid JSON. Using `json_schema` is
|
|
1045
|
+
preferred for models that support it.
|
|
1046
|
+
|
|
1047
|
+
safety_identifier: A stable identifier used to help detect users of your application that may be
|
|
1048
|
+
violating OpenAI's usage policies. The IDs should be a string that uniquely
|
|
1049
|
+
identifies each user. We recommend hashing their username or email address, in
|
|
1050
|
+
order to avoid sending us any identifying information.
|
|
1051
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
1052
|
+
|
|
1053
|
+
seed: This feature is in Beta. If specified, our system will make a best effort to
|
|
1054
|
+
sample deterministically, such that repeated requests with the same `seed` and
|
|
1055
|
+
parameters should return the same result. Determinism is not guaranteed, and you
|
|
1056
|
+
should refer to the `system_fingerprint` response parameter to monitor changes
|
|
1057
|
+
in the backend.
|
|
1058
|
+
|
|
1059
|
+
service_tier: Specifies the processing type used for serving the request.
|
|
1060
|
+
|
|
1061
|
+
- If set to 'auto', then the request will be processed with the service tier
|
|
1062
|
+
configured in the Project settings. Unless otherwise configured, the Project
|
|
1063
|
+
will use 'default'.
|
|
1064
|
+
- If set to 'default', then the request will be processed with the standard
|
|
1065
|
+
pricing and performance for the selected model.
|
|
1066
|
+
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
|
1067
|
+
'[priority](https://openai.com/api-priority-processing/)', then the request
|
|
1068
|
+
will be processed with the corresponding service tier.
|
|
1069
|
+
- When not set, the default behavior is 'auto'.
|
|
1070
|
+
|
|
1071
|
+
When the `service_tier` parameter is set, the response body will include the
|
|
1072
|
+
`service_tier` value based on the processing mode actually used to serve the
|
|
1073
|
+
request. This response value may be different from the value set in the
|
|
1074
|
+
parameter.
|
|
1075
|
+
|
|
1076
|
+
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
|
|
1077
|
+
|
|
1078
|
+
Up to 4 sequences where the API will stop generating further tokens. The
|
|
1079
|
+
returned text will not contain the stop sequence.
|
|
1080
|
+
|
|
1081
|
+
store: Whether or not to store the output of this chat completion request for use in
|
|
1082
|
+
our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
|
1083
|
+
or [evals](https://platform.openai.com/docs/guides/evals) products.
|
|
1084
|
+
|
|
1085
|
+
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
|
|
1086
|
+
|
|
1087
|
+
stream_options: Options for streaming response. Only set this when you set `stream: true`.
|
|
1088
|
+
|
|
1089
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
1090
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
1091
|
+
focused and deterministic. We generally recommend altering this or `top_p` but
|
|
1092
|
+
not both.
|
|
1093
|
+
|
|
1094
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
1095
|
+
not call any tool and instead generates a message. `auto` means the model can
|
|
1096
|
+
pick between generating a message or calling one or more tools. `required` means
|
|
1097
|
+
the model must call one or more tools. Specifying a particular tool via
|
|
1098
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
1099
|
+
call that tool.
|
|
1100
|
+
|
|
1101
|
+
`none` is the default when no tools are present. `auto` is the default if tools
|
|
1102
|
+
are present.
|
|
1103
|
+
|
|
1104
|
+
tools: A list of tools the model may call. You can provide either
|
|
1105
|
+
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
|
|
1106
|
+
or [function tools](https://platform.openai.com/docs/guides/function-calling).
|
|
1107
|
+
|
|
1108
|
+
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
|
|
1109
|
+
return at each token position, each with an associated log probability.
|
|
1110
|
+
`logprobs` must be set to `true` if this parameter is used.
|
|
1111
|
+
|
|
1112
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
1113
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
1114
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
1115
|
+
|
|
1116
|
+
We generally recommend altering this or `temperature` but not both.
|
|
1117
|
+
|
|
1118
|
+
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
|
1119
|
+
`prompt_cache_key` instead to maintain caching optimizations. A stable
|
|
1120
|
+
identifier for your end-users. Used to boost cache hit rates by better bucketing
|
|
1121
|
+
similar requests and to help OpenAI detect and prevent abuse.
|
|
1122
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
1123
|
+
|
|
1124
|
+
verbosity: Constrains the verbosity of the model's response. Lower values will result in
|
|
1125
|
+
more concise responses, while higher values will result in more verbose
|
|
1126
|
+
responses. Currently supported values are `low`, `medium`, and `high`.
|
|
1127
|
+
|
|
1128
|
+
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
|
|
1129
|
+
about the
|
|
1130
|
+
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
|
|
1131
|
+
|
|
1132
|
+
extra_headers: Send extra headers
|
|
1133
|
+
|
|
1134
|
+
extra_query: Add additional query parameters to the request
|
|
1135
|
+
|
|
1136
|
+
extra_body: Add additional JSON properties to the request
|
|
1137
|
+
|
|
1138
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1139
|
+
"""
|
|
1140
|
+
...
|
|
1141
|
+
|
|
1142
|
+
    @required_args(["messages", "model"], ["messages", "model", "stream"])
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        validate_response_format(response_format)
        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "prompt_cache_key": prompt_cache_key,
                    "prompt_cache_retention": prompt_cache_retention,
                    "reasoning_effort": reasoning_effort,
                    "response_format": response_format,
                    "safety_identifier": safety_identifier,
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": stream,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "verbosity": verbosity,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=Stream[ChatCompletionChunk],
        )

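For orientation, here is a minimal usage sketch of the `create()` method above. The client class name `Aimlapi` and its constructor arguments are assumptions made for illustration only (the actual export lives in `aimlapi/__init__.py`); the `create()` parameters themselves come from the signature in this file.

```py
# Illustrative sketch only -- the client name and constructor are assumed.
from aimlapi import Aimlapi  # assumed export; check aimlapi/__init__.py

client = Aimlapi(api_key="my-api-key")

# Non-streaming call: returns a ChatCompletion.
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(completion.choices[0].message.content)

# Streaming call: returns a Stream[ChatCompletionChunk].
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```
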
    def retrieve(
        self,
        completion_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion:
        """Get a stored chat completion.

        Only Chat Completions that have been created with
        the `store` parameter set to `true` will be returned.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not completion_id:
            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
        return self._get(
            f"/chat/completions/{completion_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
        )

    def update(
        self,
        completion_id: str,
        *,
        metadata: Optional[Metadata],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion:
        """Modify a stored chat completion.

        Only Chat Completions that have been created
        with the `store` parameter set to `true` can be modified. Currently, the only
        supported modification is to update the `metadata` field.

        Args:
          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not completion_id:
            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
        return self._post(
            f"/chat/completions/{completion_id}",
            body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
        )

    def list(
        self,
        *,
        after: str | Omit = omit,
        limit: int | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: str | Omit = omit,
        order: Literal["asc", "desc"] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> SyncCursorPage[ChatCompletion]:
        """List stored Chat Completions.

        Only Chat Completions that have been stored with
        the `store` parameter set to `true` will be returned.

        Args:
          after: Identifier for the last chat completion from the previous pagination request.

          limit: Number of Chat Completions to retrieve.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: The model used to generate the Chat Completions.

          order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
              `desc` for descending order. Defaults to `asc`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/chat/completions",
            page=SyncCursorPage[ChatCompletion],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                        "metadata": metadata,
                        "model": model,
                        "order": order,
                    },
                    completion_list_params.CompletionListParams,
                ),
            ),
            model=ChatCompletion,
        )

    def delete(
        self,
        completion_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletionDeleted:
        """Delete a stored chat completion.

        Only Chat Completions that have been created
        with the `store` parameter set to `true` can be deleted.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not completion_id:
            raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
        return self._delete(
            f"/chat/completions/{completion_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletionDeleted,
        )

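Taken together, the storage-related methods above (`create` with `store=True`, `retrieve`, `update`, `list`, `delete`) support a simple lifecycle. The sketch below is illustrative only: it presumes an already-constructed synchronous `client`, while the method names and parameters match the definitions in this file.

```py
# Illustrative sketch only -- `client` is assumed to be an existing sync client.
completion = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Remember this exchange"}],
    store=True,  # only stored completions can be retrieved, updated, listed, or deleted
)

fetched = client.chat.completions.retrieve(completion.id)
updated = client.chat.completions.update(completion.id, metadata={"topic": "demo"})

# list() returns a SyncCursorPage, which can be iterated directly.
for item in client.chat.completions.list(model="gpt-4o", limit=10):
    print(item.id)

deleted = client.chat.completions.delete(completion.id)
print(deleted.deleted)
```
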
    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        extra_headers = {
            "X-Stainless-Helper-Method": "chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self.create,
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            prompt_cache_key=prompt_cache_key,
            prompt_cache_retention=prompt_cache_retention,
            reasoning_effort=reasoning_effort,
            safety_identifier=safety_identifier,
            seed=seed,
            service_tier=service_tier,
            store=store,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            verbosity=verbosity,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


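The `.stream()` helper above pairs naturally with a typed `response_format`. The following is a hedged sketch, not verified against this package: the pydantic model, the prompt, and the `get_final_completion()` call are assumptions carried over from the upstream openai-python stream helpers that this wrapper appears to mirror.

```py
# Illustrative sketch only -- get_final_completion() is assumed from the
# upstream openai-python stream helpers; `client` is an existing sync client.
from pydantic import BaseModel


class Answer(BaseModel):
    text: str
    confidence: float


with client.chat.completions.stream(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Why is the sky blue? Answer briefly."}],
    response_format=Answer,
) as stream:
    for event in stream:
        if event.type == "content.delta":
            print(event.delta, end="", flush=True)

    final = stream.get_final_completion()  # assumed helper method
    print(final.choices[0].message.parsed)
```
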
class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def messages(self) -> AsyncMessages:
        return AsyncMessages(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

|
|
1567
|
+
async def parse(
|
|
1568
|
+
self,
|
|
1569
|
+
*,
|
|
1570
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
1571
|
+
model: Union[str, ChatModel],
|
|
1572
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
1573
|
+
response_format: type[ResponseFormatT] | Omit = omit,
|
|
1574
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
1575
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
1576
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
1577
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
1578
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
1579
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
1580
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
1581
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
1582
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
1583
|
+
n: Optional[int] | Omit = omit,
|
|
1584
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
1585
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
1586
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
1587
|
+
prompt_cache_key: str | Omit = omit,
|
|
1588
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
1589
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
1590
|
+
safety_identifier: str | Omit = omit,
|
|
1591
|
+
seed: Optional[int] | Omit = omit,
|
|
1592
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
1593
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
1594
|
+
store: Optional[bool] | Omit = omit,
|
|
1595
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
1596
|
+
temperature: Optional[float] | Omit = omit,
|
|
1597
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
1598
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
1599
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
1600
|
+
top_p: Optional[float] | Omit = omit,
|
|
1601
|
+
user: str | Omit = omit,
|
|
1602
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
1603
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
1604
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1605
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1606
|
+
extra_headers: Headers | None = None,
|
|
1607
|
+
extra_query: Query | None = None,
|
|
1608
|
+
extra_body: Body | None = None,
|
|
1609
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1610
|
+
) -> ParsedChatCompletion[ResponseFormatT]:
|
|
1611
|
+
"""Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
|
|
1612
|
+
& returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
|
|
1613
|
+
|
|
1614
|
+
You can pass a pydantic model to this method and it will automatically convert the model
|
|
1615
|
+
into a JSON schema, send it to the API and parse the response content back into the given model.
|
|
1616
|
+
|
|
1617
|
+
This method will also automatically parse `function` tool calls if:
|
|
1618
|
+
- You use the `openai.pydantic_function_tool()` helper method
|
|
1619
|
+
- You mark your tool schema with `"strict": True`
|
|
1620
|
+
|
|
1621
|
+
Example usage:
|
|
1622
|
+
```py
|
|
1623
|
+
from pydantic import BaseModel
|
|
1624
|
+
from openai import AsyncOpenAI
|
|
1625
|
+
|
|
1626
|
+
|
|
1627
|
+
class Step(BaseModel):
|
|
1628
|
+
explanation: str
|
|
1629
|
+
output: str
|
|
1630
|
+
|
|
1631
|
+
|
|
1632
|
+
class MathResponse(BaseModel):
|
|
1633
|
+
steps: List[Step]
|
|
1634
|
+
final_answer: str
|
|
1635
|
+
|
|
1636
|
+
|
|
1637
|
+
client = AsyncOpenAI()
|
|
1638
|
+
completion = await client.chat.completions.parse(
|
|
1639
|
+
model="gpt-4o-2024-08-06",
|
|
1640
|
+
messages=[
|
|
1641
|
+
{"role": "system", "content": "You are a helpful math tutor."},
|
|
1642
|
+
{"role": "user", "content": "solve 8x + 31 = 2"},
|
|
1643
|
+
],
|
|
1644
|
+
response_format=MathResponse,
|
|
1645
|
+
)
|
|
1646
|
+
|
|
1647
|
+
message = completion.choices[0].message
|
|
1648
|
+
if message.parsed:
|
|
1649
|
+
print(message.parsed.steps)
|
|
1650
|
+
print("answer: ", message.parsed.final_answer)
|
|
1651
|
+
```
|
|
1652
|
+
"""
|
|
1653
|
+
_validate_input_tools(tools)
|
|
1654
|
+
|
|
1655
|
+
extra_headers = {
|
|
1656
|
+
"X-Stainless-Helper-Method": "chat.completions.parse",
|
|
1657
|
+
**(extra_headers or {}),
|
|
1658
|
+
}
|
|
1659
|
+
|
|
1660
|
+
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
|
|
1661
|
+
return _parse_chat_completion(
|
|
1662
|
+
response_format=response_format,
|
|
1663
|
+
chat_completion=raw_completion,
|
|
1664
|
+
input_tools=tools,
|
|
1665
|
+
)
|
|
1666
|
+
|
|
1667
|
+
return await self._post(
|
|
1668
|
+
"/chat/completions",
|
|
1669
|
+
body=await async_maybe_transform(
|
|
1670
|
+
{
|
|
1671
|
+
"messages": messages,
|
|
1672
|
+
"model": model,
|
|
1673
|
+
"audio": audio,
|
|
1674
|
+
"frequency_penalty": frequency_penalty,
|
|
1675
|
+
"function_call": function_call,
|
|
1676
|
+
"functions": functions,
|
|
1677
|
+
"logit_bias": logit_bias,
|
|
1678
|
+
"logprobs": logprobs,
|
|
1679
|
+
"max_completion_tokens": max_completion_tokens,
|
|
1680
|
+
"max_tokens": max_tokens,
|
|
1681
|
+
"metadata": metadata,
|
|
1682
|
+
"modalities": modalities,
|
|
1683
|
+
"n": n,
|
|
1684
|
+
"parallel_tool_calls": parallel_tool_calls,
|
|
1685
|
+
"prediction": prediction,
|
|
1686
|
+
"presence_penalty": presence_penalty,
|
|
1687
|
+
"prompt_cache_key": prompt_cache_key,
|
|
1688
|
+
"prompt_cache_retention": prompt_cache_retention,
|
|
1689
|
+
"reasoning_effort": reasoning_effort,
|
|
1690
|
+
"response_format": _type_to_response_format(response_format),
|
|
1691
|
+
"safety_identifier": safety_identifier,
|
|
1692
|
+
"seed": seed,
|
|
1693
|
+
"service_tier": service_tier,
|
|
1694
|
+
"store": store,
|
|
1695
|
+
"stop": stop,
|
|
1696
|
+
"stream": False,
|
|
1697
|
+
"stream_options": stream_options,
|
|
1698
|
+
"temperature": temperature,
|
|
1699
|
+
"tool_choice": tool_choice,
|
|
1700
|
+
"tools": tools,
|
|
1701
|
+
"top_logprobs": top_logprobs,
|
|
1702
|
+
"top_p": top_p,
|
|
1703
|
+
"user": user,
|
|
1704
|
+
"verbosity": verbosity,
|
|
1705
|
+
"web_search_options": web_search_options,
|
|
1706
|
+
},
|
|
1707
|
+
completion_create_params.CompletionCreateParams,
|
|
1708
|
+
),
|
|
1709
|
+
options=make_request_options(
|
|
1710
|
+
extra_headers=extra_headers,
|
|
1711
|
+
extra_query=extra_query,
|
|
1712
|
+
extra_body=extra_body,
|
|
1713
|
+
timeout=timeout,
|
|
1714
|
+
post_parser=parser,
|
|
1715
|
+
),
|
|
1716
|
+
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
|
|
1717
|
+
# in the `parser` function above
|
|
1718
|
+
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
|
|
1719
|
+
stream=False,
|
|
1720
|
+
)
|
|
1721
|
+
|
|
1722
|
+
@overload
|
|
1723
|
+
async def create(
|
|
1724
|
+
self,
|
|
1725
|
+
*,
|
|
1726
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
1727
|
+
model: Union[str, ChatModel],
|
|
1728
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
1729
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
1730
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
1731
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
1732
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
1733
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
1734
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
1735
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
1736
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
1737
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
1738
|
+
n: Optional[int] | Omit = omit,
|
|
1739
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
1740
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
1741
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
1742
|
+
prompt_cache_key: str | Omit = omit,
|
|
1743
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
1744
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
1745
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
1746
|
+
safety_identifier: str | Omit = omit,
|
|
1747
|
+
seed: Optional[int] | Omit = omit,
|
|
1748
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
1749
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
1750
|
+
store: Optional[bool] | Omit = omit,
|
|
1751
|
+
stream: Optional[Literal[False]] | Omit = omit,
|
|
1752
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
1753
|
+
temperature: Optional[float] | Omit = omit,
|
|
1754
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
1755
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
1756
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
1757
|
+
top_p: Optional[float] | Omit = omit,
|
|
1758
|
+
user: str | Omit = omit,
|
|
1759
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
1760
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
1761
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1762
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1763
|
+
extra_headers: Headers | None = None,
|
|
1764
|
+
extra_query: Query | None = None,
|
|
1765
|
+
extra_body: Body | None = None,
|
|
1766
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1767
|
+
) -> ChatCompletion:
|
|
1768
|
+
"""
|
|
1769
|
+
**Starting a new project?** We recommend trying
|
|
1770
|
+
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
|
|
1771
|
+
advantage of the latest OpenAI platform features. Compare
|
|
1772
|
+
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
|
|
1773
|
+
|
|
1774
|
+
---
|
|
1775
|
+
|
|
1776
|
+
Creates a model response for the given chat conversation. Learn more in the
|
|
1777
|
+
[text generation](https://platform.openai.com/docs/guides/text-generation),
|
|
1778
|
+
[vision](https://platform.openai.com/docs/guides/vision), and
|
|
1779
|
+
[audio](https://platform.openai.com/docs/guides/audio) guides.
|
|
1780
|
+
|
|
1781
|
+
Parameter support can differ depending on the model used to generate the
|
|
1782
|
+
response, particularly for newer reasoning models. Parameters that are only
|
|
1783
|
+
supported for reasoning models are noted below. For the current state of
|
|
1784
|
+
unsupported parameters in reasoning models,
|
|
1785
|
+
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
|
|
1786
|
+
|
|
1787
|
+
Args:
|
|
1788
|
+
messages: A list of messages comprising the conversation so far. Depending on the
|
|
1789
|
+
[model](https://platform.openai.com/docs/models) you use, different message
|
|
1790
|
+
types (modalities) are supported, like
|
|
1791
|
+
[text](https://platform.openai.com/docs/guides/text-generation),
|
|
1792
|
+
[images](https://platform.openai.com/docs/guides/vision), and
|
|
1793
|
+
[audio](https://platform.openai.com/docs/guides/audio).
|
|
1794
|
+
|
|
1795
|
+
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
|
1796
|
+
wide range of models with different capabilities, performance characteristics,
|
|
1797
|
+
and price points. Refer to the
|
|
1798
|
+
[model guide](https://platform.openai.com/docs/models) to browse and compare
|
|
1799
|
+
available models.
|
|
1800
|
+
|
|
1801
|
+
audio: Parameters for audio output. Required when audio output is requested with
|
|
1802
|
+
`modalities: ["audio"]`.
|
|
1803
|
+
[Learn more](https://platform.openai.com/docs/guides/audio).
|
|
1804
|
+
|
|
1805
|
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
|
|
1806
|
+
existing frequency in the text so far, decreasing the model's likelihood to
|
|
1807
|
+
repeat the same line verbatim.
|
|
1808
|
+
|
|
1809
|
+
function_call: Deprecated in favor of `tool_choice`.
|
|
1810
|
+
|
|
1811
|
+
Controls which (if any) function is called by the model.
|
|
1812
|
+
|
|
1813
|
+
`none` means the model will not call a function and instead generates a message.
|
|
1814
|
+
|
|
1815
|
+
`auto` means the model can pick between generating a message or calling a
|
|
1816
|
+
function.
|
|
1817
|
+
|
|
1818
|
+
Specifying a particular function via `{"name": "my_function"}` forces the model
|
|
1819
|
+
to call that function.
|
|
1820
|
+
|
|
1821
|
+
`none` is the default when no functions are present. `auto` is the default if
|
|
1822
|
+
functions are present.
|
|
1823
|
+
|
|
1824
|
+
functions: Deprecated in favor of `tools`.
|
|
1825
|
+
|
|
1826
|
+
A list of functions the model may generate JSON inputs for.
|
|
1827
|
+
|
|
1828
|
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
|
|
1829
|
+
|
|
1830
|
+
Accepts a JSON object that maps tokens (specified by their token ID in the
|
|
1831
|
+
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
|
|
1832
|
+
bias is added to the logits generated by the model prior to sampling. The exact
|
|
1833
|
+
effect will vary per model, but values between -1 and 1 should decrease or
|
|
1834
|
+
increase likelihood of selection; values like -100 or 100 should result in a ban
|
|
1835
|
+
or exclusive selection of the relevant token.
|
|
1836
|
+
|
|
1837
|
+
logprobs: Whether to return log probabilities of the output tokens or not. If true,
|
|
1838
|
+
returns the log probabilities of each output token returned in the `content` of
|
|
1839
|
+
`message`.
|
|
1840
|
+
|
|
1841
|
+
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
|
|
1842
|
+
including visible output tokens and
|
|
1843
|
+
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
|
1844
|
+
|
|
1845
|
+
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
|
|
1846
|
+
completion. This value can be used to control
|
|
1847
|
+
[costs](https://openai.com/api/pricing/) for text generated via API.
|
|
1848
|
+
|
|
1849
|
+
This value is now deprecated in favor of `max_completion_tokens`, and is not
|
|
1850
|
+
compatible with
|
|
1851
|
+
[o-series models](https://platform.openai.com/docs/guides/reasoning).
|
|
1852
|
+
|
|
1853
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
1854
|
+
for storing additional information about the object in a structured format, and
|
|
1855
|
+
querying for objects via API or the dashboard.
|
|
1856
|
+
|
|
1857
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
1858
|
+
a maximum length of 512 characters.
|
|
1859
|
+
|
|
1860
|
+
modalities: Output types that you would like the model to generate. Most models are capable
|
|
1861
|
+
of generating text, which is the default:
|
|
1862
|
+
|
|
1863
|
+
`["text"]`
|
|
1864
|
+
|
|
1865
|
+
The `gpt-4o-audio-preview` model can also be used to
|
|
1866
|
+
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
|
|
1867
|
+
this model generate both text and audio responses, you can use:
|
|
1868
|
+
|
|
1869
|
+
`["text", "audio"]`
|
|
1870
|
+
|
|
1871
|
+
n: How many chat completion choices to generate for each input message. Note that
|
|
1872
|
+
you will be charged based on the number of generated tokens across all of the
|
|
1873
|
+
choices. Keep `n` as `1` to minimize costs.
|
|
1874
|
+
|
|
1875
|
+
parallel_tool_calls: Whether to enable
|
|
1876
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
1877
|
+
during tool use.
|
|
1878
|
+
|
|
1879
|
+
prediction: Static predicted output content, such as the content of a text file that is
|
|
1880
|
+
being regenerated.
|
|
1881
|
+
|
|
1882
|
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
|
|
1883
|
+
whether they appear in the text so far, increasing the model's likelihood to
|
|
1884
|
+
talk about new topics.
|
|
1885
|
+
|
|
1886
|
+
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
|
|
1887
|
+
hit rates. Replaces the `user` field.
|
|
1888
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
|
|
1889
|
+
|
|
1890
|
+
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
|
|
1891
|
+
prompt caching, which keeps cached prefixes active for longer, up to a maximum
|
|
1892
|
+
of 24 hours.
|
|
1893
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
|
|
1894
|
+
|
|
1895
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
1896
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1897
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1898
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
1899
|
+
reasoning in a response.
|
|
1900
|
+
|
|
1901
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1902
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
1903
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
1904
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1905
|
+
support `none`.
|
|
1906
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1907
|
+
|
|
1908
|
+
response_format: An object specifying the format that the model must output.
|
|
1909
|
+
|
|
1910
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
1911
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
1912
|
+
in the
|
|
1913
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
1914
|
+
|
|
1915
|
+
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
|
|
1916
|
+
ensures the message the model generates is valid JSON. Using `json_schema` is
|
|
1917
|
+
preferred for models that support it.
|
|
1918
|
+
|
|
1919
|
+
safety_identifier: A stable identifier used to help detect users of your application that may be
|
|
1920
|
+
violating OpenAI's usage policies. The IDs should be a string that uniquely
|
|
1921
|
+
identifies each user. We recommend hashing their username or email address, in
|
|
1922
|
+
order to avoid sending us any identifying information.
|
|
1923
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
1924
|
+
|
|
1925
|
+
seed: This feature is in Beta. If specified, our system will make a best effort to
|
|
1926
|
+
sample deterministically, such that repeated requests with the same `seed` and
|
|
1927
|
+
parameters should return the same result. Determinism is not guaranteed, and you
|
|
1928
|
+
should refer to the `system_fingerprint` response parameter to monitor changes
|
|
1929
|
+
in the backend.
|
|
1930
|
+
|
|
1931
|
+
service_tier: Specifies the processing type used for serving the request.
|
|
1932
|
+
|
|
1933
|
+
- If set to 'auto', then the request will be processed with the service tier
|
|
1934
|
+
configured in the Project settings. Unless otherwise configured, the Project
|
|
1935
|
+
will use 'default'.
|
|
1936
|
+
- If set to 'default', then the request will be processed with the standard
|
|
1937
|
+
pricing and performance for the selected model.
|
|
1938
|
+
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
|
1939
|
+
'[priority](https://openai.com/api-priority-processing/)', then the request
|
|
1940
|
+
will be processed with the corresponding service tier.
|
|
1941
|
+
- When not set, the default behavior is 'auto'.
|
|
1942
|
+
|
|
1943
|
+
When the `service_tier` parameter is set, the response body will include the
|
|
1944
|
+
`service_tier` value based on the processing mode actually used to serve the
|
|
1945
|
+
request. This response value may be different from the value set in the
|
|
1946
|
+
parameter.
|
|
1947
|
+
|
|
1948
|
+
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
|
|
1949
|
+
|
|
1950
|
+
Up to 4 sequences where the API will stop generating further tokens. The
|
|
1951
|
+
returned text will not contain the stop sequence.
|
|
1952
|
+
|
|
1953
|
+
store: Whether or not to store the output of this chat completion request for use in
|
|
1954
|
+
our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
|
1955
|
+
or [evals](https://platform.openai.com/docs/guides/evals) products.
|
|
1956
|
+
|
|
1957
|
+
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
|
|
1958
|
+
|
|
1959
|
+
stream: If set to true, the model response data will be streamed to the client as it is
|
|
1960
|
+
generated using
|
|
1961
|
+
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
|
|
1962
|
+
See the
|
|
1963
|
+
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
|
|
1964
|
+
for more information, along with the
|
|
1965
|
+
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
|
|
1966
|
+
guide for more information on how to handle the streaming events.
|
|
1967
|
+
|
|
1968
|
+
stream_options: Options for streaming response. Only set this when you set `stream: true`.
|
|
1969
|
+
|
|
1970
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
1971
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
1972
|
+
focused and deterministic. We generally recommend altering this or `top_p` but
|
|
1973
|
+
not both.
|
|
1974
|
+
|
|
1975
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
1976
|
+
not call any tool and instead generates a message. `auto` means the model can
|
|
1977
|
+
pick between generating a message or calling one or more tools. `required` means
|
|
1978
|
+
the model must call one or more tools. Specifying a particular tool via
|
|
1979
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
1980
|
+
call that tool.
|
|
1981
|
+
|
|
1982
|
+
`none` is the default when no tools are present. `auto` is the default if tools
|
|
1983
|
+
are present.
|
|
1984
|
+
|
|
1985
|
+
tools: A list of tools the model may call. You can provide either
|
|
1986
|
+
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
|
|
1987
|
+
or [function tools](https://platform.openai.com/docs/guides/function-calling).
|
|
1988
|
+
|
|
1989
|
+
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
|
|
1990
|
+
return at each token position, each with an associated log probability.
|
|
1991
|
+
`logprobs` must be set to `true` if this parameter is used.
|
|
1992
|
+
|
|
1993
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
1994
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
1995
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
1996
|
+
|
|
1997
|
+
We generally recommend altering this or `temperature` but not both.
|
|
1998
|
+
|
|
1999
|
+
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
|
2000
|
+
`prompt_cache_key` instead to maintain caching optimizations. A stable
|
|
2001
|
+
identifier for your end-users. Used to boost cache hit rates by better bucketing
|
|
2002
|
+
similar requests and to help OpenAI detect and prevent abuse.
|
|
2003
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
2004
|
+
|
|
2005
|
+
verbosity: Constrains the verbosity of the model's response. Lower values will result in
|
|
2006
|
+
more concise responses, while higher values will result in more verbose
|
|
2007
|
+
responses. Currently supported values are `low`, `medium`, and `high`.
|
|
2008
|
+
|
|
2009
|
+
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
|
|
2010
|
+
about the
|
|
2011
|
+
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
|
|
2012
|
+
|
|
2013
|
+
extra_headers: Send extra headers
|
|
2014
|
+
|
|
2015
|
+
extra_query: Add additional query parameters to the request
|
|
2016
|
+
|
|
2017
|
+
extra_body: Add additional JSON properties to the request
|
|
2018
|
+
|
|
2019
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2020
|
+
"""
|
|
2021
|
+
...
|
|
2022
|
+
|
|
2023
|
+
@overload
|
|
2024
|
+
async def create(
|
|
2025
|
+
self,
|
|
2026
|
+
*,
|
|
2027
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
2028
|
+
model: Union[str, ChatModel],
|
|
2029
|
+
stream: Literal[True],
|
|
2030
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
2031
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
2032
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
2033
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
2034
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
2035
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
2036
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2037
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
2038
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2039
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
2040
|
+
n: Optional[int] | Omit = omit,
|
|
2041
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2042
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
2043
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
2044
|
+
prompt_cache_key: str | Omit = omit,
|
|
2045
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
2046
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2047
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
2048
|
+
safety_identifier: str | Omit = omit,
|
|
2049
|
+
seed: Optional[int] | Omit = omit,
|
|
2050
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
2051
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
2052
|
+
store: Optional[bool] | Omit = omit,
|
|
2053
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
2054
|
+
temperature: Optional[float] | Omit = omit,
|
|
2055
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
2056
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
2057
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
2058
|
+
top_p: Optional[float] | Omit = omit,
|
|
2059
|
+
user: str | Omit = omit,
|
|
2060
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
2061
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
2062
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2063
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2064
|
+
extra_headers: Headers | None = None,
|
|
2065
|
+
extra_query: Query | None = None,
|
|
2066
|
+
extra_body: Body | None = None,
|
|
2067
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2068
|
+
) -> AsyncStream[ChatCompletionChunk]:
|
|
2069
|
+
"""
|
|
2070
|
+
**Starting a new project?** We recommend trying
|
|
2071
|
+
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
|
|
2072
|
+
advantage of the latest OpenAI platform features. Compare
|
|
2073
|
+
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
|
|
2074
|
+
|
|
2075
|
+
---
|
|
2076
|
+
|
|
2077
|
+
Creates a model response for the given chat conversation. Learn more in the
|
|
2078
|
+
[text generation](https://platform.openai.com/docs/guides/text-generation),
|
|
2079
|
+
[vision](https://platform.openai.com/docs/guides/vision), and
|
|
2080
|
+
[audio](https://platform.openai.com/docs/guides/audio) guides.
|
|
2081
|
+
|
|
2082
|
+
Parameter support can differ depending on the model used to generate the
|
|
2083
|
+
response, particularly for newer reasoning models. Parameters that are only
|
|
2084
|
+
supported for reasoning models are noted below. For the current state of
|
|
2085
|
+
unsupported parameters in reasoning models,
|
|
2086
|
+
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
|
|
2087
|
+
|
|
2088
|
+
Args:
|
|
2089
|
+
messages: A list of messages comprising the conversation so far. Depending on the
|
|
2090
|
+
[model](https://platform.openai.com/docs/models) you use, different message
|
|
2091
|
+
types (modalities) are supported, like
|
|
2092
|
+
[text](https://platform.openai.com/docs/guides/text-generation),
|
|
2093
|
+
[images](https://platform.openai.com/docs/guides/vision), and
|
|
2094
|
+
[audio](https://platform.openai.com/docs/guides/audio).
|
|
2095
|
+
|
|
2096
|
+
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
|
2097
|
+
wide range of models with different capabilities, performance characteristics,
|
|
2098
|
+
and price points. Refer to the
|
|
2099
|
+
[model guide](https://platform.openai.com/docs/models) to browse and compare
|
|
2100
|
+
available models.
|
|
2101
|
+
|
|
2102
|
+
stream: If set to true, the model response data will be streamed to the client as it is
|
|
2103
|
+
generated using
|
|
2104
|
+
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
|
|
2105
|
+
See the
|
|
2106
|
+
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
|
|
2107
|
+
for more information, along with the
|
|
2108
|
+
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
|
|
2109
|
+
guide for more information on how to handle the streaming events.
|
|
2110
|
+
|
|
2111
|
+
audio: Parameters for audio output. Required when audio output is requested with
|
|
2112
|
+
`modalities: ["audio"]`.
|
|
2113
|
+
[Learn more](https://platform.openai.com/docs/guides/audio).
|
|
2114
|
+
|
|
2115
|
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
|
|
2116
|
+
existing frequency in the text so far, decreasing the model's likelihood to
|
|
2117
|
+
repeat the same line verbatim.
|
|
2118
|
+
|
|
2119
|
+
function_call: Deprecated in favor of `tool_choice`.
|
|
2120
|
+
|
|
2121
|
+
Controls which (if any) function is called by the model.
|
|
2122
|
+
|
|
2123
|
+
`none` means the model will not call a function and instead generates a message.
|
|
2124
|
+
|
|
2125
|
+
`auto` means the model can pick between generating a message or calling a
|
|
2126
|
+
function.
|
|
2127
|
+
|
|
2128
|
+
Specifying a particular function via `{"name": "my_function"}` forces the model
|
|
2129
|
+
to call that function.
|
|
2130
|
+
|
|
2131
|
+
`none` is the default when no functions are present. `auto` is the default if
|
|
2132
|
+
functions are present.
|
|
2133
|
+
|
|
2134
|
+
functions: Deprecated in favor of `tools`.
|
|
2135
|
+
|
|
2136
|
+
A list of functions the model may generate JSON inputs for.
|
|
2137
|
+
|
|
2138
|
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
|
|
2139
|
+
|
|
2140
|
+
Accepts a JSON object that maps tokens (specified by their token ID in the
|
|
2141
|
+
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
|
|
2142
|
+
bias is added to the logits generated by the model prior to sampling. The exact
|
|
2143
|
+
effect will vary per model, but values between -1 and 1 should decrease or
|
|
2144
|
+
increase likelihood of selection; values like -100 or 100 should result in a ban
|
|
2145
|
+
or exclusive selection of the relevant token.
|
|
2146
|
+
|
|
2147
|
+
logprobs: Whether to return log probabilities of the output tokens or not. If true,
|
|
2148
|
+
returns the log probabilities of each output token returned in the `content` of
|
|
2149
|
+
`message`.
|
|
2150
|
+
|
|
2151
|
+
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
|
|
2152
|
+
including visible output tokens and
|
|
2153
|
+
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
|
2154
|
+
|
|
2155
|
+
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
|
|
2156
|
+
completion. This value can be used to control
|
|
2157
|
+
[costs](https://openai.com/api/pricing/) for text generated via API.
|
|
2158
|
+
|
|
2159
|
+
This value is now deprecated in favor of `max_completion_tokens`, and is not
|
|
2160
|
+
compatible with
|
|
2161
|
+
[o-series models](https://platform.openai.com/docs/guides/reasoning).
|
|
2162
|
+
|
|
2163
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
2164
|
+
for storing additional information about the object in a structured format, and
|
|
2165
|
+
querying for objects via API or the dashboard.
|
|
2166
|
+
|
|
2167
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
2168
|
+
a maximum length of 512 characters.
|
|
2169
|
+
|
|
2170
|
+
modalities: Output types that you would like the model to generate. Most models are capable
|
|
2171
|
+
of generating text, which is the default:
|
|
2172
|
+
|
|
2173
|
+
`["text"]`
|
|
2174
|
+
|
|
2175
|
+
The `gpt-4o-audio-preview` model can also be used to
|
|
2176
|
+
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
|
|
2177
|
+
this model generate both text and audio responses, you can use:
|
|
2178
|
+
|
|
2179
|
+
`["text", "audio"]`
|
|
2180
|
+
|
|
2181
|
+
n: How many chat completion choices to generate for each input message. Note that
|
|
2182
|
+
you will be charged based on the number of generated tokens across all of the
|
|
2183
|
+
choices. Keep `n` as `1` to minimize costs.
|
|
2184
|
+
|
|
2185
|
+
parallel_tool_calls: Whether to enable
|
|
2186
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
2187
|
+
during tool use.
|
|
2188
|
+
|
|
2189
|
+
prediction: Static predicted output content, such as the content of a text file that is
|
|
2190
|
+
being regenerated.
|
|
2191
|
+
|
|
2192
|
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
|
|
2193
|
+
whether they appear in the text so far, increasing the model's likelihood to
|
|
2194
|
+
talk about new topics.
|
|
2195
|
+
|
|
2196
|
+
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
|
|
2197
|
+
hit rates. Replaces the `user` field.
|
|
2198
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
|
|
2199
|
+
|
|
2200
|
+
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
|
|
2201
|
+
prompt caching, which keeps cached prefixes active for longer, up to a maximum
|
|
2202
|
+
of 24 hours.
|
|
2203
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
|
|
2204
|
+
|
|
2205
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
2206
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
2207
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
2208
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
2209
|
+
reasoning in a response.
|
|
2210
|
+
|
|
2211
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
2212
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
2213
|
+
calls are supported for all reasoning values in `gpt-5.1`.
|
|
2214
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
2215
|
+
support `none`.
|
|
2216
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
2217
|
+
|
|
2218
|
+
response_format: An object specifying the format that the model must output.
|
|
2219
|
+
|
|
2220
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
2221
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
2222
|
+
in the
|
|
2223
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
2224
|
+
|
|
2225
|
+
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
|
|
2226
|
+
ensures the message the model generates is valid JSON. Using `json_schema` is
|
|
2227
|
+
preferred for models that support it.
|
|
2228
|
+
|
|
2229
|
+
safety_identifier: A stable identifier used to help detect users of your application that may be
|
|
2230
|
+
violating OpenAI's usage policies. The ID should be a string that uniquely
|
|
2231
|
+
identifies each user. We recommend hashing their username or email address, in
|
|
2232
|
+
order to avoid sending us any identifying information.
|
|
2233
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
2234
|
+
|
|
2235
|
+
seed: This feature is in Beta. If specified, our system will make a best effort to
|
|
2236
|
+
sample deterministically, such that repeated requests with the same `seed` and
|
|
2237
|
+
parameters should return the same result. Determinism is not guaranteed, and you
|
|
2238
|
+
should refer to the `system_fingerprint` response parameter to monitor changes
|
|
2239
|
+
in the backend.
|
|
2240
|
+
|
|
2241
|
+
service_tier: Specifies the processing type used for serving the request.
|
|
2242
|
+
|
|
2243
|
+
- If set to 'auto', then the request will be processed with the service tier
|
|
2244
|
+
configured in the Project settings. Unless otherwise configured, the Project
|
|
2245
|
+
will use 'default'.
|
|
2246
|
+
- If set to 'default', then the request will be processed with the standard
|
|
2247
|
+
pricing and performance for the selected model.
|
|
2248
|
+
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
|
2249
|
+
'[priority](https://openai.com/api-priority-processing/)', then the request
|
|
2250
|
+
will be processed with the corresponding service tier.
|
|
2251
|
+
- When not set, the default behavior is 'auto'.
|
|
2252
|
+
|
|
2253
|
+
When the `service_tier` parameter is set, the response body will include the
|
|
2254
|
+
`service_tier` value based on the processing mode actually used to serve the
|
|
2255
|
+
request. This response value may be different from the value set in the
|
|
2256
|
+
parameter.
|
|
2257
|
+
|
|
2258
|
+
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
|
|
2259
|
+
|
|
2260
|
+
Up to 4 sequences where the API will stop generating further tokens. The
|
|
2261
|
+
returned text will not contain the stop sequence.
|
|
2262
|
+
|
|
2263
|
+
store: Whether or not to store the output of this chat completion request for use in
|
|
2264
|
+
our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
|
2265
|
+
or [evals](https://platform.openai.com/docs/guides/evals) products.
|
|
2266
|
+
|
|
2267
|
+
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
|
|
2268
|
+
|
|
2269
|
+
stream_options: Options for streaming response. Only set this when you set `stream: true`.
|
|
2270
|
+
|
|
2271
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
2272
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
2273
|
+
focused and deterministic. We generally recommend altering this or `top_p` but
|
|
2274
|
+
not both.
|
|
2275
|
+
|
|
2276
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
2277
|
+
not call any tool and instead generates a message. `auto` means the model can
|
|
2278
|
+
pick between generating a message or calling one or more tools. `required` means
|
|
2279
|
+
the model must call one or more tools. Specifying a particular tool via
|
|
2280
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
2281
|
+
call that tool.
|
|
2282
|
+
|
|
2283
|
+
`none` is the default when no tools are present. `auto` is the default if tools
|
|
2284
|
+
are present.
|
|
2285
|
+
|
|
2286
|
+
tools: A list of tools the model may call. You can provide either
|
|
2287
|
+
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
|
|
2288
|
+
or [function tools](https://platform.openai.com/docs/guides/function-calling).
|
|
2289
|
+
|
|
2290
|
+
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
|
|
2291
|
+
return at each token position, each with an associated log probability.
|
|
2292
|
+
`logprobs` must be set to `true` if this parameter is used.
|
|
2293
|
+
|
|
2294
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
2295
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
2296
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
2297
|
+
|
|
2298
|
+
We generally recommend altering this or `temperature` but not both.
|
|
2299
|
+
|
|
2300
|
+
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
|
2301
|
+
`prompt_cache_key` instead to maintain caching optimizations. A stable
|
|
2302
|
+
identifier for your end-users. Used to boost cache hit rates by better bucketing
|
|
2303
|
+
similar requests and to help OpenAI detect and prevent abuse.
|
|
2304
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
2305
|
+
|
|
2306
|
+
verbosity: Constrains the verbosity of the model's response. Lower values will result in
|
|
2307
|
+
more concise responses, while higher values will result in more verbose
|
|
2308
|
+
responses. Currently supported values are `low`, `medium`, and `high`.
|
|
2309
|
+
|
|
2310
|
+
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
|
|
2311
|
+
about the
|
|
2312
|
+
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
|
|
2313
|
+
|
|
2314
|
+
extra_headers: Send extra headers
|
|
2315
|
+
|
|
2316
|
+
extra_query: Add additional query parameters to the request
|
|
2317
|
+
|
|
2318
|
+
extra_body: Add additional JSON properties to the request
|
|
2319
|
+
|
|
2320
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2321
|
+
"""
|
|
2322
|
+
...
|
|
2323
|
+
|
|
2324
|
+
@overload
|
|
2325
|
+
async def create(
|
|
2326
|
+
self,
|
|
2327
|
+
*,
|
|
2328
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
2329
|
+
model: Union[str, ChatModel],
|
|
2330
|
+
stream: bool,
|
|
2331
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
2332
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
2333
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
2334
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
2335
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
2336
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
2337
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2338
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
2339
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2340
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
2341
|
+
n: Optional[int] | Omit = omit,
|
|
2342
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2343
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
2344
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
2345
|
+
prompt_cache_key: str | Omit = omit,
|
|
2346
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
2347
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2348
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
2349
|
+
safety_identifier: str | Omit = omit,
|
|
2350
|
+
seed: Optional[int] | Omit = omit,
|
|
2351
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
2352
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
2353
|
+
store: Optional[bool] | Omit = omit,
|
|
2354
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
2355
|
+
temperature: Optional[float] | Omit = omit,
|
|
2356
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
2357
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
2358
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
2359
|
+
top_p: Optional[float] | Omit = omit,
|
|
2360
|
+
user: str | Omit = omit,
|
|
2361
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
2362
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
2363
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2364
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2365
|
+
extra_headers: Headers | None = None,
|
|
2366
|
+
extra_query: Query | None = None,
|
|
2367
|
+
extra_body: Body | None = None,
|
|
2368
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2369
|
+
) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
|
|
2370
|
+
"""
|
|
2371
|
+
**Starting a new project?** We recommend trying
|
|
2372
|
+
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
|
|
2373
|
+
advantage of the latest OpenAI platform features. Compare
|
|
2374
|
+
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
|
|
2375
|
+
|
|
2376
|
+
---
|
|
2377
|
+
|
|
2378
|
+
Creates a model response for the given chat conversation. Learn more in the
|
|
2379
|
+
[text generation](https://platform.openai.com/docs/guides/text-generation),
|
|
2380
|
+
[vision](https://platform.openai.com/docs/guides/vision), and
|
|
2381
|
+
[audio](https://platform.openai.com/docs/guides/audio) guides.
|
|
2382
|
+
|
|
2383
|
+
Parameter support can differ depending on the model used to generate the
|
|
2384
|
+
response, particularly for newer reasoning models. Parameters that are only
|
|
2385
|
+
supported for reasoning models are noted below. For the current state of
|
|
2386
|
+
unsupported parameters in reasoning models,
|
|
2387
|
+
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
|
|
2388
|
+
|
|
2389
|
+
Args:
|
|
2390
|
+
messages: A list of messages comprising the conversation so far. Depending on the
|
|
2391
|
+
[model](https://platform.openai.com/docs/models) you use, different message
|
|
2392
|
+
types (modalities) are supported, like
|
|
2393
|
+
[text](https://platform.openai.com/docs/guides/text-generation),
|
|
2394
|
+
[images](https://platform.openai.com/docs/guides/vision), and
|
|
2395
|
+
[audio](https://platform.openai.com/docs/guides/audio).
|
|
2396
|
+
|
|
2397
|
+
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
|
2398
|
+
wide range of models with different capabilities, performance characteristics,
|
|
2399
|
+
and price points. Refer to the
|
|
2400
|
+
[model guide](https://platform.openai.com/docs/models) to browse and compare
|
|
2401
|
+
available models.
|
|
2402
|
+
|
|
2403
|
+
stream: If set to true, the model response data will be streamed to the client as it is
|
|
2404
|
+
generated using
|
|
2405
|
+
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
|
|
2406
|
+
See the
|
|
2407
|
+
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
|
|
2408
|
+
for more information, along with the
|
|
2409
|
+
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
|
|
2410
|
+
guide for more information on how to handle the streaming events.
|
|
2411
|
+
|
|
2412
|
+
audio: Parameters for audio output. Required when audio output is requested with
|
|
2413
|
+
`modalities: ["audio"]`.
|
|
2414
|
+
[Learn more](https://platform.openai.com/docs/guides/audio).
|
|
2415
|
+
|
|
2416
|
+
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
|
|
2417
|
+
existing frequency in the text so far, decreasing the model's likelihood to
|
|
2418
|
+
repeat the same line verbatim.
|
|
2419
|
+
|
|
2420
|
+
function_call: Deprecated in favor of `tool_choice`.
|
|
2421
|
+
|
|
2422
|
+
Controls which (if any) function is called by the model.
|
|
2423
|
+
|
|
2424
|
+
`none` means the model will not call a function and instead generates a message.
|
|
2425
|
+
|
|
2426
|
+
`auto` means the model can pick between generating a message or calling a
|
|
2427
|
+
function.
|
|
2428
|
+
|
|
2429
|
+
Specifying a particular function via `{"name": "my_function"}` forces the model
|
|
2430
|
+
to call that function.
|
|
2431
|
+
|
|
2432
|
+
`none` is the default when no functions are present. `auto` is the default if
|
|
2433
|
+
functions are present.
|
|
2434
|
+
|
|
2435
|
+
functions: Deprecated in favor of `tools`.
|
|
2436
|
+
|
|
2437
|
+
A list of functions the model may generate JSON inputs for.
|
|
2438
|
+
|
|
2439
|
+
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
|
|
2440
|
+
|
|
2441
|
+
Accepts a JSON object that maps tokens (specified by their token ID in the
|
|
2442
|
+
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
|
|
2443
|
+
bias is added to the logits generated by the model prior to sampling. The exact
|
|
2444
|
+
effect will vary per model, but values between -1 and 1 should decrease or
|
|
2445
|
+
increase likelihood of selection; values like -100 or 100 should result in a ban
|
|
2446
|
+
or exclusive selection of the relevant token.
|
|
2447
|
+
|
|
2448
|
+
logprobs: Whether to return log probabilities of the output tokens or not. If true,
|
|
2449
|
+
returns the log probabilities of each output token returned in the `content` of
|
|
2450
|
+
`message`.
|
|
2451
|
+
|
|
2452
|
+
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
|
|
2453
|
+
including visible output tokens and
|
|
2454
|
+
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
|
|
2455
|
+
|
|
2456
|
+
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
|
|
2457
|
+
completion. This value can be used to control
|
|
2458
|
+
[costs](https://openai.com/api/pricing/) for text generated via API.
|
|
2459
|
+
|
|
2460
|
+
This value is now deprecated in favor of `max_completion_tokens`, and is not
|
|
2461
|
+
compatible with
|
|
2462
|
+
[o-series models](https://platform.openai.com/docs/guides/reasoning).
|
|
2463
|
+
|
|
2464
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
2465
|
+
for storing additional information about the object in a structured format, and
|
|
2466
|
+
querying for objects via API or the dashboard.
|
|
2467
|
+
|
|
2468
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
2469
|
+
a maximum length of 512 characters.
|
|
2470
|
+
|
|
2471
|
+
modalities: Output types that you would like the model to generate. Most models are capable
|
|
2472
|
+
of generating text, which is the default:
|
|
2473
|
+
|
|
2474
|
+
`["text"]`
|
|
2475
|
+
|
|
2476
|
+
The `gpt-4o-audio-preview` model can also be used to
|
|
2477
|
+
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
|
|
2478
|
+
this model generate both text and audio responses, you can use:
|
|
2479
|
+
|
|
2480
|
+
`["text", "audio"]`
|
|
2481
|
+
|
|
2482
|
+
n: How many chat completion choices to generate for each input message. Note that
|
|
2483
|
+
you will be charged based on the number of generated tokens across all of the
|
|
2484
|
+
choices. Keep `n` as `1` to minimize costs.
|
|
2485
|
+
|
|
2486
|
+
parallel_tool_calls: Whether to enable
|
|
2487
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
2488
|
+
during tool use.
|
|
2489
|
+
|
|
2490
|
+
prediction: Static predicted output content, such as the content of a text file that is
|
|
2491
|
+
being regenerated.
|
|
2492
|
+
|
|
2493
|
+
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
|
|
2494
|
+
whether they appear in the text so far, increasing the model's likelihood to
|
|
2495
|
+
talk about new topics.
|
|
2496
|
+
|
|
2497
|
+
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
|
|
2498
|
+
hit rates. Replaces the `user` field.
|
|
2499
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
|
|
2500
|
+
|
|
2501
|
+
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
|
|
2502
|
+
prompt caching, which keeps cached prefixes active for longer, up to a maximum
|
|
2503
|
+
of 24 hours.
|
|
2504
|
+
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
|
|
2505
|
+
|
|
2506
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
2507
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
2508
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
2509
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
2510
|
+
reasoning in a response.
|
|
2511
|
+
|
|
2512
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
2513
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
2514
|
+
calls are supported for all reasoning values in `gpt-5.1`.
|
|
2515
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
2516
|
+
support `none`.
|
|
2517
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
2518
|
+
|
|
2519
|
+
response_format: An object specifying the format that the model must output.
|
|
2520
|
+
|
|
2521
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
2522
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
2523
|
+
in the
|
|
2524
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
2525
|
+
|
|
2526
|
+
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
|
|
2527
|
+
ensures the message the model generates is valid JSON. Using `json_schema` is
|
|
2528
|
+
preferred for models that support it.
|
|
2529
|
+
|
|
2530
|
+
safety_identifier: A stable identifier used to help detect users of your application that may be
|
|
2531
|
+
violating OpenAI's usage policies. The ID should be a string that uniquely
|
|
2532
|
+
identifies each user. We recommend hashing their username or email address, in
|
|
2533
|
+
order to avoid sending us any identifying information.
|
|
2534
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
2535
|
+
|
|
2536
|
+
seed: This feature is in Beta. If specified, our system will make a best effort to
|
|
2537
|
+
sample deterministically, such that repeated requests with the same `seed` and
|
|
2538
|
+
parameters should return the same result. Determinism is not guaranteed, and you
|
|
2539
|
+
should refer to the `system_fingerprint` response parameter to monitor changes
|
|
2540
|
+
in the backend.
|
|
2541
|
+
|
|
2542
|
+
service_tier: Specifies the processing type used for serving the request.
|
|
2543
|
+
|
|
2544
|
+
- If set to 'auto', then the request will be processed with the service tier
|
|
2545
|
+
configured in the Project settings. Unless otherwise configured, the Project
|
|
2546
|
+
will use 'default'.
|
|
2547
|
+
- If set to 'default', then the request will be processed with the standard
|
|
2548
|
+
pricing and performance for the selected model.
|
|
2549
|
+
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
|
2550
|
+
'[priority](https://openai.com/api-priority-processing/)', then the request
|
|
2551
|
+
will be processed with the corresponding service tier.
|
|
2552
|
+
- When not set, the default behavior is 'auto'.
|
|
2553
|
+
|
|
2554
|
+
When the `service_tier` parameter is set, the response body will include the
|
|
2555
|
+
`service_tier` value based on the processing mode actually used to serve the
|
|
2556
|
+
request. This response value may be different from the value set in the
|
|
2557
|
+
parameter.
|
|
2558
|
+
|
|
2559
|
+
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
|
|
2560
|
+
|
|
2561
|
+
Up to 4 sequences where the API will stop generating further tokens. The
|
|
2562
|
+
returned text will not contain the stop sequence.
|
|
2563
|
+
|
|
2564
|
+
store: Whether or not to store the output of this chat completion request for use in
|
|
2565
|
+
our [model distillation](https://platform.openai.com/docs/guides/distillation)
|
|
2566
|
+
or [evals](https://platform.openai.com/docs/guides/evals) products.
|
|
2567
|
+
|
|
2568
|
+
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
|
|
2569
|
+
|
|
2570
|
+
stream_options: Options for streaming response. Only set this when you set `stream: true`.
|
|
2571
|
+
|
|
2572
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
2573
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
2574
|
+
focused and deterministic. We generally recommend altering this or `top_p` but
|
|
2575
|
+
not both.
|
|
2576
|
+
|
|
2577
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
2578
|
+
not call any tool and instead generates a message. `auto` means the model can
|
|
2579
|
+
pick between generating a message or calling one or more tools. `required` means
|
|
2580
|
+
the model must call one or more tools. Specifying a particular tool via
|
|
2581
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
2582
|
+
call that tool.
|
|
2583
|
+
|
|
2584
|
+
`none` is the default when no tools are present. `auto` is the default if tools
|
|
2585
|
+
are present.
|
|
2586
|
+
|
|
2587
|
+
tools: A list of tools the model may call. You can provide either
|
|
2588
|
+
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
|
|
2589
|
+
or [function tools](https://platform.openai.com/docs/guides/function-calling).
|
|
2590
|
+
|
|
2591
|
+
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
|
|
2592
|
+
return at each token position, each with an associated log probability.
|
|
2593
|
+
`logprobs` must be set to `true` if this parameter is used.
|
|
2594
|
+
|
|
2595
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
2596
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
2597
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
2598
|
+
|
|
2599
|
+
We generally recommend altering this or `temperature` but not both.
|
|
2600
|
+
|
|
2601
|
+
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
|
2602
|
+
`prompt_cache_key` instead to maintain caching optimizations. A stable
|
|
2603
|
+
identifier for your end-users. Used to boost cache hit rates by better bucketing
|
|
2604
|
+
similar requests and to help OpenAI detect and prevent abuse.
|
|
2605
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
|
|
2606
|
+
|
|
2607
|
+
verbosity: Constrains the verbosity of the model's response. Lower values will result in
|
|
2608
|
+
more concise responses, while higher values will result in more verbose
|
|
2609
|
+
responses. Currently supported values are `low`, `medium`, and `high`.
|
|
2610
|
+
|
|
2611
|
+
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
|
|
2612
|
+
about the
|
|
2613
|
+
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
|
|
2614
|
+
|
|
2615
|
+
extra_headers: Send extra headers
|
|
2616
|
+
|
|
2617
|
+
extra_query: Add additional query parameters to the request
|
|
2618
|
+
|
|
2619
|
+
extra_body: Add additional JSON properties to the request
|
|
2620
|
+
|
|
2621
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2622
|
+
"""
|
|
2623
|
+
...
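As a sketch of the `response_format` behaviour described in the docstring above (JSON mode via `{"type": "json_object"}`), assuming `client` is an async client from this SDK and the standard `choices[0].message.content` response shape:

```py
import json


async def ask_for_json(client) -> dict:
    # JSON mode guarantees a syntactically valid JSON message; prefer
    # `json_schema` (Structured Outputs) on models that support it.
    completion = await client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "Reply with a single JSON object only."},
            {"role": "user", "content": "List three primary colors."},
        ],
        response_format={"type": "json_object"},
    )
    return json.loads(completion.choices[0].message.content)
```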
|
|
2624
|
+
|
|
2625
|
+
@required_args(["messages", "model"], ["messages", "model", "stream"])
|
|
2626
|
+
async def create(
|
|
2627
|
+
self,
|
|
2628
|
+
*,
|
|
2629
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
2630
|
+
model: Union[str, ChatModel],
|
|
2631
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
2632
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
2633
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
2634
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
2635
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
2636
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
2637
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2638
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
2639
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2640
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
2641
|
+
n: Optional[int] | Omit = omit,
|
|
2642
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2643
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
2644
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
2645
|
+
prompt_cache_key: str | Omit = omit,
|
|
2646
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
2647
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2648
|
+
response_format: completion_create_params.ResponseFormat | Omit = omit,
|
|
2649
|
+
safety_identifier: str | Omit = omit,
|
|
2650
|
+
seed: Optional[int] | Omit = omit,
|
|
2651
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
2652
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
2653
|
+
store: Optional[bool] | Omit = omit,
|
|
2654
|
+
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
|
|
2655
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
2656
|
+
temperature: Optional[float] | Omit = omit,
|
|
2657
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
2658
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
2659
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
2660
|
+
top_p: Optional[float] | Omit = omit,
|
|
2661
|
+
user: str | Omit = omit,
|
|
2662
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
2663
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
2664
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2665
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2666
|
+
extra_headers: Headers | None = None,
|
|
2667
|
+
extra_query: Query | None = None,
|
|
2668
|
+
extra_body: Body | None = None,
|
|
2669
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2670
|
+
) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
|
|
2671
|
+
validate_response_format(response_format)
|
|
2672
|
+
return await self._post(
|
|
2673
|
+
"/chat/completions",
|
|
2674
|
+
body=await async_maybe_transform(
|
|
2675
|
+
{
|
|
2676
|
+
"messages": messages,
|
|
2677
|
+
"model": model,
|
|
2678
|
+
"audio": audio,
|
|
2679
|
+
"frequency_penalty": frequency_penalty,
|
|
2680
|
+
"function_call": function_call,
|
|
2681
|
+
"functions": functions,
|
|
2682
|
+
"logit_bias": logit_bias,
|
|
2683
|
+
"logprobs": logprobs,
|
|
2684
|
+
"max_completion_tokens": max_completion_tokens,
|
|
2685
|
+
"max_tokens": max_tokens,
|
|
2686
|
+
"metadata": metadata,
|
|
2687
|
+
"modalities": modalities,
|
|
2688
|
+
"n": n,
|
|
2689
|
+
"parallel_tool_calls": parallel_tool_calls,
|
|
2690
|
+
"prediction": prediction,
|
|
2691
|
+
"presence_penalty": presence_penalty,
|
|
2692
|
+
"prompt_cache_key": prompt_cache_key,
|
|
2693
|
+
"prompt_cache_retention": prompt_cache_retention,
|
|
2694
|
+
"reasoning_effort": reasoning_effort,
|
|
2695
|
+
"response_format": response_format,
|
|
2696
|
+
"safety_identifier": safety_identifier,
|
|
2697
|
+
"seed": seed,
|
|
2698
|
+
"service_tier": service_tier,
|
|
2699
|
+
"stop": stop,
|
|
2700
|
+
"store": store,
|
|
2701
|
+
"stream": stream,
|
|
2702
|
+
"stream_options": stream_options,
|
|
2703
|
+
"temperature": temperature,
|
|
2704
|
+
"tool_choice": tool_choice,
|
|
2705
|
+
"tools": tools,
|
|
2706
|
+
"top_logprobs": top_logprobs,
|
|
2707
|
+
"top_p": top_p,
|
|
2708
|
+
"user": user,
|
|
2709
|
+
"verbosity": verbosity,
|
|
2710
|
+
"web_search_options": web_search_options,
|
|
2711
|
+
},
|
|
2712
|
+
completion_create_params.CompletionCreateParamsStreaming
|
|
2713
|
+
if stream
|
|
2714
|
+
else completion_create_params.CompletionCreateParamsNonStreaming,
|
|
2715
|
+
),
|
|
2716
|
+
options=make_request_options(
|
|
2717
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2718
|
+
),
|
|
2719
|
+
cast_to=ChatCompletion,
|
|
2720
|
+
stream=stream or False,
|
|
2721
|
+
stream_cls=AsyncStream[ChatCompletionChunk],
|
|
2722
|
+
)
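To illustrate the `tools`/`tool_choice` semantics documented above, a hedged sketch that forces one function tool; the function-tool schema used here (`description`, JSON-schema `parameters`) follows the common Chat Completions convention and is an assumption, as is the `client` instance:

```py
# Assumed function-tool schema; only the `tool_choice` literal below is taken
# verbatim from the docstring above.
GET_WEATHER_TOOL = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}


async def force_weather_tool(client):
    completion = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
        tools=[GET_WEATHER_TOOL],
        # Forces the model to call this specific tool, per the docstring.
        tool_choice={"type": "function", "function": {"name": "get_weather"}},
    )
    return completion.choices[0].message.tool_calls
```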
|
|
2723
|
+
|
|
2724
|
+
async def retrieve(
|
|
2725
|
+
self,
|
|
2726
|
+
completion_id: str,
|
|
2727
|
+
*,
|
|
2728
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2729
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2730
|
+
extra_headers: Headers | None = None,
|
|
2731
|
+
extra_query: Query | None = None,
|
|
2732
|
+
extra_body: Body | None = None,
|
|
2733
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2734
|
+
) -> ChatCompletion:
|
|
2735
|
+
"""Get a stored chat completion.
|
|
2736
|
+
|
|
2737
|
+
Only Chat Completions that have been created with
|
|
2738
|
+
the `store` parameter set to `true` will be returned.
|
|
2739
|
+
|
|
2740
|
+
Args:
|
|
2741
|
+
extra_headers: Send extra headers
|
|
2742
|
+
|
|
2743
|
+
extra_query: Add additional query parameters to the request
|
|
2744
|
+
|
|
2745
|
+
extra_body: Add additional JSON properties to the request
|
|
2746
|
+
|
|
2747
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2748
|
+
"""
|
|
2749
|
+
if not completion_id:
|
|
2750
|
+
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
|
|
2751
|
+
return await self._get(
|
|
2752
|
+
f"/chat/completions/{completion_id}",
|
|
2753
|
+
options=make_request_options(
|
|
2754
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2755
|
+
),
|
|
2756
|
+
cast_to=ChatCompletion,
|
|
2757
|
+
)
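`retrieve` only returns completions that were created with `store=True`; a short sketch of the round trip (the `client` instance and the `.id` attribute on the created completion are assumptions):

```py
async def create_and_fetch(client):
    # Create a stored completion so it can be fetched later by ID.
    created = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello."}],
        store=True,
    )
    # Fetch the stored chat completion back by its identifier.
    return await client.chat.completions.retrieve(created.id)
```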
|
|
2758
|
+
|
|
2759
|
+
async def update(
|
|
2760
|
+
self,
|
|
2761
|
+
completion_id: str,
|
|
2762
|
+
*,
|
|
2763
|
+
metadata: Optional[Metadata],
|
|
2764
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2765
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2766
|
+
extra_headers: Headers | None = None,
|
|
2767
|
+
extra_query: Query | None = None,
|
|
2768
|
+
extra_body: Body | None = None,
|
|
2769
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2770
|
+
) -> ChatCompletion:
|
|
2771
|
+
"""Modify a stored chat completion.
|
|
2772
|
+
|
|
2773
|
+
Only Chat Completions that have been created
|
|
2774
|
+
with the `store` parameter set to `true` can be modified. Currently, the only
|
|
2775
|
+
supported modification is to update the `metadata` field.
|
|
2776
|
+
|
|
2777
|
+
Args:
|
|
2778
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
2779
|
+
for storing additional information about the object in a structured format, and
|
|
2780
|
+
querying for objects via API or the dashboard.
|
|
2781
|
+
|
|
2782
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
2783
|
+
a maximum length of 512 characters.
|
|
2784
|
+
|
|
2785
|
+
extra_headers: Send extra headers
|
|
2786
|
+
|
|
2787
|
+
extra_query: Add additional query parameters to the request
|
|
2788
|
+
|
|
2789
|
+
extra_body: Add additional JSON properties to the request
|
|
2790
|
+
|
|
2791
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2792
|
+
"""
|
|
2793
|
+
if not completion_id:
|
|
2794
|
+
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
|
|
2795
|
+
return await self._post(
|
|
2796
|
+
f"/chat/completions/{completion_id}",
|
|
2797
|
+
body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
|
|
2798
|
+
options=make_request_options(
|
|
2799
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2800
|
+
),
|
|
2801
|
+
cast_to=ChatCompletion,
|
|
2802
|
+
)
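A small sketch of the only supported modification, updating `metadata` on a stored completion (assuming `client` is an async client from this SDK):

```py
async def tag_completion(client, completion_id: str):
    # Metadata is limited to 16 pairs; keys up to 64 characters,
    # values up to 512 characters.
    return await client.chat.completions.update(
        completion_id,
        metadata={"project": "demo", "reviewed": "false"},
    )
```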
|
|
2803
|
+
|
|
2804
|
+
def list(
|
|
2805
|
+
self,
|
|
2806
|
+
*,
|
|
2807
|
+
after: str | Omit = omit,
|
|
2808
|
+
limit: int | Omit = omit,
|
|
2809
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2810
|
+
model: str | Omit = omit,
|
|
2811
|
+
order: Literal["asc", "desc"] | Omit = omit,
|
|
2812
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2813
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2814
|
+
extra_headers: Headers | None = None,
|
|
2815
|
+
extra_query: Query | None = None,
|
|
2816
|
+
extra_body: Body | None = None,
|
|
2817
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2818
|
+
) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]:
|
|
2819
|
+
"""List stored Chat Completions.
|
|
2820
|
+
|
|
2821
|
+
Only Chat Completions that have been stored with
|
|
2822
|
+
the `store` parameter set to `true` will be returned.
|
|
2823
|
+
|
|
2824
|
+
Args:
|
|
2825
|
+
after: Identifier for the last chat completion from the previous pagination request.
|
|
2826
|
+
|
|
2827
|
+
limit: Number of Chat Completions to retrieve.
|
|
2828
|
+
|
|
2829
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
2830
|
+
for storing additional information about the object in a structured format, and
|
|
2831
|
+
querying for objects via API or the dashboard.
|
|
2832
|
+
|
|
2833
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
2834
|
+
a maximum length of 512 characters.
|
|
2835
|
+
|
|
2836
|
+
model: The model used to generate the Chat Completions.
|
|
2837
|
+
|
|
2838
|
+
order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
|
|
2839
|
+
`desc` for descending order. Defaults to `asc`.
|
|
2840
|
+
|
|
2841
|
+
extra_headers: Send extra headers
|
|
2842
|
+
|
|
2843
|
+
extra_query: Add additional query parameters to the request
|
|
2844
|
+
|
|
2845
|
+
extra_body: Add additional JSON properties to the request
|
|
2846
|
+
|
|
2847
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2848
|
+
"""
|
|
2849
|
+
return self._get_api_list(
|
|
2850
|
+
"/chat/completions",
|
|
2851
|
+
page=AsyncCursorPage[ChatCompletion],
|
|
2852
|
+
options=make_request_options(
|
|
2853
|
+
extra_headers=extra_headers,
|
|
2854
|
+
extra_query=extra_query,
|
|
2855
|
+
extra_body=extra_body,
|
|
2856
|
+
timeout=timeout,
|
|
2857
|
+
query=maybe_transform(
|
|
2858
|
+
{
|
|
2859
|
+
"after": after,
|
|
2860
|
+
"limit": limit,
|
|
2861
|
+
"metadata": metadata,
|
|
2862
|
+
"model": model,
|
|
2863
|
+
"order": order,
|
|
2864
|
+
},
|
|
2865
|
+
completion_list_params.CompletionListParams,
|
|
2866
|
+
),
|
|
2867
|
+
),
|
|
2868
|
+
model=ChatCompletion,
|
|
2869
|
+
)
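A sketch of walking the stored completions; note that `list` itself is not awaited because it returns a paginator, which is assumed here to support direct `async for` iteration across cursor pages (as is common for SDKs in this style):

```py
async def recent_completion_ids(client, model: str = "gpt-4o") -> list[str]:
    page = client.chat.completions.list(  # no await: returns an AsyncPaginator
        model=model,
        limit=20,
        order="desc",
    )
    ids: list[str] = []
    async for completion in page:  # assumed: paginator iterates across pages
        ids.append(completion.id)
    return ids
```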
|
|
2870
|
+
|
|
2871
|
+
async def delete(
|
|
2872
|
+
self,
|
|
2873
|
+
completion_id: str,
|
|
2874
|
+
*,
|
|
2875
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2876
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2877
|
+
extra_headers: Headers | None = None,
|
|
2878
|
+
extra_query: Query | None = None,
|
|
2879
|
+
extra_body: Body | None = None,
|
|
2880
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2881
|
+
) -> ChatCompletionDeleted:
|
|
2882
|
+
"""Delete a stored chat completion.
|
|
2883
|
+
|
|
2884
|
+
Only Chat Completions that have been created
|
|
2885
|
+
with the `store` parameter set to `true` can be deleted.
|
|
2886
|
+
|
|
2887
|
+
Args:
|
|
2888
|
+
extra_headers: Send extra headers
|
|
2889
|
+
|
|
2890
|
+
extra_query: Add additional query parameters to the request
|
|
2891
|
+
|
|
2892
|
+
extra_body: Add additional JSON properties to the request
|
|
2893
|
+
|
|
2894
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2895
|
+
"""
|
|
2896
|
+
if not completion_id:
|
|
2897
|
+
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
|
|
2898
|
+
return await self._delete(
|
|
2899
|
+
f"/chat/completions/{completion_id}",
|
|
2900
|
+
options=make_request_options(
|
|
2901
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2902
|
+
),
|
|
2903
|
+
cast_to=ChatCompletionDeleted,
|
|
2904
|
+
)
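And the corresponding deletion sketch; the `deleted` attribute on `ChatCompletionDeleted` is an assumption based on the return type name:

```py
async def delete_stored_completion(client, completion_id: str) -> bool:
    # Only completions created with `store=True` can be deleted.
    result = await client.chat.completions.delete(completion_id)
    return result.deleted  # assumed boolean field on ChatCompletionDeleted
```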
|
|
2905
|
+
|
|
2906
|
+
def stream(
|
|
2907
|
+
self,
|
|
2908
|
+
*,
|
|
2909
|
+
messages: Iterable[ChatCompletionMessageParam],
|
|
2910
|
+
model: Union[str, ChatModel],
|
|
2911
|
+
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
|
|
2912
|
+
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
|
|
2913
|
+
frequency_penalty: Optional[float] | Omit = omit,
|
|
2914
|
+
function_call: completion_create_params.FunctionCall | Omit = omit,
|
|
2915
|
+
functions: Iterable[completion_create_params.Function] | Omit = omit,
|
|
2916
|
+
logit_bias: Optional[Dict[str, int]] | Omit = omit,
|
|
2917
|
+
logprobs: Optional[bool] | Omit = omit,
|
|
2918
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2919
|
+
max_tokens: Optional[int] | Omit = omit,
|
|
2920
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2921
|
+
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
|
|
2922
|
+
n: Optional[int] | Omit = omit,
|
|
2923
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2924
|
+
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
|
|
2925
|
+
presence_penalty: Optional[float] | Omit = omit,
|
|
2926
|
+
prompt_cache_key: str | Omit = omit,
|
|
2927
|
+
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
|
|
2928
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2929
|
+
safety_identifier: str | Omit = omit,
|
|
2930
|
+
seed: Optional[int] | Omit = omit,
|
|
2931
|
+
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
|
|
2932
|
+
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
|
|
2933
|
+
store: Optional[bool] | Omit = omit,
|
|
2934
|
+
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
|
|
2935
|
+
temperature: Optional[float] | Omit = omit,
|
|
2936
|
+
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
|
|
2937
|
+
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
|
|
2938
|
+
top_logprobs: Optional[int] | Omit = omit,
|
|
2939
|
+
top_p: Optional[float] | Omit = omit,
|
|
2940
|
+
user: str | Omit = omit,
|
|
2941
|
+
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
|
|
2942
|
+
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
|
|
2943
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2944
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2945
|
+
extra_headers: Headers | None = None,
|
|
2946
|
+
extra_query: Query | None = None,
|
|
2947
|
+
extra_body: Body | None = None,
|
|
2948
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2949
|
+
) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
|
|
2950
|
+
"""Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
|
|
2951
|
+
and automatic accumulation of each delta.
|
|
2952
|
+
|
|
2953
|
+
This also supports all of the parsing utilities that `.parse()` does.
|
|
2954
|
+
|
|
2955
|
+
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
|
|
2956
|
+
|
|
2957
|
+
```py
|
|
2958
|
+
async with client.chat.completions.stream(
|
|
2959
|
+
model="gpt-4o-2024-08-06",
|
|
2960
|
+
messages=[...],
|
|
2961
|
+
) as stream:
|
|
2962
|
+
async for event in stream:
|
|
2963
|
+
if event.type == "content.delta":
|
|
2964
|
+
print(event.delta, flush=True, end="")
|
|
2965
|
+
```
|
|
2966
|
+
|
|
2967
|
+
When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
|
|
2968
|
+
|
|
2969
|
+
When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
|
|
2970
|
+
the context manager.
|
|
2971
|
+
"""
|
|
2972
|
+
_validate_input_tools(tools)
|
|
2973
|
+
|
|
2974
|
+
extra_headers = {
|
|
2975
|
+
"X-Stainless-Helper-Method": "chat.completions.stream",
|
|
2976
|
+
**(extra_headers or {}),
|
|
2977
|
+
}
|
|
2978
|
+
|
|
2979
|
+
api_request = self.create(
|
|
2980
|
+
messages=messages,
|
|
2981
|
+
model=model,
|
|
2982
|
+
audio=audio,
|
|
2983
|
+
stream=True,
|
|
2984
|
+
response_format=_type_to_response_format(response_format),
|
|
2985
|
+
frequency_penalty=frequency_penalty,
|
|
2986
|
+
function_call=function_call,
|
|
2987
|
+
functions=functions,
|
|
2988
|
+
logit_bias=logit_bias,
|
|
2989
|
+
logprobs=logprobs,
|
|
2990
|
+
max_completion_tokens=max_completion_tokens,
|
|
2991
|
+
max_tokens=max_tokens,
|
|
2992
|
+
metadata=metadata,
|
|
2993
|
+
modalities=modalities,
|
|
2994
|
+
n=n,
|
|
2995
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
2996
|
+
prediction=prediction,
|
|
2997
|
+
presence_penalty=presence_penalty,
|
|
2998
|
+
prompt_cache_key=prompt_cache_key,
|
|
2999
|
+
prompt_cache_retention=prompt_cache_retention,
|
|
3000
|
+
reasoning_effort=reasoning_effort,
|
|
3001
|
+
safety_identifier=safety_identifier,
|
|
3002
|
+
seed=seed,
|
|
3003
|
+
service_tier=service_tier,
|
|
3004
|
+
stop=stop,
|
|
3005
|
+
store=store,
|
|
3006
|
+
stream_options=stream_options,
|
|
3007
|
+
temperature=temperature,
|
|
3008
|
+
tool_choice=tool_choice,
|
|
3009
|
+
tools=tools,
|
|
3010
|
+
top_logprobs=top_logprobs,
|
|
3011
|
+
top_p=top_p,
|
|
3012
|
+
user=user,
|
|
3013
|
+
verbosity=verbosity,
|
|
3014
|
+
web_search_options=web_search_options,
|
|
3015
|
+
extra_headers=extra_headers,
|
|
3016
|
+
extra_query=extra_query,
|
|
3017
|
+
extra_body=extra_body,
|
|
3018
|
+
timeout=timeout,
|
|
3019
|
+
)
|
|
3020
|
+
return AsyncChatCompletionStreamManager(
|
|
3021
|
+
api_request,
|
|
3022
|
+
response_format=response_format,
|
|
3023
|
+
input_tools=tools,
|
|
3024
|
+
)
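Because the `response_format` parameter of `stream()` also accepts a class (`type[ResponseFormatT]`), a hedged sketch combining a Pydantic model with the event loop shown in the docstring; how the final parsed object is read back depends on the stream helper API linked above, so this sketch only prints raw content deltas:

```py
from pydantic import BaseModel


class Haiku(BaseModel):
    title: str
    lines: list[str]


async def stream_structured(client) -> None:
    # `client` is assumed to be an async client from this SDK.
    async with client.chat.completions.stream(
        model="gpt-4o-2024-08-06",
        messages=[{"role": "user", "content": "Write a haiku about autumn."}],
        response_format=Haiku,  # parsed with the same utilities as `.parse()`
    ) as stream:
        async for event in stream:
            if event.type == "content.delta":
                print(event.delta, flush=True, end="")
```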
|
|
3025
|
+
|
|
3026
|
+
|
|
3027
|
+
class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = _legacy_response.to_raw_response_wrapper(
            completions.parse,
        )
        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            completions.retrieve,
        )
        self.update = _legacy_response.to_raw_response_wrapper(
            completions.update,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            completions.list,
        )
        self.delete = _legacy_response.to_raw_response_wrapper(
            completions.delete,
        )

    @cached_property
    def messages(self) -> MessagesWithRawResponse:
        return MessagesWithRawResponse(self._completions.messages)


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = _legacy_response.async_to_raw_response_wrapper(
            completions.parse,
        )
        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            completions.retrieve,
        )
        self.update = _legacy_response.async_to_raw_response_wrapper(
            completions.update,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            completions.list,
        )
        self.delete = _legacy_response.async_to_raw_response_wrapper(
            completions.delete,
        )

    @cached_property
    def messages(self) -> AsyncMessagesWithRawResponse:
        return AsyncMessagesWithRawResponse(self._completions.messages)

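The two classes above only bind `to_raw_response_wrapper` / `async_to_raw_response_wrapper` around each resource method; in SDKs generated in this style they are normally reached through a `with_raw_response` property on the resource. A hedged sketch, assuming that property exists here as well and that `client` is a sync client instance from this SDK:

```py
# Sketch only: `.with_raw_response` is assumed based on the wrapper classes above.
def create_with_headers(client):
    response = client.chat.completions.with_raw_response.create(
        model="gpt-4o-2024-08-06",  # illustrative
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.headers.get("x-request-id"))  # raw HTTP metadata is available
    completion = response.parse()                # the usual parsed completion object
    return completion
```
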
class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = to_streamed_response_wrapper(
            completions.parse,
        )
        self.create = to_streamed_response_wrapper(
            completions.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            completions.retrieve,
        )
        self.update = to_streamed_response_wrapper(
            completions.update,
        )
        self.list = to_streamed_response_wrapper(
            completions.list,
        )
        self.delete = to_streamed_response_wrapper(
            completions.delete,
        )

    @cached_property
    def messages(self) -> MessagesWithStreamingResponse:
        return MessagesWithStreamingResponse(self._completions.messages)


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = async_to_streamed_response_wrapper(
            completions.parse,
        )
        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            completions.retrieve,
        )
        self.update = async_to_streamed_response_wrapper(
            completions.update,
        )
        self.list = async_to_streamed_response_wrapper(
            completions.list,
        )
        self.delete = async_to_streamed_response_wrapper(
            completions.delete,
        )

    @cached_property
    def messages(self) -> AsyncMessagesWithStreamingResponse:
        return AsyncMessagesWithStreamingResponse(self._completions.messages)

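Likewise, the streaming-response wrappers above are conventionally reached through a `with_streaming_response` property and used as a context manager so the underlying connection is released. A sketch under that assumption, again with `client` as an assumed sync client instance:

```py
# Sketch only: `.with_streaming_response` is assumed, mirroring the raw-response pattern.
def print_raw_stream(client) -> None:
    with client.chat.completions.with_streaming_response.create(
        model="gpt-4o-2024-08-06",  # illustrative
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    ) as response:
        for line in response.iter_lines():  # raw SSE lines, not parsed events
            print(line)
```
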
def validate_response_format(response_format: object) -> None:
    if inspect.isclass(response_format) and issubclass(response_format, pydantic.BaseModel):
        raise TypeError(
            "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `chat.completions.parse()` instead"
        )
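The guard above exists because a Pydantic model class is only meaningful to the parsing helpers, not to `create()`. A brief illustration of what it accepts and rejects, assuming it runs where `validate_response_format` is importable (the model definition is illustrative):

```py
import pydantic


class Step(pydantic.BaseModel):  # illustrative model
    explanation: str
    output: str


validate_response_format({"type": "json_object"})  # plain response_format values pass through

try:
    validate_response_format(Step)  # a BaseModel class is rejected with a pointer to `.parse()`
except TypeError as exc:
    print(exc)
```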