aimlapi-sdk-python 2.8.1b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimlapi/__init__.py +243 -0
- aimlapi/__main__.py +3 -0
- aimlapi/_client.py +368 -0
- aimlapi/_utils/__init__.py +3 -0
- aimlapi/_utils/_compat.py +3 -0
- aimlapi/_utils/_datetime_parse.py +3 -0
- aimlapi/_utils/_logs.py +3 -0
- aimlapi/_utils/_proxy.py +3 -0
- aimlapi/_utils/_reflection.py +3 -0
- aimlapi/_utils/_resources_proxy.py +3 -0
- aimlapi/_utils/_streams.py +3 -0
- aimlapi/_utils/_sync.py +3 -0
- aimlapi/_utils/_transform.py +3 -0
- aimlapi/_utils/_typing.py +3 -0
- aimlapi/_utils/_utils.py +3 -0
- aimlapi/_version.py +9 -0
- aimlapi/cli/__init__.py +3 -0
- aimlapi/cli/_api/__init__.py +3 -0
- aimlapi/cli/_api/_main.py +3 -0
- aimlapi/cli/_api/audio.py +3 -0
- aimlapi/cli/_api/chat/__init__.py +3 -0
- aimlapi/cli/_api/chat/completions.py +3 -0
- aimlapi/cli/_api/completions.py +3 -0
- aimlapi/cli/_api/files.py +3 -0
- aimlapi/cli/_api/fine_tuning/__init__.py +3 -0
- aimlapi/cli/_api/fine_tuning/jobs.py +3 -0
- aimlapi/cli/_api/image.py +3 -0
- aimlapi/cli/_api/models.py +3 -0
- aimlapi/cli/_cli.py +3 -0
- aimlapi/cli/_errors.py +3 -0
- aimlapi/cli/_models.py +3 -0
- aimlapi/cli/_progress.py +3 -0
- aimlapi/cli/_tools/__init__.py +3 -0
- aimlapi/cli/_tools/_main.py +3 -0
- aimlapi/cli/_tools/fine_tunes.py +3 -0
- aimlapi/cli/_tools/migrate.py +3 -0
- aimlapi/cli/_utils.py +3 -0
- aimlapi/helpers/__init__.py +3 -0
- aimlapi/helpers/local_audio_player.py +3 -0
- aimlapi/helpers/microphone.py +3 -0
- aimlapi/lib/__init__.py +3 -0
- aimlapi/lib/_old_api.py +3 -0
- aimlapi/lib/_parsing/__init__.py +3 -0
- aimlapi/lib/_parsing/_completions.py +3 -0
- aimlapi/lib/_parsing/_responses.py +3 -0
- aimlapi/lib/_pydantic.py +3 -0
- aimlapi/lib/_realtime.py +3 -0
- aimlapi/lib/_tools.py +3 -0
- aimlapi/lib/_validators.py +3 -0
- aimlapi/lib/azure.py +3 -0
- aimlapi/lib/streaming/__init__.py +3 -0
- aimlapi/lib/streaming/_assistants.py +3 -0
- aimlapi/lib/streaming/_deltas.py +3 -0
- aimlapi/lib/streaming/chat/__init__.py +3 -0
- aimlapi/lib/streaming/chat/_completions.py +3 -0
- aimlapi/lib/streaming/chat/_events.py +3 -0
- aimlapi/lib/streaming/chat/_types.py +3 -0
- aimlapi/lib/streaming/responses/__init__.py +3 -0
- aimlapi/lib/streaming/responses/_events.py +3 -0
- aimlapi/lib/streaming/responses/_responses.py +3 -0
- aimlapi/lib/streaming/responses/_types.py +3 -0
- aimlapi/pagination.py +3 -0
- aimlapi/resources/__init__.py +3 -0
- aimlapi/resources/audio/__init__.py +47 -0
- aimlapi/resources/audio/_polling.py +129 -0
- aimlapi/resources/audio/audio.py +56 -0
- aimlapi/resources/audio/speech.py +428 -0
- aimlapi/resources/audio/transcriptions.py +219 -0
- aimlapi/resources/audio/translations.py +3 -0
- aimlapi/resources/batches.py +3 -0
- aimlapi/resources/beta/__init__.py +3 -0
- aimlapi/resources/beta/assistants.py +3 -0
- aimlapi/resources/beta/beta.py +3 -0
- aimlapi/resources/beta/chatkit/__init__.py +3 -0
- aimlapi/resources/beta/chatkit/chatkit.py +3 -0
- aimlapi/resources/beta/chatkit/sessions.py +3 -0
- aimlapi/resources/beta/chatkit/threads.py +3 -0
- aimlapi/resources/beta/realtime/__init__.py +3 -0
- aimlapi/resources/beta/realtime/realtime.py +3 -0
- aimlapi/resources/beta/realtime/sessions.py +3 -0
- aimlapi/resources/beta/realtime/transcription_sessions.py +3 -0
- aimlapi/resources/beta/threads/__init__.py +3 -0
- aimlapi/resources/beta/threads/messages.py +3 -0
- aimlapi/resources/beta/threads/runs/__init__.py +3 -0
- aimlapi/resources/beta/threads/runs/runs.py +3 -0
- aimlapi/resources/beta/threads/runs/steps.py +3 -0
- aimlapi/resources/beta/threads/threads.py +3 -0
- aimlapi/resources/chat/__init__.py +3 -0
- aimlapi/resources/chat/chat.py +86 -0
- aimlapi/resources/chat/completions/__init__.py +4 -0
- aimlapi/resources/chat/completions/completions.py +452 -0
- aimlapi/resources/chat/completions/messages.py +3 -0
- aimlapi/resources/completions.py +3 -0
- aimlapi/resources/containers/__init__.py +3 -0
- aimlapi/resources/containers/containers.py +3 -0
- aimlapi/resources/containers/files/__init__.py +3 -0
- aimlapi/resources/containers/files/content.py +3 -0
- aimlapi/resources/containers/files/files.py +3 -0
- aimlapi/resources/conversations/__init__.py +3 -0
- aimlapi/resources/conversations/conversations.py +3 -0
- aimlapi/resources/conversations/items.py +3 -0
- aimlapi/resources/embeddings.py +3 -0
- aimlapi/resources/evals/__init__.py +3 -0
- aimlapi/resources/evals/evals.py +3 -0
- aimlapi/resources/evals/runs/__init__.py +3 -0
- aimlapi/resources/evals/runs/output_items.py +3 -0
- aimlapi/resources/evals/runs/runs.py +3 -0
- aimlapi/resources/files.py +3 -0
- aimlapi/resources/fine_tuning/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/alpha.py +3 -0
- aimlapi/resources/fine_tuning/alpha/graders.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/permissions.py +3 -0
- aimlapi/resources/fine_tuning/fine_tuning.py +3 -0
- aimlapi/resources/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/resources/fine_tuning/jobs/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/jobs/jobs.py +3 -0
- aimlapi/resources/images.py +184 -0
- aimlapi/resources/models.py +3 -0
- aimlapi/resources/moderations.py +3 -0
- aimlapi/resources/realtime/__init__.py +3 -0
- aimlapi/resources/realtime/calls.py +3 -0
- aimlapi/resources/realtime/client_secrets.py +3 -0
- aimlapi/resources/realtime/realtime.py +3 -0
- aimlapi/resources/responses/__init__.py +4 -0
- aimlapi/resources/responses/input_items.py +3 -0
- aimlapi/resources/responses/input_tokens.py +3 -0
- aimlapi/resources/responses/responses.py +229 -0
- aimlapi/resources/uploads/__init__.py +19 -0
- aimlapi/resources/uploads/parts.py +3 -0
- aimlapi/resources/uploads/uploads.py +99 -0
- aimlapi/resources/vector_stores/__init__.py +3 -0
- aimlapi/resources/vector_stores/file_batches.py +3 -0
- aimlapi/resources/vector_stores/files.py +3 -0
- aimlapi/resources/vector_stores/vector_stores.py +3 -0
- aimlapi/resources/videos.py +267 -0
- aimlapi/resources/webhooks.py +3 -0
- aimlapi/types/__init__.py +3 -0
- aimlapi/types/audio/__init__.py +3 -0
- aimlapi/types/audio/speech_create_params.py +3 -0
- aimlapi/types/audio/speech_model.py +3 -0
- aimlapi/types/audio/transcription.py +3 -0
- aimlapi/types/audio/transcription_create_params.py +3 -0
- aimlapi/types/audio/transcription_create_response.py +3 -0
- aimlapi/types/audio/transcription_diarized.py +3 -0
- aimlapi/types/audio/transcription_diarized_segment.py +3 -0
- aimlapi/types/audio/transcription_include.py +3 -0
- aimlapi/types/audio/transcription_segment.py +3 -0
- aimlapi/types/audio/transcription_stream_event.py +3 -0
- aimlapi/types/audio/transcription_text_delta_event.py +3 -0
- aimlapi/types/audio/transcription_text_done_event.py +3 -0
- aimlapi/types/audio/transcription_text_segment_event.py +3 -0
- aimlapi/types/audio/transcription_verbose.py +3 -0
- aimlapi/types/audio/transcription_word.py +3 -0
- aimlapi/types/audio/translation.py +3 -0
- aimlapi/types/audio/translation_create_params.py +3 -0
- aimlapi/types/audio/translation_create_response.py +3 -0
- aimlapi/types/audio/translation_verbose.py +3 -0
- aimlapi/types/audio_model.py +3 -0
- aimlapi/types/audio_response_format.py +3 -0
- aimlapi/types/auto_file_chunking_strategy_param.py +3 -0
- aimlapi/types/batch.py +3 -0
- aimlapi/types/batch_create_params.py +3 -0
- aimlapi/types/batch_error.py +3 -0
- aimlapi/types/batch_list_params.py +3 -0
- aimlapi/types/batch_request_counts.py +3 -0
- aimlapi/types/batch_usage.py +3 -0
- aimlapi/types/beta/__init__.py +3 -0
- aimlapi/types/beta/assistant.py +3 -0
- aimlapi/types/beta/assistant_create_params.py +3 -0
- aimlapi/types/beta/assistant_deleted.py +3 -0
- aimlapi/types/beta/assistant_list_params.py +3 -0
- aimlapi/types/beta/assistant_response_format_option.py +3 -0
- aimlapi/types/beta/assistant_response_format_option_param.py +3 -0
- aimlapi/types/beta/assistant_stream_event.py +3 -0
- aimlapi/types/beta/assistant_tool.py +3 -0
- aimlapi/types/beta/assistant_tool_choice.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_param.py +3 -0
- aimlapi/types/beta/assistant_tool_param.py +3 -0
- aimlapi/types/beta/assistant_update_params.py +3 -0
- aimlapi/types/beta/chat/__init__.py +3 -0
- aimlapi/types/beta/chatkit/__init__.py +3 -0
- aimlapi/types/beta/chatkit/chat_session.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_automatic_thread_titling.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_expires_after_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_file_upload.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_history.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_status.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_workflow_param.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_attachment.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_response_output_text.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_assistant_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_item_list.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_user_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_widget_item.py +3 -0
- aimlapi/types/beta/chatkit/session_create_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_delete_response.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_items_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_params.py +3 -0
- aimlapi/types/beta/chatkit_workflow.py +3 -0
- aimlapi/types/beta/code_interpreter_tool.py +3 -0
- aimlapi/types/beta/code_interpreter_tool_param.py +3 -0
- aimlapi/types/beta/file_search_tool.py +3 -0
- aimlapi/types/beta/file_search_tool_param.py +3 -0
- aimlapi/types/beta/function_tool.py +3 -0
- aimlapi/types/beta/function_tool_param.py +3 -0
- aimlapi/types/beta/realtime/__init__.py +3 -0
- aimlapi/types/beta/realtime/conversation_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference_param.py +3 -0
- aimlapi/types/beta/realtime/error_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/beta/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/beta/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/beta/realtime/realtime_response.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_status.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/beta/realtime/realtime_server_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_created_event.py +3 -0
- aimlapi/types/beta/realtime/response_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_done_event.py +3 -0
- aimlapi/types/beta/realtime/session.py +3 -0
- aimlapi/types/beta/realtime/session_create_params.py +3 -0
- aimlapi/types/beta/realtime/session_create_response.py +3 -0
- aimlapi/types/beta/realtime/session_created_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event_param.py +3 -0
- aimlapi/types/beta/realtime/session_updated_event.py +3 -0
- aimlapi/types/beta/realtime/transcription_session.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_create_params.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update_param.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_updated_event.py +3 -0
- aimlapi/types/beta/thread.py +3 -0
- aimlapi/types/beta/thread_create_and_run_params.py +3 -0
- aimlapi/types/beta/thread_create_params.py +3 -0
- aimlapi/types/beta/thread_deleted.py +3 -0
- aimlapi/types/beta/thread_update_params.py +3 -0
- aimlapi/types/beta/threads/__init__.py +3 -0
- aimlapi/types/beta/threads/annotation.py +3 -0
- aimlapi/types/beta/threads/annotation_delta.py +3 -0
- aimlapi/types/beta/threads/file_citation_annotation.py +3 -0
- aimlapi/types/beta/threads/file_citation_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/image_file.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_file_delta.py +3 -0
- aimlapi/types/beta/threads/image_file_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_file_param.py +3 -0
- aimlapi/types/beta/threads/image_url.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_url_delta.py +3 -0
- aimlapi/types/beta/threads/image_url_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_url_param.py +3 -0
- aimlapi/types/beta/threads/message.py +3 -0
- aimlapi/types/beta/threads/message_content.py +3 -0
- aimlapi/types/beta/threads/message_content_delta.py +3 -0
- aimlapi/types/beta/threads/message_content_part_param.py +3 -0
- aimlapi/types/beta/threads/message_create_params.py +3 -0
- aimlapi/types/beta/threads/message_deleted.py +3 -0
- aimlapi/types/beta/threads/message_delta.py +3 -0
- aimlapi/types/beta/threads/message_delta_event.py +3 -0
- aimlapi/types/beta/threads/message_list_params.py +3 -0
- aimlapi/types/beta/threads/message_update_params.py +3 -0
- aimlapi/types/beta/threads/refusal_content_block.py +3 -0
- aimlapi/types/beta/threads/refusal_delta_block.py +3 -0
- aimlapi/types/beta/threads/required_action_function_tool_call.py +3 -0
- aimlapi/types/beta/threads/run.py +3 -0
- aimlapi/types/beta/threads/run_create_params.py +3 -0
- aimlapi/types/beta/threads/run_list_params.py +3 -0
- aimlapi/types/beta/threads/run_status.py +3 -0
- aimlapi/types/beta/threads/run_submit_tool_outputs_params.py +3 -0
- aimlapi/types/beta/threads/run_update_params.py +3 -0
- aimlapi/types/beta/threads/runs/__init__.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_logs.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_output_image.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/message_creation_step_details.py +3 -0
- aimlapi/types/beta/threads/runs/run_step.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_event.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_message_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_include.py +3 -0
- aimlapi/types/beta/threads/runs/step_list_params.py +3 -0
- aimlapi/types/beta/threads/runs/step_retrieve_params.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta_object.py +3 -0
- aimlapi/types/beta/threads/runs/tool_calls_step_details.py +3 -0
- aimlapi/types/beta/threads/text.py +3 -0
- aimlapi/types/beta/threads/text_content_block.py +3 -0
- aimlapi/types/beta/threads/text_content_block_param.py +3 -0
- aimlapi/types/beta/threads/text_delta.py +3 -0
- aimlapi/types/beta/threads/text_delta_block.py +3 -0
- aimlapi/types/chat/__init__.py +3 -0
- aimlapi/types/chat/chat_completion.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tools_param.py +3 -0
- aimlapi/types/chat/chat_completion_assistant_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_audio.py +3 -0
- aimlapi/types/chat/chat_completion_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_chunk.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_input_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_refusal_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text_param.py +3 -0
- aimlapi/types/chat/chat_completion_custom_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_deleted.py +3 -0
- aimlapi/types/chat/chat_completion_developer_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_call_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_message.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_modality.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_custom_param.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_prediction_content_param.py +3 -0
- aimlapi/types/chat/chat_completion_reasoning_effort.py +3 -0
- aimlapi/types/chat/chat_completion_role.py +3 -0
- aimlapi/types/chat/chat_completion_store_message.py +3 -0
- aimlapi/types/chat/chat_completion_stream_options_param.py +3 -0
- aimlapi/types/chat/chat_completion_system_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_token_logprob.py +3 -0
- aimlapi/types/chat/chat_completion_tool_choice_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_user_message_param.py +3 -0
- aimlapi/types/chat/completion_create_params.py +3 -0
- aimlapi/types/chat/completion_list_params.py +3 -0
- aimlapi/types/chat/completion_update_params.py +3 -0
- aimlapi/types/chat/completions/__init__.py +3 -0
- aimlapi/types/chat/completions/message_list_params.py +3 -0
- aimlapi/types/chat/parsed_chat_completion.py +3 -0
- aimlapi/types/chat/parsed_function_tool_call.py +3 -0
- aimlapi/types/chat_model.py +3 -0
- aimlapi/types/completion.py +3 -0
- aimlapi/types/completion_choice.py +3 -0
- aimlapi/types/completion_create_params.py +3 -0
- aimlapi/types/completion_usage.py +3 -0
- aimlapi/types/container_create_params.py +3 -0
- aimlapi/types/container_create_response.py +3 -0
- aimlapi/types/container_list_params.py +3 -0
- aimlapi/types/container_list_response.py +3 -0
- aimlapi/types/container_retrieve_response.py +3 -0
- aimlapi/types/containers/__init__.py +3 -0
- aimlapi/types/containers/file_create_params.py +3 -0
- aimlapi/types/containers/file_create_response.py +3 -0
- aimlapi/types/containers/file_list_params.py +3 -0
- aimlapi/types/containers/file_list_response.py +3 -0
- aimlapi/types/containers/file_retrieve_response.py +3 -0
- aimlapi/types/containers/files/__init__.py +3 -0
- aimlapi/types/conversations/__init__.py +3 -0
- aimlapi/types/conversations/computer_screenshot_content.py +3 -0
- aimlapi/types/conversations/conversation.py +3 -0
- aimlapi/types/conversations/conversation_create_params.py +3 -0
- aimlapi/types/conversations/conversation_deleted_resource.py +3 -0
- aimlapi/types/conversations/conversation_item.py +3 -0
- aimlapi/types/conversations/conversation_item_list.py +3 -0
- aimlapi/types/conversations/conversation_update_params.py +3 -0
- aimlapi/types/conversations/input_file_content.py +3 -0
- aimlapi/types/conversations/input_file_content_param.py +3 -0
- aimlapi/types/conversations/input_image_content.py +3 -0
- aimlapi/types/conversations/input_image_content_param.py +3 -0
- aimlapi/types/conversations/input_text_content.py +3 -0
- aimlapi/types/conversations/input_text_content_param.py +3 -0
- aimlapi/types/conversations/item_create_params.py +3 -0
- aimlapi/types/conversations/item_list_params.py +3 -0
- aimlapi/types/conversations/item_retrieve_params.py +3 -0
- aimlapi/types/conversations/message.py +3 -0
- aimlapi/types/conversations/output_text_content.py +3 -0
- aimlapi/types/conversations/output_text_content_param.py +3 -0
- aimlapi/types/conversations/refusal_content.py +3 -0
- aimlapi/types/conversations/refusal_content_param.py +3 -0
- aimlapi/types/conversations/summary_text_content.py +3 -0
- aimlapi/types/conversations/text_content.py +3 -0
- aimlapi/types/create_embedding_response.py +3 -0
- aimlapi/types/embedding.py +3 -0
- aimlapi/types/embedding_create_params.py +3 -0
- aimlapi/types/embedding_model.py +3 -0
- aimlapi/types/eval_create_params.py +3 -0
- aimlapi/types/eval_create_response.py +3 -0
- aimlapi/types/eval_custom_data_source_config.py +3 -0
- aimlapi/types/eval_delete_response.py +3 -0
- aimlapi/types/eval_list_params.py +3 -0
- aimlapi/types/eval_list_response.py +3 -0
- aimlapi/types/eval_retrieve_response.py +3 -0
- aimlapi/types/eval_stored_completions_data_source_config.py +3 -0
- aimlapi/types/eval_update_params.py +3 -0
- aimlapi/types/eval_update_response.py +3 -0
- aimlapi/types/evals/__init__.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source_param.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source_param.py +3 -0
- aimlapi/types/evals/eval_api_error.py +3 -0
- aimlapi/types/evals/run_cancel_response.py +3 -0
- aimlapi/types/evals/run_create_params.py +3 -0
- aimlapi/types/evals/run_create_response.py +3 -0
- aimlapi/types/evals/run_delete_response.py +3 -0
- aimlapi/types/evals/run_list_params.py +3 -0
- aimlapi/types/evals/run_list_response.py +3 -0
- aimlapi/types/evals/run_retrieve_response.py +3 -0
- aimlapi/types/evals/runs/__init__.py +3 -0
- aimlapi/types/evals/runs/output_item_list_params.py +3 -0
- aimlapi/types/evals/runs/output_item_list_response.py +3 -0
- aimlapi/types/evals/runs/output_item_retrieve_response.py +3 -0
- aimlapi/types/file_chunking_strategy.py +3 -0
- aimlapi/types/file_chunking_strategy_param.py +3 -0
- aimlapi/types/file_content.py +3 -0
- aimlapi/types/file_create_params.py +3 -0
- aimlapi/types/file_deleted.py +3 -0
- aimlapi/types/file_list_params.py +3 -0
- aimlapi/types/file_object.py +3 -0
- aimlapi/types/file_purpose.py +3 -0
- aimlapi/types/fine_tuning/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_response.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_delete_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_response.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/dpo_method.py +3 -0
- aimlapi/types/fine_tuning/dpo_method_param.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_event.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +3 -0
- aimlapi/types/fine_tuning/job_create_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_events_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/types/fine_tuning/jobs/checkpoint_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_method.py +3 -0
- aimlapi/types/fine_tuning/supervised_method_param.py +3 -0
- aimlapi/types/graders/__init__.py +3 -0
- aimlapi/types/graders/label_model_grader.py +3 -0
- aimlapi/types/graders/label_model_grader_param.py +3 -0
- aimlapi/types/graders/multi_grader.py +3 -0
- aimlapi/types/graders/multi_grader_param.py +3 -0
- aimlapi/types/graders/python_grader.py +3 -0
- aimlapi/types/graders/python_grader_param.py +3 -0
- aimlapi/types/graders/score_model_grader.py +3 -0
- aimlapi/types/graders/score_model_grader_param.py +3 -0
- aimlapi/types/graders/string_check_grader.py +3 -0
- aimlapi/types/graders/string_check_grader_param.py +3 -0
- aimlapi/types/graders/text_similarity_grader.py +3 -0
- aimlapi/types/graders/text_similarity_grader_param.py +3 -0
- aimlapi/types/image.py +3 -0
- aimlapi/types/image_create_variation_params.py +3 -0
- aimlapi/types/image_edit_completed_event.py +3 -0
- aimlapi/types/image_edit_params.py +3 -0
- aimlapi/types/image_edit_partial_image_event.py +3 -0
- aimlapi/types/image_edit_stream_event.py +3 -0
- aimlapi/types/image_gen_completed_event.py +3 -0
- aimlapi/types/image_gen_partial_image_event.py +3 -0
- aimlapi/types/image_gen_stream_event.py +3 -0
- aimlapi/types/image_generate_params.py +3 -0
- aimlapi/types/image_model.py +3 -0
- aimlapi/types/images_response.py +3 -0
- aimlapi/types/model.py +3 -0
- aimlapi/types/model_deleted.py +3 -0
- aimlapi/types/moderation.py +3 -0
- aimlapi/types/moderation_create_params.py +3 -0
- aimlapi/types/moderation_create_response.py +3 -0
- aimlapi/types/moderation_image_url_input_param.py +3 -0
- aimlapi/types/moderation_model.py +3 -0
- aimlapi/types/moderation_multi_modal_input_param.py +3 -0
- aimlapi/types/moderation_text_input_param.py +3 -0
- aimlapi/types/other_file_chunking_strategy_object.py +3 -0
- aimlapi/types/realtime/__init__.py +3 -0
- aimlapi/types/realtime/audio_transcription.py +3 -0
- aimlapi/types/realtime/audio_transcription_param.py +3 -0
- aimlapi/types/realtime/call_accept_params.py +3 -0
- aimlapi/types/realtime/call_create_params.py +3 -0
- aimlapi/types/realtime/call_refer_params.py +3 -0
- aimlapi/types/realtime/call_reject_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_response.py +3 -0
- aimlapi/types/realtime/conversation_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item.py +3 -0
- aimlapi/types/realtime/conversation_item_added.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/realtime/conversation_item_done.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_segment.py +3 -0
- aimlapi/types/realtime/conversation_item_param.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_timeout_triggered.py +3 -0
- aimlapi/types/realtime/log_prob_properties.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_completed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_failed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_in_progress.py +3 -0
- aimlapi/types/realtime/noise_reduction_type.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/realtime/realtime_audio_config.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_client_event.py +3 -0
- aimlapi/types/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message_param.py +3 -0
- aimlapi/types/realtime/realtime_error.py +3 -0
- aimlapi/types/realtime/realtime_error_event.py +3 -0
- aimlapi/types/realtime/realtime_function_tool.py +3 -0
- aimlapi/types/realtime/realtime_function_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error_param.py +3 -0
- aimlapi/types/realtime/realtime_response.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params_param.py +3 -0
- aimlapi/types/realtime/realtime_response_status.py +3 -0
- aimlapi/types/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_input_token_details.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_output_token_details.py +3 -0
- aimlapi/types/realtime/realtime_server_event.py +3 -0
- aimlapi/types/realtime/realtime_session_client_secret.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union_param.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_truncation.py +3 -0
- aimlapi/types/realtime/realtime_truncation_param.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio_param.py +3 -0
- aimlapi/types/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/realtime/response_create_event.py +3 -0
- aimlapi/types/realtime/response_create_event_param.py +3 -0
- aimlapi/types/realtime/response_created_event.py +3 -0
- aimlapi/types/realtime/response_done_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_delta.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_done.py +3 -0
- aimlapi/types/realtime/response_mcp_call_completed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_failed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_in_progress.py +3 -0
- aimlapi/types/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/realtime/response_text_done_event.py +3 -0
- aimlapi/types/realtime/session_created_event.py +3 -0
- aimlapi/types/realtime/session_update_event.py +3 -0
- aimlapi/types/realtime/session_update_event_param.py +3 -0
- aimlapi/types/realtime/session_updated_event.py +3 -0
- aimlapi/types/responses/__init__.py +3 -0
- aimlapi/types/responses/computer_tool.py +3 -0
- aimlapi/types/responses/computer_tool_param.py +3 -0
- aimlapi/types/responses/custom_tool.py +3 -0
- aimlapi/types/responses/custom_tool_param.py +3 -0
- aimlapi/types/responses/easy_input_message.py +3 -0
- aimlapi/types/responses/easy_input_message_param.py +3 -0
- aimlapi/types/responses/file_search_tool.py +3 -0
- aimlapi/types/responses/file_search_tool_param.py +3 -0
- aimlapi/types/responses/function_tool.py +3 -0
- aimlapi/types/responses/function_tool_param.py +3 -0
- aimlapi/types/responses/input_item_list_params.py +3 -0
- aimlapi/types/responses/input_token_count_params.py +3 -0
- aimlapi/types/responses/input_token_count_response.py +3 -0
- aimlapi/types/responses/parsed_response.py +3 -0
- aimlapi/types/responses/response.py +3 -0
- aimlapi/types/responses/response_audio_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_done_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_delta_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_completed_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_interpreting_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call_param.py +3 -0
- aimlapi/types/responses/response_completed_event.py +3 -0
- aimlapi/types/responses/response_computer_tool_call.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot_param.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_param.py +3 -0
- aimlapi/types/responses/response_content_part_added_event.py +3 -0
- aimlapi/types/responses/response_content_part_done_event.py +3 -0
- aimlapi/types/responses/response_conversation_param.py +3 -0
- aimlapi/types/responses/response_create_params.py +3 -0
- aimlapi/types/responses/response_created_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_delta_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_done_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output_param.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_param.py +3 -0
- aimlapi/types/responses/response_error.py +3 -0
- aimlapi/types/responses/response_error_event.py +3 -0
- aimlapi/types/responses/response_failed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_searching_event.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call_param.py +3 -0
- aimlapi/types/responses/response_format_text_config.py +3 -0
- aimlapi/types/responses/response_format_text_config_param.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config_param.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_function_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list_param.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_param.py +3 -0
- aimlapi/types/responses/response_function_tool_call.py +3 -0
- aimlapi/types/responses/response_function_tool_call_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_param.py +3 -0
- aimlapi/types/responses/response_function_web_search.py +3 -0
- aimlapi/types/responses/response_function_web_search_param.py +3 -0
- aimlapi/types/responses/response_image_gen_call_completed_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_generating_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_partial_image_event.py +3 -0
- aimlapi/types/responses/response_in_progress_event.py +3 -0
- aimlapi/types/responses/response_includable.py +3 -0
- aimlapi/types/responses/response_incomplete_event.py +3 -0
- aimlapi/types/responses/response_input_audio.py +3 -0
- aimlapi/types/responses/response_input_audio_param.py +3 -0
- aimlapi/types/responses/response_input_content.py +3 -0
- aimlapi/types/responses/response_input_content_param.py +3 -0
- aimlapi/types/responses/response_input_file.py +3 -0
- aimlapi/types/responses/response_input_file_content.py +3 -0
- aimlapi/types/responses/response_input_file_content_param.py +3 -0
- aimlapi/types/responses/response_input_file_param.py +3 -0
- aimlapi/types/responses/response_input_image.py +3 -0
- aimlapi/types/responses/response_input_image_content.py +3 -0
- aimlapi/types/responses/response_input_image_content_param.py +3 -0
- aimlapi/types/responses/response_input_image_param.py +3 -0
- aimlapi/types/responses/response_input_item.py +3 -0
- aimlapi/types/responses/response_input_item_param.py +3 -0
- aimlapi/types/responses/response_input_message_content_list.py +3 -0
- aimlapi/types/responses/response_input_message_content_list_param.py +3 -0
- aimlapi/types/responses/response_input_message_item.py +3 -0
- aimlapi/types/responses/response_input_param.py +3 -0
- aimlapi/types/responses/response_input_text.py +3 -0
- aimlapi/types/responses/response_input_text_content.py +3 -0
- aimlapi/types/responses/response_input_text_content_param.py +3 -0
- aimlapi/types/responses/response_input_text_param.py +3 -0
- aimlapi/types/responses/response_item.py +3 -0
- aimlapi/types/responses/response_item_list.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_in_progress_event.py +3 -0
- aimlapi/types/responses/response_output_item.py +3 -0
- aimlapi/types/responses/response_output_item_added_event.py +3 -0
- aimlapi/types/responses/response_output_item_done_event.py +3 -0
- aimlapi/types/responses/response_output_message.py +3 -0
- aimlapi/types/responses/response_output_message_param.py +3 -0
- aimlapi/types/responses/response_output_refusal.py +3 -0
- aimlapi/types/responses/response_output_refusal_param.py +3 -0
- aimlapi/types/responses/response_output_text.py +3 -0
- aimlapi/types/responses/response_output_text_annotation_added_event.py +3 -0
- aimlapi/types/responses/response_output_text_param.py +3 -0
- aimlapi/types/responses/response_prompt.py +3 -0
- aimlapi/types/responses/response_prompt_param.py +3 -0
- aimlapi/types/responses/response_queued_event.py +3 -0
- aimlapi/types/responses/response_reasoning_item.py +3 -0
- aimlapi/types/responses/response_reasoning_item_param.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_added_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_done_event.py +3 -0
- aimlapi/types/responses/response_refusal_delta_event.py +3 -0
- aimlapi/types/responses/response_refusal_done_event.py +3 -0
- aimlapi/types/responses/response_retrieve_params.py +3 -0
- aimlapi/types/responses/response_status.py +3 -0
- aimlapi/types/responses/response_stream_event.py +3 -0
- aimlapi/types/responses/response_text_config.py +3 -0
- aimlapi/types/responses/response_text_config_param.py +3 -0
- aimlapi/types/responses/response_text_delta_event.py +3 -0
- aimlapi/types/responses/response_text_done_event.py +3 -0
- aimlapi/types/responses/response_usage.py +3 -0
- aimlapi/types/responses/response_web_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_searching_event.py +3 -0
- aimlapi/types/responses/tool.py +3 -0
- aimlapi/types/responses/tool_choice_allowed.py +3 -0
- aimlapi/types/responses/tool_choice_allowed_param.py +3 -0
- aimlapi/types/responses/tool_choice_custom.py +3 -0
- aimlapi/types/responses/tool_choice_custom_param.py +3 -0
- aimlapi/types/responses/tool_choice_function.py +3 -0
- aimlapi/types/responses/tool_choice_function_param.py +3 -0
- aimlapi/types/responses/tool_choice_mcp.py +3 -0
- aimlapi/types/responses/tool_choice_mcp_param.py +3 -0
- aimlapi/types/responses/tool_choice_options.py +3 -0
- aimlapi/types/responses/tool_choice_types.py +3 -0
- aimlapi/types/responses/tool_choice_types_param.py +3 -0
- aimlapi/types/responses/tool_param.py +3 -0
- aimlapi/types/responses/web_search_preview_tool.py +3 -0
- aimlapi/types/responses/web_search_preview_tool_param.py +3 -0
- aimlapi/types/responses/web_search_tool.py +3 -0
- aimlapi/types/responses/web_search_tool_param.py +3 -0
- aimlapi/types/shared/__init__.py +3 -0
- aimlapi/types/shared/all_models.py +3 -0
- aimlapi/types/shared/chat_model.py +3 -0
- aimlapi/types/shared/comparison_filter.py +3 -0
- aimlapi/types/shared/compound_filter.py +3 -0
- aimlapi/types/shared/custom_tool_input_format.py +3 -0
- aimlapi/types/shared/error_object.py +3 -0
- aimlapi/types/shared/function_definition.py +3 -0
- aimlapi/types/shared/function_parameters.py +3 -0
- aimlapi/types/shared/metadata.py +3 -0
- aimlapi/types/shared/reasoning.py +3 -0
- aimlapi/types/shared/reasoning_effort.py +3 -0
- aimlapi/types/shared/response_format_json_object.py +3 -0
- aimlapi/types/shared/response_format_json_schema.py +3 -0
- aimlapi/types/shared/response_format_text.py +3 -0
- aimlapi/types/shared/response_format_text_grammar.py +3 -0
- aimlapi/types/shared/response_format_text_python.py +3 -0
- aimlapi/types/shared/responses_model.py +3 -0
- aimlapi/types/shared_params/__init__.py +3 -0
- aimlapi/types/shared_params/chat_model.py +3 -0
- aimlapi/types/shared_params/comparison_filter.py +3 -0
- aimlapi/types/shared_params/compound_filter.py +3 -0
- aimlapi/types/shared_params/custom_tool_input_format.py +3 -0
- aimlapi/types/shared_params/function_definition.py +3 -0
- aimlapi/types/shared_params/function_parameters.py +3 -0
- aimlapi/types/shared_params/metadata.py +3 -0
- aimlapi/types/shared_params/reasoning.py +3 -0
- aimlapi/types/shared_params/reasoning_effort.py +3 -0
- aimlapi/types/shared_params/response_format_json_object.py +3 -0
- aimlapi/types/shared_params/response_format_json_schema.py +3 -0
- aimlapi/types/shared_params/response_format_text.py +3 -0
- aimlapi/types/shared_params/responses_model.py +3 -0
- aimlapi/types/static_file_chunking_strategy.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object_param.py +3 -0
- aimlapi/types/static_file_chunking_strategy_param.py +3 -0
- aimlapi/types/upload.py +3 -0
- aimlapi/types/upload_complete_params.py +3 -0
- aimlapi/types/upload_create_params.py +3 -0
- aimlapi/types/uploads/__init__.py +3 -0
- aimlapi/types/uploads/part_create_params.py +3 -0
- aimlapi/types/uploads/upload_part.py +3 -0
- aimlapi/types/vector_store.py +3 -0
- aimlapi/types/vector_store_create_params.py +3 -0
- aimlapi/types/vector_store_deleted.py +3 -0
- aimlapi/types/vector_store_list_params.py +3 -0
- aimlapi/types/vector_store_search_params.py +3 -0
- aimlapi/types/vector_store_search_response.py +3 -0
- aimlapi/types/vector_store_update_params.py +3 -0
- aimlapi/types/vector_stores/__init__.py +3 -0
- aimlapi/types/vector_stores/file_batch_create_params.py +3 -0
- aimlapi/types/vector_stores/file_batch_list_files_params.py +3 -0
- aimlapi/types/vector_stores/file_content_response.py +3 -0
- aimlapi/types/vector_stores/file_create_params.py +3 -0
- aimlapi/types/vector_stores/file_list_params.py +3 -0
- aimlapi/types/vector_stores/file_update_params.py +3 -0
- aimlapi/types/vector_stores/vector_store_file.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_batch.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_deleted.py +3 -0
- aimlapi/types/video.py +3 -0
- aimlapi/types/video_create_error.py +3 -0
- aimlapi/types/video_create_params.py +3 -0
- aimlapi/types/video_delete_response.py +3 -0
- aimlapi/types/video_download_content_params.py +3 -0
- aimlapi/types/video_list_params.py +3 -0
- aimlapi/types/video_model.py +3 -0
- aimlapi/types/video_remix_params.py +3 -0
- aimlapi/types/video_seconds.py +3 -0
- aimlapi/types/video_size.py +3 -0
- aimlapi/types/webhooks/__init__.py +3 -0
- aimlapi/types/webhooks/batch_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_expired_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_canceled_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/realtime_call_incoming_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_incomplete_webhook_event.py +3 -0
- aimlapi/types/webhooks/unwrap_webhook_event.py +3 -0
- aimlapi/types/websocket_connection_options.py +3 -0
- aimlapi/version.py +3 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/METADATA +886 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/RECORD +1958 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/WHEEL +4 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/entry_points.txt +2 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/licenses/LICENSE +201 -0
- openai/__init__.py +395 -0
- openai/__main__.py +3 -0
- openai/_base_client.py +2027 -0
- openai/_client.py +1272 -0
- openai/_compat.py +231 -0
- openai/_constants.py +14 -0
- openai/_exceptions.py +161 -0
- openai/_extras/__init__.py +3 -0
- openai/_extras/_common.py +21 -0
- openai/_extras/numpy_proxy.py +37 -0
- openai/_extras/pandas_proxy.py +28 -0
- openai/_extras/sounddevice_proxy.py +28 -0
- openai/_files.py +123 -0
- openai/_legacy_response.py +488 -0
- openai/_models.py +897 -0
- openai/_module_client.py +173 -0
- openai/_qs.py +150 -0
- openai/_resource.py +43 -0
- openai/_response.py +848 -0
- openai/_streaming.py +408 -0
- openai/_types.py +264 -0
- openai/_utils/__init__.py +67 -0
- openai/_utils/_compat.py +45 -0
- openai/_utils/_datetime_parse.py +136 -0
- openai/_utils/_logs.py +42 -0
- openai/_utils/_proxy.py +65 -0
- openai/_utils/_reflection.py +45 -0
- openai/_utils/_resources_proxy.py +24 -0
- openai/_utils/_streams.py +12 -0
- openai/_utils/_sync.py +58 -0
- openai/_utils/_transform.py +457 -0
- openai/_utils/_typing.py +156 -0
- openai/_utils/_utils.py +437 -0
- openai/_version.py +4 -0
- openai/cli/__init__.py +1 -0
- openai/cli/_api/__init__.py +1 -0
- openai/cli/_api/_main.py +17 -0
- openai/cli/_api/audio.py +108 -0
- openai/cli/_api/chat/__init__.py +13 -0
- openai/cli/_api/chat/completions.py +160 -0
- openai/cli/_api/completions.py +173 -0
- openai/cli/_api/files.py +80 -0
- openai/cli/_api/fine_tuning/__init__.py +13 -0
- openai/cli/_api/fine_tuning/jobs.py +170 -0
- openai/cli/_api/image.py +139 -0
- openai/cli/_api/models.py +45 -0
- openai/cli/_cli.py +233 -0
- openai/cli/_errors.py +21 -0
- openai/cli/_models.py +17 -0
- openai/cli/_progress.py +59 -0
- openai/cli/_tools/__init__.py +1 -0
- openai/cli/_tools/_main.py +17 -0
- openai/cli/_tools/fine_tunes.py +63 -0
- openai/cli/_tools/migrate.py +164 -0
- openai/cli/_utils.py +45 -0
- openai/helpers/__init__.py +4 -0
- openai/helpers/local_audio_player.py +165 -0
- openai/helpers/microphone.py +100 -0
- openai/lib/.keep +4 -0
- openai/lib/__init__.py +2 -0
- openai/lib/_old_api.py +72 -0
- openai/lib/_parsing/__init__.py +12 -0
- openai/lib/_parsing/_completions.py +305 -0
- openai/lib/_parsing/_responses.py +180 -0
- openai/lib/_pydantic.py +155 -0
- openai/lib/_realtime.py +92 -0
- openai/lib/_tools.py +66 -0
- openai/lib/_validators.py +809 -0
- openai/lib/azure.py +647 -0
- openai/lib/streaming/__init__.py +8 -0
- openai/lib/streaming/_assistants.py +1038 -0
- openai/lib/streaming/_deltas.py +64 -0
- openai/lib/streaming/chat/__init__.py +27 -0
- openai/lib/streaming/chat/_completions.py +770 -0
- openai/lib/streaming/chat/_events.py +123 -0
- openai/lib/streaming/chat/_types.py +20 -0
- openai/lib/streaming/responses/__init__.py +13 -0
- openai/lib/streaming/responses/_events.py +148 -0
- openai/lib/streaming/responses/_responses.py +372 -0
- openai/lib/streaming/responses/_types.py +10 -0
- openai/pagination.py +190 -0
- openai/py.typed +0 -0
- openai/resources/__init__.py +229 -0
- openai/resources/audio/__init__.py +61 -0
- openai/resources/audio/audio.py +166 -0
- openai/resources/audio/speech.py +255 -0
- openai/resources/audio/transcriptions.py +980 -0
- openai/resources/audio/translations.py +367 -0
- openai/resources/batches.py +530 -0
- openai/resources/beta/__init__.py +61 -0
- openai/resources/beta/assistants.py +1049 -0
- openai/resources/beta/beta.py +187 -0
- openai/resources/beta/chatkit/__init__.py +47 -0
- openai/resources/beta/chatkit/chatkit.py +134 -0
- openai/resources/beta/chatkit/sessions.py +301 -0
- openai/resources/beta/chatkit/threads.py +521 -0
- openai/resources/beta/realtime/__init__.py +47 -0
- openai/resources/beta/realtime/realtime.py +1094 -0
- openai/resources/beta/realtime/sessions.py +424 -0
- openai/resources/beta/realtime/transcription_sessions.py +282 -0
- openai/resources/beta/threads/__init__.py +47 -0
- openai/resources/beta/threads/messages.py +718 -0
- openai/resources/beta/threads/runs/__init__.py +33 -0
- openai/resources/beta/threads/runs/runs.py +3122 -0
- openai/resources/beta/threads/runs/steps.py +399 -0
- openai/resources/beta/threads/threads.py +1935 -0
- openai/resources/chat/__init__.py +33 -0
- openai/resources/chat/chat.py +102 -0
- openai/resources/chat/completions/__init__.py +33 -0
- openai/resources/chat/completions/completions.py +3143 -0
- openai/resources/chat/completions/messages.py +212 -0
- openai/resources/completions.py +1160 -0
- openai/resources/containers/__init__.py +33 -0
- openai/resources/containers/containers.py +510 -0
- openai/resources/containers/files/__init__.py +33 -0
- openai/resources/containers/files/content.py +173 -0
- openai/resources/containers/files/files.py +545 -0
- openai/resources/conversations/__init__.py +33 -0
- openai/resources/conversations/conversations.py +486 -0
- openai/resources/conversations/items.py +557 -0
- openai/resources/embeddings.py +298 -0
- openai/resources/evals/__init__.py +33 -0
- openai/resources/evals/evals.py +662 -0
- openai/resources/evals/runs/__init__.py +33 -0
- openai/resources/evals/runs/output_items.py +315 -0
- openai/resources/evals/runs/runs.py +634 -0
- openai/resources/files.py +770 -0
- openai/resources/fine_tuning/__init__.py +61 -0
- openai/resources/fine_tuning/alpha/__init__.py +33 -0
- openai/resources/fine_tuning/alpha/alpha.py +102 -0
- openai/resources/fine_tuning/alpha/graders.py +282 -0
- openai/resources/fine_tuning/checkpoints/__init__.py +33 -0
- openai/resources/fine_tuning/checkpoints/checkpoints.py +102 -0
- openai/resources/fine_tuning/checkpoints/permissions.py +418 -0
- openai/resources/fine_tuning/fine_tuning.py +166 -0
- openai/resources/fine_tuning/jobs/__init__.py +33 -0
- openai/resources/fine_tuning/jobs/checkpoints.py +199 -0
- openai/resources/fine_tuning/jobs/jobs.py +918 -0
- openai/resources/images.py +1858 -0
- openai/resources/models.py +306 -0
- openai/resources/moderations.py +197 -0
- openai/resources/realtime/__init__.py +47 -0
- openai/resources/realtime/calls.py +764 -0
- openai/resources/realtime/client_secrets.py +189 -0
- openai/resources/realtime/realtime.py +1079 -0
- openai/resources/responses/__init__.py +47 -0
- openai/resources/responses/input_items.py +226 -0
- openai/resources/responses/input_tokens.py +309 -0
- openai/resources/responses/responses.py +3130 -0
- openai/resources/uploads/__init__.py +33 -0
- openai/resources/uploads/parts.py +205 -0
- openai/resources/uploads/uploads.py +719 -0
- openai/resources/vector_stores/__init__.py +47 -0
- openai/resources/vector_stores/file_batches.py +813 -0
- openai/resources/vector_stores/files.py +939 -0
- openai/resources/vector_stores/vector_stores.py +875 -0
- openai/resources/videos.py +847 -0
- openai/resources/webhooks.py +210 -0
- openai/types/__init__.py +115 -0
- openai/types/audio/__init__.py +23 -0
- openai/types/audio/speech_create_params.py +57 -0
- openai/types/audio/speech_model.py +7 -0
- openai/types/audio/transcription.py +71 -0
- openai/types/audio/transcription_create_params.py +172 -0
- openai/types/audio/transcription_create_response.py +12 -0
- openai/types/audio/transcription_diarized.py +63 -0
- openai/types/audio/transcription_diarized_segment.py +32 -0
- openai/types/audio/transcription_include.py +7 -0
- openai/types/audio/transcription_segment.py +49 -0
- openai/types/audio/transcription_stream_event.py +16 -0
- openai/types/audio/transcription_text_delta_event.py +41 -0
- openai/types/audio/transcription_text_done_event.py +63 -0
- openai/types/audio/transcription_text_segment_event.py +27 -0
- openai/types/audio/transcription_verbose.py +38 -0
- openai/types/audio/transcription_word.py +16 -0
- openai/types/audio/translation.py +9 -0
- openai/types/audio/translation_create_params.py +49 -0
- openai/types/audio/translation_create_response.py +11 -0
- openai/types/audio/translation_verbose.py +22 -0
- openai/types/audio_model.py +7 -0
- openai/types/audio_response_format.py +7 -0
- openai/types/auto_file_chunking_strategy_param.py +12 -0
- openai/types/batch.py +104 -0
- openai/types/batch_create_params.py +72 -0
- openai/types/batch_error.py +21 -0
- openai/types/batch_list_params.py +24 -0
- openai/types/batch_request_counts.py +16 -0
- openai/types/batch_usage.py +35 -0
- openai/types/beta/__init__.py +34 -0
- openai/types/beta/assistant.py +134 -0
- openai/types/beta/assistant_create_params.py +220 -0
- openai/types/beta/assistant_deleted.py +15 -0
- openai/types/beta/assistant_list_params.py +39 -0
- openai/types/beta/assistant_response_format_option.py +14 -0
- openai/types/beta/assistant_response_format_option_param.py +16 -0
- openai/types/beta/assistant_stream_event.py +294 -0
- openai/types/beta/assistant_tool.py +15 -0
- openai/types/beta/assistant_tool_choice.py +16 -0
- openai/types/beta/assistant_tool_choice_function.py +10 -0
- openai/types/beta/assistant_tool_choice_function_param.py +12 -0
- openai/types/beta/assistant_tool_choice_option.py +10 -0
- openai/types/beta/assistant_tool_choice_option_param.py +12 -0
- openai/types/beta/assistant_tool_choice_param.py +16 -0
- openai/types/beta/assistant_tool_param.py +14 -0
- openai/types/beta/assistant_update_params.py +191 -0
- openai/types/beta/chat/__init__.py +3 -0
- openai/types/beta/chatkit/__init__.py +32 -0
- openai/types/beta/chatkit/chat_session.py +43 -0
- openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +10 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration.py +19 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +59 -0
- openai/types/beta/chatkit/chat_session_expires_after_param.py +15 -0
- openai/types/beta/chatkit/chat_session_file_upload.py +18 -0
- openai/types/beta/chatkit/chat_session_history.py +18 -0
- openai/types/beta/chatkit/chat_session_rate_limits.py +10 -0
- openai/types/beta/chatkit/chat_session_rate_limits_param.py +12 -0
- openai/types/beta/chatkit/chat_session_status.py +7 -0
- openai/types/beta/chatkit/chat_session_workflow_param.py +34 -0
- openai/types/beta/chatkit/chatkit_attachment.py +25 -0
- openai/types/beta/chatkit/chatkit_response_output_text.py +62 -0
- openai/types/beta/chatkit/chatkit_thread.py +56 -0
- openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +29 -0
- openai/types/beta/chatkit/chatkit_thread_item_list.py +144 -0
- openai/types/beta/chatkit/chatkit_thread_user_message_item.py +77 -0
- openai/types/beta/chatkit/chatkit_widget_item.py +27 -0
- openai/types/beta/chatkit/session_create_params.py +35 -0
- openai/types/beta/chatkit/thread_delete_response.py +18 -0
- openai/types/beta/chatkit/thread_list_items_params.py +27 -0
- openai/types/beta/chatkit/thread_list_params.py +33 -0
- openai/types/beta/chatkit_workflow.py +32 -0
- openai/types/beta/code_interpreter_tool.py +12 -0
- openai/types/beta/code_interpreter_tool_param.py +12 -0
- openai/types/beta/file_search_tool.py +55 -0
- openai/types/beta/file_search_tool_param.py +54 -0
- openai/types/beta/function_tool.py +15 -0
- openai/types/beta/function_tool_param.py +16 -0
- openai/types/beta/realtime/__init__.py +96 -0
- openai/types/beta/realtime/conversation_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item.py +61 -0
- openai/types/beta/realtime/conversation_item_content.py +32 -0
- openai/types/beta/realtime/conversation_item_content_param.py +31 -0
- openai/types/beta/realtime/conversation_item_create_event.py +29 -0
- openai/types/beta/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/beta/realtime/conversation_item_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item_delete_event.py +19 -0
- openai/types/beta/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +87 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +39 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/beta/realtime/conversation_item_param.py +62 -0
- openai/types/beta/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/beta/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/beta/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/beta/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/beta/realtime/conversation_item_with_reference.py +87 -0
- openai/types/beta/realtime/conversation_item_with_reference_param.py +87 -0
- openai/types/beta/realtime/error_event.py +36 -0
- openai/types/beta/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/beta/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/beta/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/beta/realtime/rate_limits_updated_event.py +33 -0
- openai/types/beta/realtime/realtime_client_event.py +47 -0
- openai/types/beta/realtime/realtime_client_event_param.py +44 -0
- openai/types/beta/realtime/realtime_connect_params.py +11 -0
- openai/types/beta/realtime/realtime_response.py +87 -0
- openai/types/beta/realtime/realtime_response_status.py +39 -0
- openai/types/beta/realtime/realtime_response_usage.py +52 -0
- openai/types/beta/realtime/realtime_server_event.py +133 -0
- openai/types/beta/realtime/response_audio_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_done_event.py +27 -0
- openai/types/beta/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/beta/realtime/response_cancel_event.py +22 -0
- openai/types/beta/realtime/response_cancel_event_param.py +21 -0
- openai/types/beta/realtime/response_content_part_added_event.py +45 -0
- openai/types/beta/realtime/response_content_part_done_event.py +45 -0
- openai/types/beta/realtime/response_create_event.py +121 -0
- openai/types/beta/realtime/response_create_event_param.py +122 -0
- openai/types/beta/realtime/response_created_event.py +19 -0
- openai/types/beta/realtime/response_done_event.py +19 -0
- openai/types/beta/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/beta/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/beta/realtime/response_output_item_added_event.py +25 -0
- openai/types/beta/realtime/response_output_item_done_event.py +25 -0
- openai/types/beta/realtime/response_text_delta_event.py +30 -0
- openai/types/beta/realtime/response_text_done_event.py +30 -0
- openai/types/beta/realtime/session.py +279 -0
- openai/types/beta/realtime/session_create_params.py +298 -0
- openai/types/beta/realtime/session_create_response.py +196 -0
- openai/types/beta/realtime/session_created_event.py +19 -0
- openai/types/beta/realtime/session_update_event.py +312 -0
- openai/types/beta/realtime/session_update_event_param.py +310 -0
- openai/types/beta/realtime/session_updated_event.py +19 -0
- openai/types/beta/realtime/transcription_session.py +100 -0
- openai/types/beta/realtime/transcription_session_create_params.py +173 -0
- openai/types/beta/realtime/transcription_session_update.py +185 -0
- openai/types/beta/realtime/transcription_session_update_param.py +185 -0
- openai/types/beta/realtime/transcription_session_updated_event.py +24 -0
- openai/types/beta/thread.py +63 -0
- openai/types/beta/thread_create_and_run_params.py +397 -0
- openai/types/beta/thread_create_params.py +186 -0
- openai/types/beta/thread_deleted.py +15 -0
- openai/types/beta/thread_update_params.py +56 -0
- openai/types/beta/threads/__init__.py +46 -0
- openai/types/beta/threads/annotation.py +12 -0
- openai/types/beta/threads/annotation_delta.py +14 -0
- openai/types/beta/threads/file_citation_annotation.py +26 -0
- openai/types/beta/threads/file_citation_delta_annotation.py +33 -0
- openai/types/beta/threads/file_path_annotation.py +26 -0
- openai/types/beta/threads/file_path_delta_annotation.py +30 -0
- openai/types/beta/threads/image_file.py +23 -0
- openai/types/beta/threads/image_file_content_block.py +15 -0
- openai/types/beta/threads/image_file_content_block_param.py +16 -0
- openai/types/beta/threads/image_file_delta.py +23 -0
- openai/types/beta/threads/image_file_delta_block.py +19 -0
- openai/types/beta/threads/image_file_param.py +22 -0
- openai/types/beta/threads/image_url.py +23 -0
- openai/types/beta/threads/image_url_content_block.py +15 -0
- openai/types/beta/threads/image_url_content_block_param.py +16 -0
- openai/types/beta/threads/image_url_delta.py +22 -0
- openai/types/beta/threads/image_url_delta_block.py +19 -0
- openai/types/beta/threads/image_url_param.py +22 -0
- openai/types/beta/threads/message.py +103 -0
- openai/types/beta/threads/message_content.py +18 -0
- openai/types/beta/threads/message_content_delta.py +17 -0
- openai/types/beta/threads/message_content_part_param.py +14 -0
- openai/types/beta/threads/message_create_params.py +55 -0
- openai/types/beta/threads/message_deleted.py +15 -0
- openai/types/beta/threads/message_delta.py +17 -0
- openai/types/beta/threads/message_delta_event.py +19 -0
- openai/types/beta/threads/message_list_params.py +42 -0
- openai/types/beta/threads/message_update_params.py +24 -0
- openai/types/beta/threads/refusal_content_block.py +14 -0
- openai/types/beta/threads/refusal_delta_block.py +18 -0
- openai/types/beta/threads/required_action_function_tool_call.py +34 -0
- openai/types/beta/threads/run.py +245 -0
- openai/types/beta/threads/run_create_params.py +268 -0
- openai/types/beta/threads/run_list_params.py +39 -0
- openai/types/beta/threads/run_status.py +17 -0
- openai/types/beta/threads/run_submit_tool_outputs_params.py +52 -0
- openai/types/beta/threads/run_update_params.py +24 -0
- openai/types/beta/threads/runs/__init__.py +24 -0
- openai/types/beta/threads/runs/code_interpreter_logs.py +19 -0
- openai/types/beta/threads/runs/code_interpreter_output_image.py +26 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call.py +70 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +44 -0
- openai/types/beta/threads/runs/file_search_tool_call.py +78 -0
- openai/types/beta/threads/runs/file_search_tool_call_delta.py +25 -0
- openai/types/beta/threads/runs/function_tool_call.py +38 -0
- openai/types/beta/threads/runs/function_tool_call_delta.py +41 -0
- openai/types/beta/threads/runs/message_creation_step_details.py +19 -0
- openai/types/beta/threads/runs/run_step.py +115 -0
- openai/types/beta/threads/runs/run_step_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_delta_event.py +19 -0
- openai/types/beta/threads/runs/run_step_delta_message_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_include.py +7 -0
- openai/types/beta/threads/runs/step_list_params.py +56 -0
- openai/types/beta/threads/runs/step_retrieve_params.py +28 -0
- openai/types/beta/threads/runs/tool_call.py +15 -0
- openai/types/beta/threads/runs/tool_call_delta.py +16 -0
- openai/types/beta/threads/runs/tool_call_delta_object.py +21 -0
- openai/types/beta/threads/runs/tool_calls_step_details.py +21 -0
- openai/types/beta/threads/text.py +15 -0
- openai/types/beta/threads/text_content_block.py +15 -0
- openai/types/beta/threads/text_content_block_param.py +15 -0
- openai/types/beta/threads/text_delta.py +15 -0
- openai/types/beta/threads/text_delta_block.py +19 -0
- openai/types/chat/__init__.py +102 -0
- openai/types/chat/chat_completion.py +89 -0
- openai/types/chat/chat_completion_allowed_tool_choice_param.py +17 -0
- openai/types/chat/chat_completion_allowed_tools_param.py +32 -0
- openai/types/chat/chat_completion_assistant_message_param.py +70 -0
- openai/types/chat/chat_completion_audio.py +25 -0
- openai/types/chat/chat_completion_audio_param.py +25 -0
- openai/types/chat/chat_completion_chunk.py +166 -0
- openai/types/chat/chat_completion_content_part_image.py +27 -0
- openai/types/chat/chat_completion_content_part_image_param.py +26 -0
- openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
- openai/types/chat/chat_completion_content_part_param.py +41 -0
- openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
- openai/types/chat/chat_completion_content_part_text.py +15 -0
- openai/types/chat/chat_completion_content_part_text_param.py +15 -0
- openai/types/chat/chat_completion_custom_tool_param.py +58 -0
- openai/types/chat/chat_completion_deleted.py +18 -0
- openai/types/chat/chat_completion_developer_message_param.py +25 -0
- openai/types/chat/chat_completion_function_call_option_param.py +12 -0
- openai/types/chat/chat_completion_function_message_param.py +19 -0
- openai/types/chat/chat_completion_function_tool.py +15 -0
- openai/types/chat/chat_completion_function_tool_param.py +16 -0
- openai/types/chat/chat_completion_message.py +79 -0
- openai/types/chat/chat_completion_message_custom_tool_call.py +26 -0
- openai/types/chat/chat_completion_message_custom_tool_call_param.py +26 -0
- openai/types/chat/chat_completion_message_function_tool_call.py +31 -0
- openai/types/chat/chat_completion_message_function_tool_call_param.py +31 -0
- openai/types/chat/chat_completion_message_param.py +24 -0
- openai/types/chat/chat_completion_message_tool_call.py +17 -0
- openai/types/chat/chat_completion_message_tool_call_param.py +14 -0
- openai/types/chat/chat_completion_message_tool_call_union_param.py +15 -0
- openai/types/chat/chat_completion_modality.py +7 -0
- openai/types/chat/chat_completion_named_tool_choice_custom_param.py +19 -0
- openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
- openai/types/chat/chat_completion_prediction_content_param.py +25 -0
- openai/types/chat/chat_completion_reasoning_effort.py +7 -0
- openai/types/chat/chat_completion_role.py +7 -0
- openai/types/chat/chat_completion_store_message.py +23 -0
- openai/types/chat/chat_completion_stream_options_param.py +31 -0
- openai/types/chat/chat_completion_system_message_param.py +25 -0
- openai/types/chat/chat_completion_token_logprob.py +57 -0
- openai/types/chat/chat_completion_tool_choice_option_param.py +19 -0
- openai/types/chat/chat_completion_tool_message_param.py +21 -0
- openai/types/chat/chat_completion_tool_param.py +14 -0
- openai/types/chat/chat_completion_tool_union_param.py +13 -0
- openai/types/chat/chat_completion_user_message_param.py +25 -0
- openai/types/chat/completion_create_params.py +450 -0
- openai/types/chat/completion_list_params.py +37 -0
- openai/types/chat/completion_update_params.py +22 -0
- openai/types/chat/completions/__init__.py +5 -0
- openai/types/chat/completions/message_list_params.py +21 -0
- openai/types/chat/parsed_chat_completion.py +40 -0
- openai/types/chat/parsed_function_tool_call.py +29 -0
- openai/types/chat_model.py +7 -0
- openai/types/completion.py +37 -0
- openai/types/completion_choice.py +35 -0
- openai/types/completion_create_params.py +189 -0
- openai/types/completion_usage.py +54 -0
- openai/types/container_create_params.py +30 -0
- openai/types/container_create_response.py +40 -0
- openai/types/container_list_params.py +30 -0
- openai/types/container_list_response.py +40 -0
- openai/types/container_retrieve_response.py +40 -0
- openai/types/containers/__init__.py +9 -0
- openai/types/containers/file_create_params.py +17 -0
- openai/types/containers/file_create_response.py +30 -0
- openai/types/containers/file_list_params.py +30 -0
- openai/types/containers/file_list_response.py +30 -0
- openai/types/containers/file_retrieve_response.py +30 -0
- openai/types/containers/files/__init__.py +3 -0
- openai/types/conversations/__init__.py +27 -0
- openai/types/conversations/computer_screenshot_content.py +22 -0
- openai/types/conversations/conversation.py +30 -0
- openai/types/conversations/conversation_create_params.py +29 -0
- openai/types/conversations/conversation_deleted_resource.py +15 -0
- openai/types/conversations/conversation_item.py +230 -0
- openai/types/conversations/conversation_item_list.py +26 -0
- openai/types/conversations/conversation_update_params.py +22 -0
- openai/types/conversations/input_file_content.py +7 -0
- openai/types/conversations/input_file_content_param.py +7 -0
- openai/types/conversations/input_image_content.py +7 -0
- openai/types/conversations/input_image_content_param.py +7 -0
- openai/types/conversations/input_text_content.py +7 -0
- openai/types/conversations/input_text_content_param.py +7 -0
- openai/types/conversations/item_create_params.py +24 -0
- openai/types/conversations/item_list_params.py +50 -0
- openai/types/conversations/item_retrieve_params.py +22 -0
- openai/types/conversations/message.py +66 -0
- openai/types/conversations/output_text_content.py +7 -0
- openai/types/conversations/output_text_content_param.py +7 -0
- openai/types/conversations/refusal_content.py +7 -0
- openai/types/conversations/refusal_content_param.py +7 -0
- openai/types/conversations/summary_text_content.py +15 -0
- openai/types/conversations/text_content.py +13 -0
- openai/types/create_embedding_response.py +31 -0
- openai/types/embedding.py +23 -0
- openai/types/embedding_create_params.py +55 -0
- openai/types/embedding_model.py +7 -0
- openai/types/eval_create_params.py +202 -0
- openai/types/eval_create_response.py +111 -0
- openai/types/eval_custom_data_source_config.py +21 -0
- openai/types/eval_delete_response.py +13 -0
- openai/types/eval_list_params.py +27 -0
- openai/types/eval_list_response.py +111 -0
- openai/types/eval_retrieve_response.py +111 -0
- openai/types/eval_stored_completions_data_source_config.py +32 -0
- openai/types/eval_update_params.py +25 -0
- openai/types/eval_update_response.py +111 -0
- openai/types/evals/__init__.py +22 -0
- openai/types/evals/create_eval_completions_run_data_source.py +236 -0
- openai/types/evals/create_eval_completions_run_data_source_param.py +232 -0
- openai/types/evals/create_eval_jsonl_run_data_source.py +42 -0
- openai/types/evals/create_eval_jsonl_run_data_source_param.py +47 -0
- openai/types/evals/eval_api_error.py +13 -0
- openai/types/evals/run_cancel_response.py +417 -0
- openai/types/evals/run_create_params.py +340 -0
- openai/types/evals/run_create_response.py +417 -0
- openai/types/evals/run_delete_response.py +15 -0
- openai/types/evals/run_list_params.py +27 -0
- openai/types/evals/run_list_response.py +417 -0
- openai/types/evals/run_retrieve_response.py +417 -0
- openai/types/evals/runs/__init__.py +7 -0
- openai/types/evals/runs/output_item_list_params.py +30 -0
- openai/types/evals/runs/output_item_list_response.py +134 -0
- openai/types/evals/runs/output_item_retrieve_response.py +134 -0
- openai/types/file_chunking_strategy.py +14 -0
- openai/types/file_chunking_strategy_param.py +13 -0
- openai/types/file_content.py +7 -0
- openai/types/file_create_params.py +45 -0
- openai/types/file_deleted.py +15 -0
- openai/types/file_list_params.py +33 -0
- openai/types/file_object.py +58 -0
- openai/types/file_purpose.py +7 -0
- openai/types/fine_tuning/__init__.py +26 -0
- openai/types/fine_tuning/alpha/__init__.py +8 -0
- openai/types/fine_tuning/alpha/grader_run_params.py +40 -0
- openai/types/fine_tuning/alpha/grader_run_response.py +67 -0
- openai/types/fine_tuning/alpha/grader_validate_params.py +24 -0
- openai/types/fine_tuning/alpha/grader_validate_response.py +20 -0
- openai/types/fine_tuning/checkpoints/__init__.py +9 -0
- openai/types/fine_tuning/checkpoints/permission_create_params.py +14 -0
- openai/types/fine_tuning/checkpoints/permission_create_response.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_delete_response.py +18 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_params.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +34 -0
- openai/types/fine_tuning/dpo_hyperparameters.py +36 -0
- openai/types/fine_tuning/dpo_hyperparameters_param.py +36 -0
- openai/types/fine_tuning/dpo_method.py +13 -0
- openai/types/fine_tuning/dpo_method_param.py +14 -0
- openai/types/fine_tuning/fine_tuning_job.py +161 -0
- openai/types/fine_tuning/fine_tuning_job_event.py +32 -0
- openai/types/fine_tuning/fine_tuning_job_integration.py +5 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +33 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +21 -0
- openai/types/fine_tuning/job_create_params.py +176 -0
- openai/types/fine_tuning/job_list_events_params.py +15 -0
- openai/types/fine_tuning/job_list_params.py +23 -0
- openai/types/fine_tuning/jobs/__init__.py +6 -0
- openai/types/fine_tuning/jobs/checkpoint_list_params.py +15 -0
- openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
- openai/types/fine_tuning/reinforcement_hyperparameters.py +43 -0
- openai/types/fine_tuning/reinforcement_hyperparameters_param.py +43 -0
- openai/types/fine_tuning/reinforcement_method.py +24 -0
- openai/types/fine_tuning/reinforcement_method_param.py +27 -0
- openai/types/fine_tuning/supervised_hyperparameters.py +29 -0
- openai/types/fine_tuning/supervised_hyperparameters_param.py +29 -0
- openai/types/fine_tuning/supervised_method.py +13 -0
- openai/types/fine_tuning/supervised_method_param.py +14 -0
- openai/types/graders/__init__.py +16 -0
- openai/types/graders/label_model_grader.py +70 -0
- openai/types/graders/label_model_grader_param.py +77 -0
- openai/types/graders/multi_grader.py +32 -0
- openai/types/graders/multi_grader_param.py +35 -0
- openai/types/graders/python_grader.py +22 -0
- openai/types/graders/python_grader_param.py +21 -0
- openai/types/graders/score_model_grader.py +109 -0
- openai/types/graders/score_model_grader_param.py +115 -0
- openai/types/graders/string_check_grader.py +24 -0
- openai/types/graders/string_check_grader_param.py +24 -0
- openai/types/graders/text_similarity_grader.py +40 -0
- openai/types/graders/text_similarity_grader_param.py +42 -0
- openai/types/image.py +26 -0
- openai/types/image_create_variation_params.py +48 -0
- openai/types/image_edit_completed_event.py +55 -0
- openai/types/image_edit_params.py +145 -0
- openai/types/image_edit_partial_image_event.py +33 -0
- openai/types/image_edit_stream_event.py +14 -0
- openai/types/image_gen_completed_event.py +55 -0
- openai/types/image_gen_partial_image_event.py +33 -0
- openai/types/image_gen_stream_event.py +14 -0
- openai/types/image_generate_params.py +143 -0
- openai/types/image_model.py +7 -0
- openai/types/images_response.py +60 -0
- openai/types/model.py +21 -0
- openai/types/model_deleted.py +13 -0
- openai/types/moderation.py +186 -0
- openai/types/moderation_create_params.py +30 -0
- openai/types/moderation_create_response.py +19 -0
- openai/types/moderation_image_url_input_param.py +20 -0
- openai/types/moderation_model.py +9 -0
- openai/types/moderation_multi_modal_input_param.py +13 -0
- openai/types/moderation_text_input_param.py +15 -0
- openai/types/other_file_chunking_strategy_object.py +12 -0
- openai/types/realtime/__init__.py +237 -0
- openai/types/realtime/audio_transcription.py +37 -0
- openai/types/realtime/audio_transcription_param.py +34 -0
- openai/types/realtime/call_accept_params.py +122 -0
- openai/types/realtime/call_create_params.py +17 -0
- openai/types/realtime/call_refer_params.py +15 -0
- openai/types/realtime/call_reject_params.py +15 -0
- openai/types/realtime/client_secret_create_params.py +46 -0
- openai/types/realtime/client_secret_create_response.py +26 -0
- openai/types/realtime/conversation_created_event.py +27 -0
- openai/types/realtime/conversation_item.py +32 -0
- openai/types/realtime/conversation_item_added.py +26 -0
- openai/types/realtime/conversation_item_create_event.py +29 -0
- openai/types/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/realtime/conversation_item_created_event.py +27 -0
- openai/types/realtime/conversation_item_delete_event.py +19 -0
- openai/types/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/realtime/conversation_item_done.py +26 -0
- openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +79 -0
- openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +36 -0
- openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/realtime/conversation_item_input_audio_transcription_segment.py +36 -0
- openai/types/realtime/conversation_item_param.py +30 -0
- openai/types/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/realtime/input_audio_buffer_timeout_triggered.py +30 -0
- openai/types/realtime/log_prob_properties.py +18 -0
- openai/types/realtime/mcp_list_tools_completed.py +18 -0
- openai/types/realtime/mcp_list_tools_failed.py +18 -0
- openai/types/realtime/mcp_list_tools_in_progress.py +18 -0
- openai/types/realtime/noise_reduction_type.py +7 -0
- openai/types/realtime/output_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/output_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/rate_limits_updated_event.py +33 -0
- openai/types/realtime/realtime_audio_config.py +15 -0
- openai/types/realtime/realtime_audio_config_input.py +63 -0
- openai/types/realtime/realtime_audio_config_input_param.py +65 -0
- openai/types/realtime/realtime_audio_config_output.py +36 -0
- openai/types/realtime/realtime_audio_config_output_param.py +35 -0
- openai/types/realtime/realtime_audio_config_param.py +16 -0
- openai/types/realtime/realtime_audio_formats.py +30 -0
- openai/types/realtime/realtime_audio_formats_param.py +29 -0
- openai/types/realtime/realtime_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_client_event.py +36 -0
- openai/types/realtime/realtime_client_event_param.py +34 -0
- openai/types/realtime/realtime_connect_params.py +13 -0
- openai/types/realtime/realtime_conversation_item_assistant_message.py +58 -0
- openai/types/realtime/realtime_conversation_item_assistant_message_param.py +58 -0
- openai/types/realtime/realtime_conversation_item_function_call.py +41 -0
- openai/types/realtime/realtime_conversation_item_function_call_output.py +37 -0
- openai/types/realtime/realtime_conversation_item_function_call_output_param.py +36 -0
- openai/types/realtime/realtime_conversation_item_function_call_param.py +40 -0
- openai/types/realtime/realtime_conversation_item_system_message.py +42 -0
- openai/types/realtime/realtime_conversation_item_system_message_param.py +42 -0
- openai/types/realtime/realtime_conversation_item_user_message.py +69 -0
- openai/types/realtime/realtime_conversation_item_user_message_param.py +69 -0
- openai/types/realtime/realtime_error.py +24 -0
- openai/types/realtime/realtime_error_event.py +19 -0
- openai/types/realtime/realtime_function_tool.py +25 -0
- openai/types/realtime/realtime_function_tool_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_response.py +25 -0
- openai/types/realtime/realtime_mcp_approval_response_param.py +25 -0
- openai/types/realtime/realtime_mcp_list_tools.py +36 -0
- openai/types/realtime/realtime_mcp_list_tools_param.py +36 -0
- openai/types/realtime/realtime_mcp_protocol_error.py +15 -0
- openai/types/realtime/realtime_mcp_protocol_error_param.py +15 -0
- openai/types/realtime/realtime_mcp_tool_call.py +43 -0
- openai/types/realtime/realtime_mcp_tool_call_param.py +40 -0
- openai/types/realtime/realtime_mcp_tool_execution_error.py +13 -0
- openai/types/realtime/realtime_mcp_tool_execution_error_param.py +13 -0
- openai/types/realtime/realtime_mcphttp_error.py +15 -0
- openai/types/realtime/realtime_mcphttp_error_param.py +15 -0
- openai/types/realtime/realtime_response.py +98 -0
- openai/types/realtime/realtime_response_create_audio_output.py +29 -0
- openai/types/realtime/realtime_response_create_audio_output_param.py +28 -0
- openai/types/realtime/realtime_response_create_mcp_tool.py +135 -0
- openai/types/realtime/realtime_response_create_mcp_tool_param.py +135 -0
- openai/types/realtime/realtime_response_create_params.py +98 -0
- openai/types/realtime/realtime_response_create_params_param.py +99 -0
- openai/types/realtime/realtime_response_status.py +39 -0
- openai/types/realtime/realtime_response_usage.py +41 -0
- openai/types/realtime/realtime_response_usage_input_token_details.py +35 -0
- openai/types/realtime/realtime_response_usage_output_token_details.py +15 -0
- openai/types/realtime/realtime_server_event.py +155 -0
- openai/types/realtime/realtime_session_client_secret.py +20 -0
- openai/types/realtime/realtime_session_create_request.py +122 -0
- openai/types/realtime/realtime_session_create_request_param.py +122 -0
- openai/types/realtime/realtime_session_create_response.py +475 -0
- openai/types/realtime/realtime_tool_choice_config.py +12 -0
- openai/types/realtime/realtime_tool_choice_config_param.py +14 -0
- openai/types/realtime/realtime_tools_config.py +10 -0
- openai/types/realtime/realtime_tools_config_param.py +143 -0
- openai/types/realtime/realtime_tools_config_union.py +141 -0
- openai/types/realtime/realtime_tools_config_union_param.py +140 -0
- openai/types/realtime/realtime_tracing_config.py +31 -0
- openai/types/realtime/realtime_tracing_config_param.py +31 -0
- openai/types/realtime/realtime_transcription_session_audio.py +12 -0
- openai/types/realtime/realtime_transcription_session_audio_input.py +65 -0
- openai/types/realtime/realtime_transcription_session_audio_input_param.py +67 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_transcription_session_audio_param.py +13 -0
- openai/types/realtime/realtime_transcription_session_create_request.py +27 -0
- openai/types/realtime/realtime_transcription_session_create_request_param.py +28 -0
- openai/types/realtime/realtime_transcription_session_create_response.py +68 -0
- openai/types/realtime/realtime_transcription_session_turn_detection.py +32 -0
- openai/types/realtime/realtime_truncation.py +10 -0
- openai/types/realtime/realtime_truncation_param.py +12 -0
- openai/types/realtime/realtime_truncation_retention_ratio.py +38 -0
- openai/types/realtime/realtime_truncation_retention_ratio_param.py +37 -0
- openai/types/realtime/response_audio_delta_event.py +30 -0
- openai/types/realtime/response_audio_done_event.py +27 -0
- openai/types/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/realtime/response_cancel_event.py +22 -0
- openai/types/realtime/response_cancel_event_param.py +21 -0
- openai/types/realtime/response_content_part_added_event.py +45 -0
- openai/types/realtime/response_content_part_done_event.py +45 -0
- openai/types/realtime/response_create_event.py +20 -0
- openai/types/realtime/response_create_event_param.py +20 -0
- openai/types/realtime/response_created_event.py +19 -0
- openai/types/realtime/response_done_event.py +19 -0
- openai/types/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/realtime/response_mcp_call_arguments_delta.py +31 -0
- openai/types/realtime/response_mcp_call_arguments_done.py +27 -0
- openai/types/realtime/response_mcp_call_completed.py +21 -0
- openai/types/realtime/response_mcp_call_failed.py +21 -0
- openai/types/realtime/response_mcp_call_in_progress.py +21 -0
- openai/types/realtime/response_output_item_added_event.py +25 -0
- openai/types/realtime/response_output_item_done_event.py +25 -0
- openai/types/realtime/response_text_delta_event.py +30 -0
- openai/types/realtime/response_text_done_event.py +30 -0
- openai/types/realtime/session_created_event.py +23 -0
- openai/types/realtime/session_update_event.py +31 -0
- openai/types/realtime/session_update_event_param.py +32 -0
- openai/types/realtime/session_updated_event.py +23 -0
- openai/types/responses/__init__.py +270 -0
- openai/types/responses/apply_patch_tool.py +12 -0
- openai/types/responses/apply_patch_tool_param.py +12 -0
- openai/types/responses/computer_tool.py +21 -0
- openai/types/responses/computer_tool_param.py +21 -0
- openai/types/responses/custom_tool.py +23 -0
- openai/types/responses/custom_tool_param.py +23 -0
- openai/types/responses/easy_input_message.py +26 -0
- openai/types/responses/easy_input_message_param.py +27 -0
- openai/types/responses/file_search_tool.py +58 -0
- openai/types/responses/file_search_tool_param.py +60 -0
- openai/types/responses/function_shell_tool.py +12 -0
- openai/types/responses/function_shell_tool_param.py +12 -0
- openai/types/responses/function_tool.py +28 -0
- openai/types/responses/function_tool_param.py +28 -0
- openai/types/responses/input_item_list_params.py +34 -0
- openai/types/responses/input_token_count_params.py +142 -0
- openai/types/responses/input_token_count_response.py +13 -0
- openai/types/responses/parsed_response.py +105 -0
- openai/types/responses/response.py +307 -0
- openai/types/responses/response_apply_patch_tool_call.py +76 -0
- openai/types/responses/response_apply_patch_tool_call_output.py +31 -0
- openai/types/responses/response_audio_delta_event.py +18 -0
- openai/types/responses/response_audio_done_event.py +15 -0
- openai/types/responses/response_audio_transcript_delta_event.py +18 -0
- openai/types/responses/response_audio_transcript_done_event.py +15 -0
- openai/types/responses/response_code_interpreter_call_code_delta_event.py +27 -0
- openai/types/responses/response_code_interpreter_call_code_done_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_completed_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_in_progress_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_interpreting_event.py +24 -0
- openai/types/responses/response_code_interpreter_tool_call.py +55 -0
- openai/types/responses/response_code_interpreter_tool_call_param.py +54 -0
- openai/types/responses/response_completed_event.py +19 -0
- openai/types/responses/response_computer_tool_call.py +209 -0
- openai/types/responses/response_computer_tool_call_output_item.py +47 -0
- openai/types/responses/response_computer_tool_call_output_screenshot.py +22 -0
- openai/types/responses/response_computer_tool_call_output_screenshot_param.py +21 -0
- openai/types/responses/response_computer_tool_call_param.py +207 -0
- openai/types/responses/response_content_part_added_event.py +44 -0
- openai/types/responses/response_content_part_done_event.py +44 -0
- openai/types/responses/response_conversation_param.py +12 -0
- openai/types/responses/response_create_params.py +334 -0
- openai/types/responses/response_created_event.py +19 -0
- openai/types/responses/response_custom_tool_call.py +25 -0
- openai/types/responses/response_custom_tool_call_input_delta_event.py +24 -0
- openai/types/responses/response_custom_tool_call_input_done_event.py +24 -0
- openai/types/responses/response_custom_tool_call_output.py +33 -0
- openai/types/responses/response_custom_tool_call_output_param.py +31 -0
- openai/types/responses/response_custom_tool_call_param.py +24 -0
- openai/types/responses/response_error.py +34 -0
- openai/types/responses/response_error_event.py +25 -0
- openai/types/responses/response_failed_event.py +19 -0
- openai/types/responses/response_file_search_call_completed_event.py +21 -0
- openai/types/responses/response_file_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_file_search_call_searching_event.py +21 -0
- openai/types/responses/response_file_search_tool_call.py +51 -0
- openai/types/responses/response_file_search_tool_call_param.py +53 -0
- openai/types/responses/response_format_text_config.py +16 -0
- openai/types/responses/response_format_text_config_param.py +16 -0
- openai/types/responses/response_format_text_json_schema_config.py +43 -0
- openai/types/responses/response_format_text_json_schema_config_param.py +41 -0
- openai/types/responses/response_function_call_arguments_delta_event.py +26 -0
- openai/types/responses/response_function_call_arguments_done_event.py +26 -0
- openai/types/responses/response_function_call_output_item.py +16 -0
- openai/types/responses/response_function_call_output_item_list.py +10 -0
- openai/types/responses/response_function_call_output_item_list_param.py +18 -0
- openai/types/responses/response_function_call_output_item_param.py +16 -0
- openai/types/responses/response_function_shell_call_output_content.py +36 -0
- openai/types/responses/response_function_shell_call_output_content_param.py +35 -0
- openai/types/responses/response_function_shell_tool_call.py +44 -0
- openai/types/responses/response_function_shell_tool_call_output.py +70 -0
- openai/types/responses/response_function_tool_call.py +32 -0
- openai/types/responses/response_function_tool_call_item.py +10 -0
- openai/types/responses/response_function_tool_call_output_item.py +40 -0
- openai/types/responses/response_function_tool_call_param.py +31 -0
- openai/types/responses/response_function_web_search.py +67 -0
- openai/types/responses/response_function_web_search_param.py +73 -0
- openai/types/responses/response_image_gen_call_completed_event.py +21 -0
- openai/types/responses/response_image_gen_call_generating_event.py +21 -0
- openai/types/responses/response_image_gen_call_in_progress_event.py +21 -0
- openai/types/responses/response_image_gen_call_partial_image_event.py +30 -0
- openai/types/responses/response_in_progress_event.py +19 -0
- openai/types/responses/response_includable.py +16 -0
- openai/types/responses/response_incomplete_event.py +19 -0
- openai/types/responses/response_input_audio.py +22 -0
- openai/types/responses/response_input_audio_param.py +22 -0
- openai/types/responses/response_input_content.py +15 -0
- openai/types/responses/response_input_content_param.py +14 -0
- openai/types/responses/response_input_file.py +25 -0
- openai/types/responses/response_input_file_content.py +25 -0
- openai/types/responses/response_input_file_content_param.py +25 -0
- openai/types/responses/response_input_file_param.py +25 -0
- openai/types/responses/response_input_image.py +28 -0
- openai/types/responses/response_input_image_content.py +28 -0
- openai/types/responses/response_input_image_content_param.py +28 -0
- openai/types/responses/response_input_image_param.py +28 -0
- openai/types/responses/response_input_item.py +482 -0
- openai/types/responses/response_input_item_param.py +479 -0
- openai/types/responses/response_input_message_content_list.py +10 -0
- openai/types/responses/response_input_message_content_list_param.py +16 -0
- openai/types/responses/response_input_message_item.py +33 -0
- openai/types/responses/response_input_param.py +482 -0
- openai/types/responses/response_input_text.py +15 -0
- openai/types/responses/response_input_text_content.py +15 -0
- openai/types/responses/response_input_text_content_param.py +15 -0
- openai/types/responses/response_input_text_param.py +15 -0
- openai/types/responses/response_item.py +226 -0
- openai/types/responses/response_item_list.py +26 -0
- openai/types/responses/response_mcp_call_arguments_delta_event.py +27 -0
- openai/types/responses/response_mcp_call_arguments_done_event.py +24 -0
- openai/types/responses/response_mcp_call_completed_event.py +21 -0
- openai/types/responses/response_mcp_call_failed_event.py +21 -0
- openai/types/responses/response_mcp_call_in_progress_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_completed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_failed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_in_progress_event.py +21 -0
- openai/types/responses/response_output_item.py +189 -0
- openai/types/responses/response_output_item_added_event.py +22 -0
- openai/types/responses/response_output_item_done_event.py +22 -0
- openai/types/responses/response_output_message.py +34 -0
- openai/types/responses/response_output_message_param.py +34 -0
- openai/types/responses/response_output_refusal.py +15 -0
- openai/types/responses/response_output_refusal_param.py +15 -0
- openai/types/responses/response_output_text.py +117 -0
- openai/types/responses/response_output_text_annotation_added_event.py +30 -0
- openai/types/responses/response_output_text_param.py +115 -0
- openai/types/responses/response_prompt.py +28 -0
- openai/types/responses/response_prompt_param.py +29 -0
- openai/types/responses/response_queued_event.py +19 -0
- openai/types/responses/response_reasoning_item.py +51 -0
- openai/types/responses/response_reasoning_item_param.py +51 -0
- openai/types/responses/response_reasoning_summary_part_added_event.py +35 -0
- openai/types/responses/response_reasoning_summary_part_done_event.py +35 -0
- openai/types/responses/response_reasoning_summary_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_summary_text_done_event.py +27 -0
- openai/types/responses/response_reasoning_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_text_done_event.py +27 -0
- openai/types/responses/response_refusal_delta_event.py +27 -0
- openai/types/responses/response_refusal_done_event.py +27 -0
- openai/types/responses/response_retrieve_params.py +59 -0
- openai/types/responses/response_status.py +7 -0
- openai/types/responses/response_stream_event.py +120 -0
- openai/types/responses/response_text_config.py +35 -0
- openai/types/responses/response_text_config_param.py +36 -0
- openai/types/responses/response_text_delta_event.py +50 -0
- openai/types/responses/response_text_done_event.py +50 -0
- openai/types/responses/response_usage.py +35 -0
- openai/types/responses/response_web_search_call_completed_event.py +21 -0
- openai/types/responses/response_web_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_web_search_call_searching_event.py +21 -0
- openai/types/responses/tool.py +271 -0
- openai/types/responses/tool_choice_allowed.py +36 -0
- openai/types/responses/tool_choice_allowed_param.py +36 -0
- openai/types/responses/tool_choice_apply_patch.py +12 -0
- openai/types/responses/tool_choice_apply_patch_param.py +12 -0
- openai/types/responses/tool_choice_custom.py +15 -0
- openai/types/responses/tool_choice_custom_param.py +15 -0
- openai/types/responses/tool_choice_function.py +15 -0
- openai/types/responses/tool_choice_function_param.py +15 -0
- openai/types/responses/tool_choice_mcp.py +19 -0
- openai/types/responses/tool_choice_mcp_param.py +19 -0
- openai/types/responses/tool_choice_options.py +7 -0
- openai/types/responses/tool_choice_shell.py +12 -0
- openai/types/responses/tool_choice_shell_param.py +12 -0
- openai/types/responses/tool_choice_types.py +31 -0
- openai/types/responses/tool_choice_types_param.py +33 -0
- openai/types/responses/tool_param.py +271 -0
- openai/types/responses/web_search_preview_tool.py +49 -0
- openai/types/responses/web_search_preview_tool_param.py +49 -0
- openai/types/responses/web_search_tool.py +63 -0
- openai/types/responses/web_search_tool_param.py +65 -0
- openai/types/shared/__init__.py +19 -0
- openai/types/shared/all_models.py +28 -0
- openai/types/shared/chat_model.py +75 -0
- openai/types/shared/comparison_filter.py +34 -0
- openai/types/shared/compound_filter.py +22 -0
- openai/types/shared/custom_tool_input_format.py +28 -0
- openai/types/shared/error_object.py +17 -0
- openai/types/shared/function_definition.py +43 -0
- openai/types/shared/function_parameters.py +8 -0
- openai/types/shared/metadata.py +8 -0
- openai/types/shared/reasoning.py +44 -0
- openai/types/shared/reasoning_effort.py +8 -0
- openai/types/shared/response_format_json_object.py +12 -0
- openai/types/shared/response_format_json_schema.py +48 -0
- openai/types/shared/response_format_text.py +12 -0
- openai/types/shared/response_format_text_grammar.py +15 -0
- openai/types/shared/response_format_text_python.py +12 -0
- openai/types/shared/responses_model.py +28 -0
- openai/types/shared_params/__init__.py +15 -0
- openai/types/shared_params/chat_model.py +77 -0
- openai/types/shared_params/comparison_filter.py +36 -0
- openai/types/shared_params/compound_filter.py +23 -0
- openai/types/shared_params/custom_tool_input_format.py +27 -0
- openai/types/shared_params/function_definition.py +45 -0
- openai/types/shared_params/function_parameters.py +10 -0
- openai/types/shared_params/metadata.py +10 -0
- openai/types/shared_params/reasoning.py +45 -0
- openai/types/shared_params/reasoning_effort.py +10 -0
- openai/types/shared_params/response_format_json_object.py +12 -0
- openai/types/shared_params/response_format_json_schema.py +46 -0
- openai/types/shared_params/response_format_text.py +12 -0
- openai/types/shared_params/responses_model.py +30 -0
- openai/types/static_file_chunking_strategy.py +20 -0
- openai/types/static_file_chunking_strategy_object.py +15 -0
- openai/types/static_file_chunking_strategy_object_param.py +16 -0
- openai/types/static_file_chunking_strategy_param.py +22 -0
- openai/types/upload.py +42 -0
- openai/types/upload_complete_params.py +20 -0
- openai/types/upload_create_params.py +52 -0
- openai/types/uploads/__init__.py +6 -0
- openai/types/uploads/part_create_params.py +14 -0
- openai/types/uploads/upload_part.py +21 -0
- openai/types/vector_store.py +82 -0
- openai/types/vector_store_create_params.py +61 -0
- openai/types/vector_store_deleted.py +15 -0
- openai/types/vector_store_list_params.py +39 -0
- openai/types/vector_store_search_params.py +42 -0
- openai/types/vector_store_search_response.py +39 -0
- openai/types/vector_store_update_params.py +39 -0
- openai/types/vector_stores/__init__.py +13 -0
- openai/types/vector_stores/file_batch_create_params.py +70 -0
- openai/types/vector_stores/file_batch_list_files_params.py +47 -0
- openai/types/vector_stores/file_content_response.py +15 -0
- openai/types/vector_stores/file_create_params.py +35 -0
- openai/types/vector_stores/file_list_params.py +45 -0
- openai/types/vector_stores/file_update_params.py +21 -0
- openai/types/vector_stores/vector_store_file.py +67 -0
- openai/types/vector_stores/vector_store_file_batch.py +54 -0
- openai/types/vector_stores/vector_store_file_deleted.py +15 -0
- openai/types/video.py +53 -0
- openai/types/video_create_error.py +11 -0
- openai/types/video_create_params.py +29 -0
- openai/types/video_delete_response.py +18 -0
- openai/types/video_download_content_params.py +12 -0
- openai/types/video_list_params.py +21 -0
- openai/types/video_model.py +7 -0
- openai/types/video_remix_params.py +12 -0
- openai/types/video_seconds.py +7 -0
- openai/types/video_size.py +7 -0
- openai/types/webhooks/__init__.py +24 -0
- openai/types/webhooks/batch_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/batch_completed_webhook_event.py +30 -0
- openai/types/webhooks/batch_expired_webhook_event.py +30 -0
- openai/types/webhooks/batch_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_canceled_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/realtime_call_incoming_webhook_event.py +41 -0
- openai/types/webhooks/response_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/response_completed_webhook_event.py +30 -0
- openai/types/webhooks/response_failed_webhook_event.py +30 -0
- openai/types/webhooks/response_incomplete_webhook_event.py +30 -0
- openai/types/webhooks/unwrap_webhook_event.py +44 -0
- openai/types/websocket_connection_options.py +36 -0
- openai/version.py +3 -0
|
@@ -0,0 +1,3122 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import typing_extensions
|
|
6
|
+
from typing import List, Union, Iterable, Optional
|
|
7
|
+
from functools import partial
|
|
8
|
+
from typing_extensions import Literal, overload
|
|
9
|
+
|
|
10
|
+
import httpx
|
|
11
|
+
|
|
12
|
+
from ..... import _legacy_response
|
|
13
|
+
from .steps import (
|
|
14
|
+
Steps,
|
|
15
|
+
AsyncSteps,
|
|
16
|
+
StepsWithRawResponse,
|
|
17
|
+
AsyncStepsWithRawResponse,
|
|
18
|
+
StepsWithStreamingResponse,
|
|
19
|
+
AsyncStepsWithStreamingResponse,
|
|
20
|
+
)
|
|
21
|
+
from ....._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, omit, not_given
|
|
22
|
+
from ....._utils import (
|
|
23
|
+
is_given,
|
|
24
|
+
required_args,
|
|
25
|
+
maybe_transform,
|
|
26
|
+
async_maybe_transform,
|
|
27
|
+
)
|
|
28
|
+
from ....._compat import cached_property
|
|
29
|
+
from ....._resource import SyncAPIResource, AsyncAPIResource
|
|
30
|
+
from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
|
31
|
+
from ....._streaming import Stream, AsyncStream
|
|
32
|
+
from .....pagination import SyncCursorPage, AsyncCursorPage
|
|
33
|
+
from ....._base_client import AsyncPaginator, make_request_options
|
|
34
|
+
from .....lib.streaming import (
|
|
35
|
+
AssistantEventHandler,
|
|
36
|
+
AssistantEventHandlerT,
|
|
37
|
+
AssistantStreamManager,
|
|
38
|
+
AsyncAssistantEventHandler,
|
|
39
|
+
AsyncAssistantEventHandlerT,
|
|
40
|
+
AsyncAssistantStreamManager,
|
|
41
|
+
)
|
|
42
|
+
from .....types.beta.threads import (
|
|
43
|
+
run_list_params,
|
|
44
|
+
run_create_params,
|
|
45
|
+
run_update_params,
|
|
46
|
+
run_submit_tool_outputs_params,
|
|
47
|
+
)
|
|
48
|
+
from .....types.beta.threads.run import Run
|
|
49
|
+
from .....types.shared.chat_model import ChatModel
|
|
50
|
+
from .....types.shared_params.metadata import Metadata
|
|
51
|
+
from .....types.shared.reasoning_effort import ReasoningEffort
|
|
52
|
+
from .....types.beta.assistant_tool_param import AssistantToolParam
|
|
53
|
+
from .....types.beta.assistant_stream_event import AssistantStreamEvent
|
|
54
|
+
from .....types.beta.threads.runs.run_step_include import RunStepInclude
|
|
55
|
+
from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
|
|
56
|
+
from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
|
|
57
|
+
|
|
58
|
+
__all__ = ["Runs", "AsyncRuns"]
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class Runs(SyncAPIResource):
|
|
62
|
+
@cached_property
|
|
63
|
+
def steps(self) -> Steps:
|
|
64
|
+
return Steps(self._client)
|
|
65
|
+
|
|
66
|
+
@cached_property
|
|
67
|
+
def with_raw_response(self) -> RunsWithRawResponse:
|
|
68
|
+
"""
|
|
69
|
+
This property can be used as a prefix for any HTTP method call to return
|
|
70
|
+
the raw response object instead of the parsed content.
|
|
71
|
+
|
|
72
|
+
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
|
73
|
+
"""
|
|
74
|
+
return RunsWithRawResponse(self)
|
|
75
|
+
|
|
76
|
+
@cached_property
|
|
77
|
+
def with_streaming_response(self) -> RunsWithStreamingResponse:
|
|
78
|
+
"""
|
|
79
|
+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
|
80
|
+
|
|
81
|
+
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
|
82
|
+
"""
|
|
83
|
+
return RunsWithStreamingResponse(self)
|
|
84
|
+
|
|
85
|
+
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    # Non-streaming overload: `stream` is omitted or `False`, so the call returns a `Run`.
    def create(
        self,
        thread_id: str,
        *,
        assistant_id: str,
        include: List[RunStepInclude] | Omit = omit,
        additional_instructions: Optional[str] | Omit = omit,
        additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_prompt_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: Union[str, ChatModel, None] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
        tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Run:
        """
        Create a run.

        Args:
          assistant_id: The ID of the
              [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
              execute this run.

          include: A list of additional fields to include in the response. Currently the only
              supported value is `step_details.tool_calls[*].file_search.results[*].content`
              to fetch the file search result content.

              See the
              [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
              for more information.

          additional_instructions: Appends additional instructions at the end of the instructions for the run. This
              is useful for modifying the behavior on a per-run basis without overriding other
              instructions.

          additional_messages: Adds additional messages to the thread before creating the run.

          instructions: Overrides the
              [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
              of the assistant. This is useful for modifying the behavior on a per-run basis.

          max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
              run. The run will make a best effort to use only the number of completion tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              completion tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
              The run will make a best effort to use only the number of prompt tokens
              specified, across multiple turns of the run. If the run exceeds the number of
              prompt tokens specified, the run will end with status `incomplete`. See
              `incomplete_details` for more info.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
              be used to execute this run. If a value is provided here, it will override the
              model associated with the assistant. If not, the model associated with the
              assistant will be used.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          reasoning_effort: Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
              reasoning effort can result in faster responses and fewer tokens used on
              reasoning in a response.

              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
                calls are supported for all reasoning values in gpt-5.1.
              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                support `none`.
              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

          response_format: Specifies the format that the model must output. Compatible with
              [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
              [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
              and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tools and instead generates a message. `auto` is the default value
              and means the model can pick between generating a message or calling one or more
              tools. `required` means the model must call one or more tools before responding
              to the user. Specifying a particular tool like `{"type": "file_search"}` or
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

          tools: Override the tools the assistant can use for this run. This is useful for
              modifying the behavior on a per-run basis.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or temperature but not both.

          truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
              control the initial context window of the run.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
|
|
241
|
+
|
|
242
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
    self,
    thread_id: str,
    *,
    assistant_id: str,
    stream: Literal[True],
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[AssistantStreamEvent]:
    """Create a run on a thread, streaming the result.

    With ``stream=True`` the API returns server-sent events for the run,
    terminating when the run enters a terminal state with a
    ``data: [DONE]`` message.

    Args:
        assistant_id: ID of the assistant used to execute this run.
        stream: Must be ``True`` for this overload; selects the streaming
            transport described above.
        include: Additional response fields to include. Currently the only
            supported value is
            ``step_details.tool_calls[*].file_search.results[*].content``
            (fetches file-search result content).
        additional_instructions: Appended to the end of the run's
            instructions without overriding the assistant's instructions.
        additional_messages: Messages added to the thread before the run
            is created.
        instructions: Overrides the assistant's instructions for this run
            only.
        max_completion_tokens: Best-effort cap on completion tokens across
            the whole run; exceeding it ends the run with status
            ``incomplete`` (see ``incomplete_details``).
        max_prompt_tokens: Best-effort cap on prompt tokens across the
            whole run; exceeding it ends the run with status
            ``incomplete`` (see ``incomplete_details``).
        metadata: Up to 16 key-value pairs attached to the run. Keys are
            at most 64 characters, values at most 512.
        model: Overrides the model associated with the assistant for this
            run; when omitted the assistant's model is used.
        parallel_tool_calls: Whether to enable parallel function calling
            during tool use.
        reasoning_effort: Effort level for reasoning models. Supported
            values are ``none``, ``minimal``, ``low``, ``medium`` and
            ``high``; actual support is model-dependent (e.g. ``gpt-5.1``
            defaults to ``none``, earlier models default to ``medium`` and
            do not support ``none``, ``gpt-5-pro`` only supports ``high``).
        response_format: Format the model must output.
            ``{"type": "json_schema", "json_schema": {...}}`` enables
            Structured Outputs; ``{"type": "json_object"}`` enables JSON
            mode (in which case you must also instruct the model to
            produce JSON via a system or user message).
        temperature: Sampling temperature between 0 and 2; higher values
            are more random, lower more deterministic.
        tool_choice: Controls which (if any) tool the model calls:
            ``none``, ``auto`` (default), ``required``, or a specific tool
            such as ``{"type": "function", "function": {"name": "my_function"}}``.
        tools: Overrides the assistant's tools for this run.
        top_p: Nucleus-sampling probability mass (e.g. 0.1 keeps only the
            top 10%). Alter this or ``temperature``, not both.
        truncation_strategy: Controls how the thread is truncated prior to
            the run (initial context window).
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.
    """
    ...
|
|
398
|
+
|
|
399
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
    self,
    thread_id: str,
    *,
    assistant_id: str,
    stream: bool,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run | Stream[AssistantStreamEvent]:
    """Create a run on a thread; transport chosen at runtime.

    This overload covers callers that pass ``stream`` as a plain ``bool``
    not known statically: ``stream=True`` yields a stream of server-sent
    events terminating with ``data: [DONE]``, ``stream=False`` yields the
    created :class:`Run`.

    Args:
        assistant_id: ID of the assistant used to execute this run.
        stream: Whether to stream run events instead of returning the run
            object directly.
        include: Additional response fields to include. Currently the only
            supported value is
            ``step_details.tool_calls[*].file_search.results[*].content``
            (fetches file-search result content).
        additional_instructions: Appended to the end of the run's
            instructions without overriding the assistant's instructions.
        additional_messages: Messages added to the thread before the run
            is created.
        instructions: Overrides the assistant's instructions for this run
            only.
        max_completion_tokens: Best-effort cap on completion tokens across
            the whole run; exceeding it ends the run with status
            ``incomplete`` (see ``incomplete_details``).
        max_prompt_tokens: Best-effort cap on prompt tokens across the
            whole run; exceeding it ends the run with status
            ``incomplete`` (see ``incomplete_details``).
        metadata: Up to 16 key-value pairs attached to the run. Keys are
            at most 64 characters, values at most 512.
        model: Overrides the model associated with the assistant for this
            run; when omitted the assistant's model is used.
        parallel_tool_calls: Whether to enable parallel function calling
            during tool use.
        reasoning_effort: Effort level for reasoning models. Supported
            values are ``none``, ``minimal``, ``low``, ``medium`` and
            ``high``; actual support is model-dependent (e.g. ``gpt-5.1``
            defaults to ``none``, earlier models default to ``medium`` and
            do not support ``none``, ``gpt-5-pro`` only supports ``high``).
        response_format: Format the model must output.
            ``{"type": "json_schema", "json_schema": {...}}`` enables
            Structured Outputs; ``{"type": "json_object"}`` enables JSON
            mode (in which case you must also instruct the model to
            produce JSON via a system or user message).
        temperature: Sampling temperature between 0 and 2; higher values
            are more random, lower more deterministic.
        tool_choice: Controls which (if any) tool the model calls:
            ``none``, ``auto`` (default), ``required``, or a specific tool
            such as ``{"type": "function", "function": {"name": "my_function"}}``.
        tools: Overrides the assistant's tools for this run.
        top_p: Nucleus-sampling probability mass (e.g. 0.1 keeps only the
            top 10%). Alter this or ``temperature``, not both.
        truncation_strategy: Controls how the thread is truncated prior to
            the run (initial context window).
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.
    """
    ...
|
|
555
|
+
|
|
556
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["assistant_id"], ["assistant_id", "stream"])
def create(
    self,
    thread_id: str,
    *,
    assistant_id: str,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run | Stream[AssistantStreamEvent]:
    # Shared implementation behind the typed `create` overloads above.
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    # Assistants v2 requires the beta header; caller-supplied headers still win on conflict.
    headers: Headers = {"OpenAI-Beta": "assistants=v2"}
    if extra_headers:
        headers = {**headers, **extra_headers}
    # Validate/serialize the body against the params model matching the chosen transport.
    params_type = (
        run_create_params.RunCreateParamsStreaming if stream else run_create_params.RunCreateParamsNonStreaming
    )
    request_body = maybe_transform(
        {
            "assistant_id": assistant_id,
            "additional_instructions": additional_instructions,
            "additional_messages": additional_messages,
            "instructions": instructions,
            "max_completion_tokens": max_completion_tokens,
            "max_prompt_tokens": max_prompt_tokens,
            "metadata": metadata,
            "model": model,
            "parallel_tool_calls": parallel_tool_calls,
            "reasoning_effort": reasoning_effort,
            "response_format": response_format,
            "stream": stream,
            "temperature": temperature,
            "tool_choice": tool_choice,
            "tools": tools,
            "top_p": top_p,
            "truncation_strategy": truncation_strategy,
        },
        params_type,
    )
    # `include` travels as a query parameter, not in the JSON body.
    request_options = make_request_options(
        extra_headers=headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
        query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
    )
    return self._post(
        f"/threads/{thread_id}/runs",
        body=request_body,
        options=request_options,
        cast_to=Run,
        stream=stream or False,
        stream_cls=Stream[AssistantStreamEvent],
    )
|
|
625
|
+
|
|
626
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
    self,
    run_id: str,
    *,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run:
    """Fetch a single run by ID.

    Args:
        run_id: ID of the run to retrieve.
        thread_id: ID of the thread the run belongs to.
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.

    Raises:
        ValueError: If ``thread_id`` or ``run_id`` is empty.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    if not run_id:
        raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
    # Assistants v2 requires the beta header; caller-supplied headers still win on conflict.
    headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._get(f"/threads/{thread_id}/runs/{run_id}", options=request_options, cast_to=Run)
|
|
663
|
+
|
|
664
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def update(
    self,
    run_id: str,
    *,
    thread_id: str,
    metadata: Optional[Metadata] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run:
    """Modify a run (currently only its metadata).

    Args:
        run_id: ID of the run to update.
        thread_id: ID of the thread the run belongs to.
        metadata: Up to 16 key-value pairs attached to the run for
            structured storage and querying. Keys are at most 64
            characters, values at most 512.
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.

    Raises:
        ValueError: If ``thread_id`` or ``run_id`` is empty.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    if not run_id:
        raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
    # Assistants v2 requires the beta header; caller-supplied headers still win on conflict.
    headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
    request_body = maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams)
    request_options = make_request_options(
        extra_headers=headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._post(
        f"/threads/{thread_id}/runs/{run_id}",
        body=request_body,
        options=request_options,
        cast_to=Run,
    )
|
|
710
|
+
|
|
711
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
    self,
    thread_id: str,
    *,
    after: str | Omit = omit,
    before: str | Omit = omit,
    limit: int | Omit = omit,
    order: Literal["asc", "desc"] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[Run]:
    """List the runs belonging to a thread, as a cursor page.

    Args:
        thread_id: ID of the thread whose runs are listed.
        after: Pagination cursor; return results after this object ID
            (e.g. ``after=obj_foo`` to fetch the page following the
            object ``obj_foo``).
        before: Pagination cursor; return results before this object ID
            (e.g. ``before=obj_foo`` to fetch the preceding page).
        limit: Number of objects to return, between 1 and 100
            (default 20).
        order: Sort order by ``created_at``: ``asc`` or ``desc``.
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.

    Raises:
        ValueError: If ``thread_id`` is empty.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    # Assistants v2 requires the beta header; caller-supplied headers still win on conflict.
    headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
    # Pagination controls travel as query parameters.
    page_query = maybe_transform(
        {
            "after": after,
            "before": before,
            "limit": limit,
            "order": order,
        },
        run_list_params.RunListParams,
    )
    return self._get_api_list(
        f"/threads/{thread_id}/runs",
        page=SyncCursorPage[Run],
        options=make_request_options(
            extra_headers=headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=page_query,
        ),
        model=Run,
    )
|
|
778
|
+
|
|
779
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def cancel(
    self,
    run_id: str,
    *,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run:
    """Cancel a run that is currently ``in_progress``.

    Args:
        run_id: ID of the run to cancel.
        thread_id: ID of the thread the run belongs to.
        extra_headers: Send extra headers.
        extra_query: Add additional query parameters to the request.
        extra_body: Add additional JSON properties to the request.
        timeout: Override the client-level default timeout for this
            request, in seconds.

    Raises:
        ValueError: If ``thread_id`` or ``run_id`` is empty.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    if not run_id:
        raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
    # Assistants v2 requires the beta header; caller-supplied headers still win on conflict.
    headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self._post(
        f"/threads/{thread_id}/runs/{run_id}/cancel",
        options=request_options,
        cast_to=Run,
    )
|
|
816
|
+
|
|
817
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create_and_poll(
    self,
    *,
    assistant_id: str,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    poll_interval_ms: int | Omit = omit,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Run:
    """Create a run and block until it reaches a terminal state.

    Convenience wrapper that chains :meth:`create` and :meth:`poll`.
    More information on Run lifecycles can be found here:
    https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
    """
    created_run = self.create(  # pyright: ignore[reportDeprecated]
        thread_id=thread_id,
        assistant_id=assistant_id,
        include=include,
        additional_instructions=additional_instructions,
        additional_messages=additional_messages,
        instructions=instructions,
        max_completion_tokens=max_completion_tokens,
        max_prompt_tokens=max_prompt_tokens,
        metadata=metadata,
        model=model,
        response_format=response_format,
        temperature=temperature,
        tool_choice=tool_choice,
        parallel_tool_calls=parallel_tool_calls,
        reasoning_effort=reasoning_effort,
        # Polling only makes sense for a non-streaming run.
        stream=False,
        tools=tools,
        truncation_strategy=truncation_strategy,
        top_p=top_p,
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    # Hand off to the generic poller; it keeps retrieving until a terminal status.
    return self.poll(  # pyright: ignore[reportDeprecated]
        created_run.id,
        thread_id=thread_id,
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        poll_interval_ms=poll_interval_ms,
        timeout=timeout,
    )
|
|
887
|
+
|
|
888
|
+
@overload
@typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
    self,
    *,
    assistant_id: str,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandler]:
    """Create a Run stream.

    Overload for the case where no custom ``event_handler`` is supplied: events
    are dispatched to a default :class:`AssistantEventHandler`.
    """
    ...
|
|
919
|
+
|
|
920
|
+
@overload
@typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
    self,
    *,
    assistant_id: str,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    event_handler: AssistantEventHandlerT,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandlerT]:
    """Create a Run stream.

    Overload for the case where a custom ``event_handler`` is supplied; the
    returned manager is parameterized on the handler's type.
    """
    ...
|
|
952
|
+
|
|
953
|
+
@typing_extensions.deprecated("use `stream` instead")
def create_and_stream(
    self,
    *,
    assistant_id: str,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    event_handler: AssistantEventHandlerT | None = None,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
    """Create a Run stream.

    The request itself is deferred: a lazily-invoked POST is wrapped in an
    :class:`AssistantStreamManager`, which dispatches server-sent events to
    ``event_handler`` (or a default :class:`AssistantEventHandler`).
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")

    # Telemetry headers identify this helper; user-supplied headers take precedence.
    merged_headers = {
        "OpenAI-Beta": "assistants=v2",
        "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
        "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
        **(extra_headers or {}),
    }
    # Serialize the creation payload up front; `stream` is forced on.
    request_body = maybe_transform(
        {
            "assistant_id": assistant_id,
            "additional_instructions": additional_instructions,
            "additional_messages": additional_messages,
            "instructions": instructions,
            "max_completion_tokens": max_completion_tokens,
            "max_prompt_tokens": max_prompt_tokens,
            "metadata": metadata,
            "model": model,
            "response_format": response_format,
            "temperature": temperature,
            "tool_choice": tool_choice,
            "stream": True,
            "tools": tools,
            "truncation_strategy": truncation_strategy,
            "parallel_tool_calls": parallel_tool_calls,
            "reasoning_effort": reasoning_effort,
            "top_p": top_p,
        },
        run_create_params.RunCreateParams,
    )
    # The stream manager invokes this partial when entered.
    make_request = partial(
        self._post,
        f"/threads/{thread_id}/runs",
        body=request_body,
        options=make_request_options(
            extra_headers=merged_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=Run,
        stream=True,
        stream_cls=Stream[AssistantStreamEvent],
    )
    handler = event_handler or AssistantEventHandler()
    return AssistantStreamManager(make_request, event_handler=handler)
|
|
1025
|
+
|
|
1026
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def poll(
    self,
    run_id: str,
    thread_id: str,
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
    poll_interval_ms: int | Omit = omit,
) -> Run:
    """Poll a run until it reaches a terminal state and return it.

    The interval between retrievals is, in order of preference: the explicit
    ``poll_interval_ms`` argument, the server's ``openai-poll-after-ms``
    response header, or a 1 second fallback. More information on Run
    lifecycles can be found here:
    https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
    """
    merged_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}

    # Tell the server what interval the caller chose, if any.
    if is_given(poll_interval_ms):
        merged_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)

    terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
    while True:
        raw = self.with_raw_response.retrieve(  # pyright: ignore[reportDeprecated]
            thread_id=thread_id,
            run_id=run_id,
            extra_headers=merged_headers,
            extra_body=extra_body,
            extra_query=extra_query,
            timeout=timeout,
        )

        run = raw.parse()
        if run.status in terminal_states:
            return run

        # No caller-chosen interval yet: adopt the server's hint or fall back to 1s.
        if not is_given(poll_interval_ms):
            server_hint = raw.headers.get("openai-poll-after-ms")
            poll_interval_ms = int(server_hint) if server_hint is not None else 1000

        self._sleep(poll_interval_ms / 1000)
|
|
1071
|
+
|
|
1072
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
    self,
    *,
    assistant_id: str,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandler]:
    """Create a Run stream.

    Overload for the case where no custom ``event_handler`` is supplied: events
    are dispatched to a default :class:`AssistantEventHandler`.
    """
    ...
|
|
1104
|
+
|
|
1105
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
    self,
    *,
    assistant_id: str,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    event_handler: AssistantEventHandlerT,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandlerT]:
    """Create a Run stream.

    Overload for the case where a custom ``event_handler`` is supplied; the
    returned manager is parameterized on the handler's type.
    """
    ...
|
|
1138
|
+
|
|
1139
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def stream(
    self,
    *,
    assistant_id: str,
    include: List[RunStepInclude] | Omit = omit,
    additional_instructions: Optional[str] | Omit = omit,
    additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
    instructions: Optional[str] | Omit = omit,
    max_completion_tokens: Optional[int] | Omit = omit,
    max_prompt_tokens: Optional[int] | Omit = omit,
    metadata: Optional[Metadata] | Omit = omit,
    model: Union[str, ChatModel, None] | Omit = omit,
    parallel_tool_calls: bool | Omit = omit,
    reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
    response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
    temperature: Optional[float] | Omit = omit,
    tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
    tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
    top_p: Optional[float] | Omit = omit,
    truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
    thread_id: str,
    event_handler: AssistantEventHandlerT | None = None,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
    """Create a Run stream.

    The POST is deferred inside an :class:`AssistantStreamManager`, which
    dispatches server-sent events to ``event_handler`` (or a default
    :class:`AssistantEventHandler`). Unlike ``create_and_stream``, this
    variant also forwards the ``include`` query parameter.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")

    # Telemetry headers identify this helper; user-supplied headers take precedence.
    merged_headers = {
        "OpenAI-Beta": "assistants=v2",
        "X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
        "X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
        **(extra_headers or {}),
    }
    # Serialize the creation payload up front; `stream` is forced on.
    request_body = maybe_transform(
        {
            "assistant_id": assistant_id,
            "additional_instructions": additional_instructions,
            "additional_messages": additional_messages,
            "instructions": instructions,
            "max_completion_tokens": max_completion_tokens,
            "max_prompt_tokens": max_prompt_tokens,
            "metadata": metadata,
            "model": model,
            "response_format": response_format,
            "temperature": temperature,
            "tool_choice": tool_choice,
            "stream": True,
            "tools": tools,
            "parallel_tool_calls": parallel_tool_calls,
            "reasoning_effort": reasoning_effort,
            "truncation_strategy": truncation_strategy,
            "top_p": top_p,
        },
        run_create_params.RunCreateParams,
    )
    # The stream manager invokes this partial when entered.
    make_request = partial(
        self._post,
        f"/threads/{thread_id}/runs",
        body=request_body,
        options=make_request_options(
            extra_headers=merged_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
        ),
        cast_to=Run,
        stream=True,
        stream_cls=Stream[AssistantStreamEvent],
    )
    handler = event_handler or AssistantEventHandler()
    return AssistantStreamManager(make_request, event_handler=handler)
|
|
1216
|
+
|
|
1217
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
    self,
    run_id: str,
    *,
    thread_id: str,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    stream: Optional[Literal[False]] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run:
    """
    When a run has the `status: "requires_action"` and `required_action.type` is
    `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
    tool calls once they're all completed. All outputs must be submitted in a single
    request.

    Overload: with `stream` omitted or `False`, the completed `Run` is returned.

    Args:
      run_id: Identifier of the run awaiting tool outputs.

      thread_id: Identifier of the thread that owns the run.

      tool_outputs: A list of tools for which the outputs are being submitted.

      stream: If `true`, returns a stream of events that happen during the Run as server-sent
          events, terminating when the Run enters a terminal state with a `data: [DONE]`
          message.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1255
|
+
|
|
1256
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
    self,
    run_id: str,
    *,
    thread_id: str,
    stream: Literal[True],
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[AssistantStreamEvent]:
    """
    When a run has the `status: "requires_action"` and `required_action.type` is
    `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
    tool calls once they're all completed. All outputs must be submitted in a single
    request.

    Overload: with `stream=True`, a `Stream[AssistantStreamEvent]` is returned.

    Args:
      run_id: Identifier of the run awaiting tool outputs.

      thread_id: Identifier of the thread that owns the run.

      stream: If `true`, returns a stream of events that happen during the Run as server-sent
          events, terminating when the Run enters a terminal state with a `data: [DONE]`
          message.

      tool_outputs: A list of tools for which the outputs are being submitted.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1294
|
+
|
|
1295
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs(
    self,
    run_id: str,
    *,
    thread_id: str,
    stream: bool,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run | Stream[AssistantStreamEvent]:
    """
    When a run has the `status: "requires_action"` and `required_action.type` is
    `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
    tool calls once they're all completed. All outputs must be submitted in a single
    request.

    Overload: `stream` is a runtime `bool`, so the result may be either a `Run`
    or a `Stream[AssistantStreamEvent]`.

    Args:
      run_id: Identifier of the run awaiting tool outputs.

      thread_id: Identifier of the thread that owns the run.

      stream: If `true`, returns a stream of events that happen during the Run as server-sent
          events, terminating when the Run enters a terminal state with a `data: [DONE]`
          message.

      tool_outputs: A list of tools for which the outputs are being submitted.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1333
|
+
|
|
1334
|
+
# NOTE: the original applied `@typing_extensions.deprecated(...)` twice (above and
# below `@required_args`), double-wrapping the function; one application suffices.
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
@required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
def submit_tool_outputs(
    self,
    run_id: str,
    *,
    thread_id: str,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Run | Stream[AssistantStreamEvent]:
    """
    When a run has the `status: "requires_action"` and `required_action.type` is
    `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
    tool calls once they're all completed. All outputs must be submitted in a single
    request.

    Args:
      run_id: Identifier of the run awaiting tool outputs.

      thread_id: Identifier of the thread that owns the run.

      tool_outputs: A list of tools for which the outputs are being submitted.

      stream: If `true`, returns a stream of events that happen during the Run as server-sent
          events, terminating when the Run enters a terminal state with a `data: [DONE]`
          message.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds

    Raises:
      ValueError: If `thread_id` or `run_id` is an empty value.
    """
    if not thread_id:
        raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
    if not run_id:
        raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
    # Caller-supplied headers win over the assistants beta opt-in header.
    extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
    return self._post(
        f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
        body=maybe_transform(
            {
                "tool_outputs": tool_outputs,
                "stream": stream,
            },
            # The streaming/non-streaming param TypedDicts differ, so pick by `stream`.
            run_submit_tool_outputs_params.RunSubmitToolOutputsParamsStreaming
            if stream
            else run_submit_tool_outputs_params.RunSubmitToolOutputsParamsNonStreaming,
        ),
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=Run,
        # `stream` may be omit/None; normalize to a bool for the transport layer.
        stream=stream or False,
        stream_cls=Stream[AssistantStreamEvent],
    )
|
|
1374
|
+
|
|
1375
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_and_poll(
    self,
    *,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    run_id: str,
    thread_id: str,
    poll_interval_ms: int | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Run:
    """Submit tool outputs and block until the run reaches a terminal state.

    Convenience wrapper chaining :meth:`submit_tool_outputs` (non-streaming)
    with :meth:`poll`. More information on Run lifecycles can be found here:
    https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
    """
    submitted_run = self.submit_tool_outputs(  # pyright: ignore[reportDeprecated]
        run_id=run_id,
        thread_id=thread_id,
        tool_outputs=tool_outputs,
        # Polling only makes sense for a non-streaming submission.
        stream=False,
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
    )
    return self.poll(  # pyright: ignore[reportDeprecated]
        run_id=submitted_run.id,
        thread_id=thread_id,
        extra_headers=extra_headers,
        extra_query=extra_query,
        extra_body=extra_body,
        timeout=timeout,
        poll_interval_ms=poll_interval_ms,
    )
|
|
1414
|
+
|
|
1415
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
    self,
    *,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    run_id: str,
    thread_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandler]:
    """
    Submit the tool outputs from a previous run and stream the run to a terminal
    state. More information on Run lifecycles can be found here:
    https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps

    Overload: no custom ``event_handler`` is supplied, so events are dispatched
    to a default :class:`AssistantEventHandler`.
    """
    ...
|
|
1436
|
+
|
|
1437
|
+
@overload
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def submit_tool_outputs_stream(
    self,
    *,
    tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
    run_id: str,
    thread_id: str,
    event_handler: AssistantEventHandlerT,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> AssistantStreamManager[AssistantEventHandlerT]:
    """
    Submit the tool outputs from a previous run and stream the run to a terminal
    state. More information on Run lifecycles can be found here:
    https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps

    Overload: a custom ``event_handler`` is supplied; the returned manager is
    parameterized on the handler's type.
    """
    ...
|
|
1459
|
+
|
|
1460
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
1461
|
+
def submit_tool_outputs_stream(
|
|
1462
|
+
self,
|
|
1463
|
+
*,
|
|
1464
|
+
tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
|
|
1465
|
+
run_id: str,
|
|
1466
|
+
thread_id: str,
|
|
1467
|
+
event_handler: AssistantEventHandlerT | None = None,
|
|
1468
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1469
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1470
|
+
extra_headers: Headers | None = None,
|
|
1471
|
+
extra_query: Query | None = None,
|
|
1472
|
+
extra_body: Body | None = None,
|
|
1473
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
1474
|
+
) -> AssistantStreamManager[AssistantEventHandler] | AssistantStreamManager[AssistantEventHandlerT]:
|
|
1475
|
+
"""
|
|
1476
|
+
Submit the tool outputs from a previous run and stream the run to a terminal
|
|
1477
|
+
state. More information on Run lifecycles can be found here:
|
|
1478
|
+
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
|
|
1479
|
+
"""
|
|
1480
|
+
if not run_id:
|
|
1481
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
1482
|
+
|
|
1483
|
+
if not thread_id:
|
|
1484
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
1485
|
+
|
|
1486
|
+
extra_headers = {
|
|
1487
|
+
"OpenAI-Beta": "assistants=v2",
|
|
1488
|
+
"X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
|
|
1489
|
+
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
|
|
1490
|
+
**(extra_headers or {}),
|
|
1491
|
+
}
|
|
1492
|
+
request = partial(
|
|
1493
|
+
self._post,
|
|
1494
|
+
f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
|
|
1495
|
+
body=maybe_transform(
|
|
1496
|
+
{
|
|
1497
|
+
"tool_outputs": tool_outputs,
|
|
1498
|
+
"stream": True,
|
|
1499
|
+
},
|
|
1500
|
+
run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
|
|
1501
|
+
),
|
|
1502
|
+
options=make_request_options(
|
|
1503
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
1504
|
+
),
|
|
1505
|
+
cast_to=Run,
|
|
1506
|
+
stream=True,
|
|
1507
|
+
stream_cls=Stream[AssistantStreamEvent],
|
|
1508
|
+
)
|
|
1509
|
+
return AssistantStreamManager(request, event_handler=event_handler or AssistantEventHandler())
|
|
1510
|
+
|
|
1511
|
+
|
|
1512
|
+
class AsyncRuns(AsyncAPIResource):
|
|
1513
|
+
@cached_property
|
|
1514
|
+
def steps(self) -> AsyncSteps:
|
|
1515
|
+
return AsyncSteps(self._client)
|
|
1516
|
+
|
|
1517
|
+
@cached_property
|
|
1518
|
+
def with_raw_response(self) -> AsyncRunsWithRawResponse:
|
|
1519
|
+
"""
|
|
1520
|
+
This property can be used as a prefix for any HTTP method call to return
|
|
1521
|
+
the raw response object instead of the parsed content.
|
|
1522
|
+
|
|
1523
|
+
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
|
1524
|
+
"""
|
|
1525
|
+
return AsyncRunsWithRawResponse(self)
|
|
1526
|
+
|
|
1527
|
+
@cached_property
|
|
1528
|
+
def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
|
|
1529
|
+
"""
|
|
1530
|
+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
|
1531
|
+
|
|
1532
|
+
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
|
1533
|
+
"""
|
|
1534
|
+
return AsyncRunsWithStreamingResponse(self)
|
|
1535
|
+
|
|
1536
|
+
@overload
|
|
1537
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
1538
|
+
async def create(
|
|
1539
|
+
self,
|
|
1540
|
+
thread_id: str,
|
|
1541
|
+
*,
|
|
1542
|
+
assistant_id: str,
|
|
1543
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
1544
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
1545
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
1546
|
+
instructions: Optional[str] | Omit = omit,
|
|
1547
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
1548
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
1549
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
1550
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
1551
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
1552
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
1553
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
1554
|
+
stream: Optional[Literal[False]] | Omit = omit,
|
|
1555
|
+
temperature: Optional[float] | Omit = omit,
|
|
1556
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
1557
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
1558
|
+
top_p: Optional[float] | Omit = omit,
|
|
1559
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
1560
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1561
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1562
|
+
extra_headers: Headers | None = None,
|
|
1563
|
+
extra_query: Query | None = None,
|
|
1564
|
+
extra_body: Body | None = None,
|
|
1565
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1566
|
+
) -> Run:
|
|
1567
|
+
"""
|
|
1568
|
+
Create a run.
|
|
1569
|
+
|
|
1570
|
+
Args:
|
|
1571
|
+
assistant_id: The ID of the
|
|
1572
|
+
[assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
|
|
1573
|
+
execute this run.
|
|
1574
|
+
|
|
1575
|
+
include: A list of additional fields to include in the response. Currently the only
|
|
1576
|
+
supported value is `step_details.tool_calls[*].file_search.results[*].content`
|
|
1577
|
+
to fetch the file search result content.
|
|
1578
|
+
|
|
1579
|
+
See the
|
|
1580
|
+
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
|
|
1581
|
+
for more information.
|
|
1582
|
+
|
|
1583
|
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
|
|
1584
|
+
is useful for modifying the behavior on a per-run basis without overriding other
|
|
1585
|
+
instructions.
|
|
1586
|
+
|
|
1587
|
+
additional_messages: Adds additional messages to the thread before creating the run.
|
|
1588
|
+
|
|
1589
|
+
instructions: Overrides the
|
|
1590
|
+
[instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
|
|
1591
|
+
of the assistant. This is useful for modifying the behavior on a per-run basis.
|
|
1592
|
+
|
|
1593
|
+
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
|
|
1594
|
+
run. The run will make a best effort to use only the number of completion tokens
|
|
1595
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1596
|
+
completion tokens specified, the run will end with status `incomplete`. See
|
|
1597
|
+
`incomplete_details` for more info.
|
|
1598
|
+
|
|
1599
|
+
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
|
|
1600
|
+
The run will make a best effort to use only the number of prompt tokens
|
|
1601
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1602
|
+
prompt tokens specified, the run will end with status `incomplete`. See
|
|
1603
|
+
`incomplete_details` for more info.
|
|
1604
|
+
|
|
1605
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
1606
|
+
for storing additional information about the object in a structured format, and
|
|
1607
|
+
querying for objects via API or the dashboard.
|
|
1608
|
+
|
|
1609
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
1610
|
+
a maximum length of 512 characters.
|
|
1611
|
+
|
|
1612
|
+
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
|
|
1613
|
+
be used to execute this run. If a value is provided here, it will override the
|
|
1614
|
+
model associated with the assistant. If not, the model associated with the
|
|
1615
|
+
assistant will be used.
|
|
1616
|
+
|
|
1617
|
+
parallel_tool_calls: Whether to enable
|
|
1618
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
1619
|
+
during tool use.
|
|
1620
|
+
|
|
1621
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
1622
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1623
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1624
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
1625
|
+
reasoning in a response.
|
|
1626
|
+
|
|
1627
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1628
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
1629
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
1630
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1631
|
+
support `none`.
|
|
1632
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1633
|
+
|
|
1634
|
+
response_format: Specifies the format that the model must output. Compatible with
|
|
1635
|
+
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
|
1636
|
+
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
|
|
1637
|
+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
|
1638
|
+
|
|
1639
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
1640
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
1641
|
+
in the
|
|
1642
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
1643
|
+
|
|
1644
|
+
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
|
|
1645
|
+
message the model generates is valid JSON.
|
|
1646
|
+
|
|
1647
|
+
**Important:** when using JSON mode, you **must** also instruct the model to
|
|
1648
|
+
produce JSON yourself via a system or user message. Without this, the model may
|
|
1649
|
+
generate an unending stream of whitespace until the generation reaches the token
|
|
1650
|
+
limit, resulting in a long-running and seemingly "stuck" request. Also note that
|
|
1651
|
+
the message content may be partially cut off if `finish_reason="length"`, which
|
|
1652
|
+
indicates the generation exceeded `max_tokens` or the conversation exceeded the
|
|
1653
|
+
max context length.
|
|
1654
|
+
|
|
1655
|
+
stream: If `true`, returns a stream of events that happen during the Run as server-sent
|
|
1656
|
+
events, terminating when the Run enters a terminal state with a `data: [DONE]`
|
|
1657
|
+
message.
|
|
1658
|
+
|
|
1659
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
1660
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
1661
|
+
focused and deterministic.
|
|
1662
|
+
|
|
1663
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
1664
|
+
not call any tools and instead generates a message. `auto` is the default value
|
|
1665
|
+
and means the model can pick between generating a message or calling one or more
|
|
1666
|
+
tools. `required` means the model must call one or more tools before responding
|
|
1667
|
+
to the user. Specifying a particular tool like `{"type": "file_search"}` or
|
|
1668
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
1669
|
+
call that tool.
|
|
1670
|
+
|
|
1671
|
+
tools: Override the tools the assistant can use for this run. This is useful for
|
|
1672
|
+
modifying the behavior on a per-run basis.
|
|
1673
|
+
|
|
1674
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
1675
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
1676
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
1677
|
+
|
|
1678
|
+
We generally recommend altering this or temperature but not both.
|
|
1679
|
+
|
|
1680
|
+
truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
|
|
1681
|
+
control the initial context window of the run.
|
|
1682
|
+
|
|
1683
|
+
extra_headers: Send extra headers
|
|
1684
|
+
|
|
1685
|
+
extra_query: Add additional query parameters to the request
|
|
1686
|
+
|
|
1687
|
+
extra_body: Add additional JSON properties to the request
|
|
1688
|
+
|
|
1689
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1690
|
+
"""
|
|
1691
|
+
...
|
|
1692
|
+
|
|
1693
|
+
@overload
|
|
1694
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
1695
|
+
async def create(
|
|
1696
|
+
self,
|
|
1697
|
+
thread_id: str,
|
|
1698
|
+
*,
|
|
1699
|
+
assistant_id: str,
|
|
1700
|
+
stream: Literal[True],
|
|
1701
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
1702
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
1703
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
1704
|
+
instructions: Optional[str] | Omit = omit,
|
|
1705
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
1706
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
1707
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
1708
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
1709
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
1710
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
1711
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
1712
|
+
temperature: Optional[float] | Omit = omit,
|
|
1713
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
1714
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
1715
|
+
top_p: Optional[float] | Omit = omit,
|
|
1716
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
1717
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1718
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1719
|
+
extra_headers: Headers | None = None,
|
|
1720
|
+
extra_query: Query | None = None,
|
|
1721
|
+
extra_body: Body | None = None,
|
|
1722
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1723
|
+
) -> AsyncStream[AssistantStreamEvent]:
|
|
1724
|
+
"""
|
|
1725
|
+
Create a run.
|
|
1726
|
+
|
|
1727
|
+
Args:
|
|
1728
|
+
assistant_id: The ID of the
|
|
1729
|
+
[assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
|
|
1730
|
+
execute this run.
|
|
1731
|
+
|
|
1732
|
+
stream: If `true`, returns a stream of events that happen during the Run as server-sent
|
|
1733
|
+
events, terminating when the Run enters a terminal state with a `data: [DONE]`
|
|
1734
|
+
message.
|
|
1735
|
+
|
|
1736
|
+
include: A list of additional fields to include in the response. Currently the only
|
|
1737
|
+
supported value is `step_details.tool_calls[*].file_search.results[*].content`
|
|
1738
|
+
to fetch the file search result content.
|
|
1739
|
+
|
|
1740
|
+
See the
|
|
1741
|
+
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
|
|
1742
|
+
for more information.
|
|
1743
|
+
|
|
1744
|
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
|
|
1745
|
+
is useful for modifying the behavior on a per-run basis without overriding other
|
|
1746
|
+
instructions.
|
|
1747
|
+
|
|
1748
|
+
additional_messages: Adds additional messages to the thread before creating the run.
|
|
1749
|
+
|
|
1750
|
+
instructions: Overrides the
|
|
1751
|
+
[instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
|
|
1752
|
+
of the assistant. This is useful for modifying the behavior on a per-run basis.
|
|
1753
|
+
|
|
1754
|
+
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
|
|
1755
|
+
run. The run will make a best effort to use only the number of completion tokens
|
|
1756
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1757
|
+
completion tokens specified, the run will end with status `incomplete`. See
|
|
1758
|
+
`incomplete_details` for more info.
|
|
1759
|
+
|
|
1760
|
+
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
|
|
1761
|
+
The run will make a best effort to use only the number of prompt tokens
|
|
1762
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1763
|
+
prompt tokens specified, the run will end with status `incomplete`. See
|
|
1764
|
+
`incomplete_details` for more info.
|
|
1765
|
+
|
|
1766
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
1767
|
+
for storing additional information about the object in a structured format, and
|
|
1768
|
+
querying for objects via API or the dashboard.
|
|
1769
|
+
|
|
1770
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
1771
|
+
a maximum length of 512 characters.
|
|
1772
|
+
|
|
1773
|
+
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
|
|
1774
|
+
be used to execute this run. If a value is provided here, it will override the
|
|
1775
|
+
model associated with the assistant. If not, the model associated with the
|
|
1776
|
+
assistant will be used.
|
|
1777
|
+
|
|
1778
|
+
parallel_tool_calls: Whether to enable
|
|
1779
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
1780
|
+
during tool use.
|
|
1781
|
+
|
|
1782
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
1783
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1784
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1785
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
1786
|
+
reasoning in a response.
|
|
1787
|
+
|
|
1788
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1789
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
1790
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
1791
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1792
|
+
support `none`.
|
|
1793
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1794
|
+
|
|
1795
|
+
response_format: Specifies the format that the model must output. Compatible with
|
|
1796
|
+
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
|
1797
|
+
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
|
|
1798
|
+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
|
1799
|
+
|
|
1800
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
1801
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
1802
|
+
in the
|
|
1803
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
1804
|
+
|
|
1805
|
+
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
|
|
1806
|
+
message the model generates is valid JSON.
|
|
1807
|
+
|
|
1808
|
+
**Important:** when using JSON mode, you **must** also instruct the model to
|
|
1809
|
+
produce JSON yourself via a system or user message. Without this, the model may
|
|
1810
|
+
generate an unending stream of whitespace until the generation reaches the token
|
|
1811
|
+
limit, resulting in a long-running and seemingly "stuck" request. Also note that
|
|
1812
|
+
the message content may be partially cut off if `finish_reason="length"`, which
|
|
1813
|
+
indicates the generation exceeded `max_tokens` or the conversation exceeded the
|
|
1814
|
+
max context length.
|
|
1815
|
+
|
|
1816
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
1817
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
1818
|
+
focused and deterministic.
|
|
1819
|
+
|
|
1820
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
1821
|
+
not call any tools and instead generates a message. `auto` is the default value
|
|
1822
|
+
and means the model can pick between generating a message or calling one or more
|
|
1823
|
+
tools. `required` means the model must call one or more tools before responding
|
|
1824
|
+
to the user. Specifying a particular tool like `{"type": "file_search"}` or
|
|
1825
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
1826
|
+
call that tool.
|
|
1827
|
+
|
|
1828
|
+
tools: Override the tools the assistant can use for this run. This is useful for
|
|
1829
|
+
modifying the behavior on a per-run basis.
|
|
1830
|
+
|
|
1831
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
1832
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
1833
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
1834
|
+
|
|
1835
|
+
We generally recommend altering this or temperature but not both.
|
|
1836
|
+
|
|
1837
|
+
truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
|
|
1838
|
+
control the initial context window of the run.
|
|
1839
|
+
|
|
1840
|
+
extra_headers: Send extra headers
|
|
1841
|
+
|
|
1842
|
+
extra_query: Add additional query parameters to the request
|
|
1843
|
+
|
|
1844
|
+
extra_body: Add additional JSON properties to the request
|
|
1845
|
+
|
|
1846
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1847
|
+
"""
|
|
1848
|
+
...
|
|
1849
|
+
|
|
1850
|
+
@overload
|
|
1851
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
1852
|
+
async def create(
|
|
1853
|
+
self,
|
|
1854
|
+
thread_id: str,
|
|
1855
|
+
*,
|
|
1856
|
+
assistant_id: str,
|
|
1857
|
+
stream: bool,
|
|
1858
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
1859
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
1860
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
1861
|
+
instructions: Optional[str] | Omit = omit,
|
|
1862
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
1863
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
1864
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
1865
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
1866
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
1867
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
1868
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
1869
|
+
temperature: Optional[float] | Omit = omit,
|
|
1870
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
1871
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
1872
|
+
top_p: Optional[float] | Omit = omit,
|
|
1873
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
1874
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1875
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1876
|
+
extra_headers: Headers | None = None,
|
|
1877
|
+
extra_query: Query | None = None,
|
|
1878
|
+
extra_body: Body | None = None,
|
|
1879
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1880
|
+
) -> Run | AsyncStream[AssistantStreamEvent]:
|
|
1881
|
+
"""
|
|
1882
|
+
Create a run.
|
|
1883
|
+
|
|
1884
|
+
Args:
|
|
1885
|
+
assistant_id: The ID of the
|
|
1886
|
+
[assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
|
|
1887
|
+
execute this run.
|
|
1888
|
+
|
|
1889
|
+
stream: If `true`, returns a stream of events that happen during the Run as server-sent
|
|
1890
|
+
events, terminating when the Run enters a terminal state with a `data: [DONE]`
|
|
1891
|
+
message.
|
|
1892
|
+
|
|
1893
|
+
include: A list of additional fields to include in the response. Currently the only
|
|
1894
|
+
supported value is `step_details.tool_calls[*].file_search.results[*].content`
|
|
1895
|
+
to fetch the file search result content.
|
|
1896
|
+
|
|
1897
|
+
See the
|
|
1898
|
+
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
|
|
1899
|
+
for more information.
|
|
1900
|
+
|
|
1901
|
+
additional_instructions: Appends additional instructions at the end of the instructions for the run. This
|
|
1902
|
+
is useful for modifying the behavior on a per-run basis without overriding other
|
|
1903
|
+
instructions.
|
|
1904
|
+
|
|
1905
|
+
additional_messages: Adds additional messages to the thread before creating the run.
|
|
1906
|
+
|
|
1907
|
+
instructions: Overrides the
|
|
1908
|
+
[instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
|
|
1909
|
+
of the assistant. This is useful for modifying the behavior on a per-run basis.
|
|
1910
|
+
|
|
1911
|
+
max_completion_tokens: The maximum number of completion tokens that may be used over the course of the
|
|
1912
|
+
run. The run will make a best effort to use only the number of completion tokens
|
|
1913
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1914
|
+
completion tokens specified, the run will end with status `incomplete`. See
|
|
1915
|
+
`incomplete_details` for more info.
|
|
1916
|
+
|
|
1917
|
+
max_prompt_tokens: The maximum number of prompt tokens that may be used over the course of the run.
|
|
1918
|
+
The run will make a best effort to use only the number of prompt tokens
|
|
1919
|
+
specified, across multiple turns of the run. If the run exceeds the number of
|
|
1920
|
+
prompt tokens specified, the run will end with status `incomplete`. See
|
|
1921
|
+
`incomplete_details` for more info.
|
|
1922
|
+
|
|
1923
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
1924
|
+
for storing additional information about the object in a structured format, and
|
|
1925
|
+
querying for objects via API or the dashboard.
|
|
1926
|
+
|
|
1927
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
1928
|
+
a maximum length of 512 characters.
|
|
1929
|
+
|
|
1930
|
+
model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
|
|
1931
|
+
be used to execute this run. If a value is provided here, it will override the
|
|
1932
|
+
model associated with the assistant. If not, the model associated with the
|
|
1933
|
+
assistant will be used.
|
|
1934
|
+
|
|
1935
|
+
parallel_tool_calls: Whether to enable
|
|
1936
|
+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
|
1937
|
+
during tool use.
|
|
1938
|
+
|
|
1939
|
+
reasoning_effort: Constrains effort on reasoning for
|
|
1940
|
+
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
|
1941
|
+
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
|
|
1942
|
+
reasoning effort can result in faster responses and fewer tokens used on
|
|
1943
|
+
reasoning in a response.
|
|
1944
|
+
|
|
1945
|
+
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
|
|
1946
|
+
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
|
|
1947
|
+
calls are supported for all reasoning values in gpt-5.1.
|
|
1948
|
+
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
|
|
1949
|
+
support `none`.
|
|
1950
|
+
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
|
|
1951
|
+
|
|
1952
|
+
response_format: Specifies the format that the model must output. Compatible with
|
|
1953
|
+
[GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
|
1954
|
+
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
|
|
1955
|
+
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
|
|
1956
|
+
|
|
1957
|
+
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
|
|
1958
|
+
Outputs which ensures the model will match your supplied JSON schema. Learn more
|
|
1959
|
+
in the
|
|
1960
|
+
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
|
|
1961
|
+
|
|
1962
|
+
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
|
|
1963
|
+
message the model generates is valid JSON.
|
|
1964
|
+
|
|
1965
|
+
**Important:** when using JSON mode, you **must** also instruct the model to
|
|
1966
|
+
produce JSON yourself via a system or user message. Without this, the model may
|
|
1967
|
+
generate an unending stream of whitespace until the generation reaches the token
|
|
1968
|
+
limit, resulting in a long-running and seemingly "stuck" request. Also note that
|
|
1969
|
+
the message content may be partially cut off if `finish_reason="length"`, which
|
|
1970
|
+
indicates the generation exceeded `max_tokens` or the conversation exceeded the
|
|
1971
|
+
max context length.
|
|
1972
|
+
|
|
1973
|
+
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
|
|
1974
|
+
make the output more random, while lower values like 0.2 will make it more
|
|
1975
|
+
focused and deterministic.
|
|
1976
|
+
|
|
1977
|
+
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
|
|
1978
|
+
not call any tools and instead generates a message. `auto` is the default value
|
|
1979
|
+
and means the model can pick between generating a message or calling one or more
|
|
1980
|
+
tools. `required` means the model must call one or more tools before responding
|
|
1981
|
+
to the user. Specifying a particular tool like `{"type": "file_search"}` or
|
|
1982
|
+
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
|
|
1983
|
+
call that tool.
|
|
1984
|
+
|
|
1985
|
+
tools: Override the tools the assistant can use for this run. This is useful for
|
|
1986
|
+
modifying the behavior on a per-run basis.
|
|
1987
|
+
|
|
1988
|
+
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
|
|
1989
|
+
model considers the results of the tokens with top_p probability mass. So 0.1
|
|
1990
|
+
means only the tokens comprising the top 10% probability mass are considered.
|
|
1991
|
+
|
|
1992
|
+
We generally recommend altering this or temperature but not both.
|
|
1993
|
+
|
|
1994
|
+
truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to
|
|
1995
|
+
control the initial context window of the run.
|
|
1996
|
+
|
|
1997
|
+
extra_headers: Send extra headers
|
|
1998
|
+
|
|
1999
|
+
extra_query: Add additional query parameters to the request
|
|
2000
|
+
|
|
2001
|
+
extra_body: Add additional JSON properties to the request
|
|
2002
|
+
|
|
2003
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2004
|
+
"""
|
|
2005
|
+
...
|
|
2006
|
+
|
|
2007
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2008
|
+
@required_args(["assistant_id"], ["assistant_id", "stream"])
|
|
2009
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2010
|
+
async def create(
|
|
2011
|
+
self,
|
|
2012
|
+
thread_id: str,
|
|
2013
|
+
*,
|
|
2014
|
+
assistant_id: str,
|
|
2015
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
2016
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2017
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2018
|
+
instructions: Optional[str] | Omit = omit,
|
|
2019
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2020
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2021
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2022
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2023
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2024
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2025
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2026
|
+
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
|
|
2027
|
+
temperature: Optional[float] | Omit = omit,
|
|
2028
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2029
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2030
|
+
top_p: Optional[float] | Omit = omit,
|
|
2031
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2032
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2033
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2034
|
+
extra_headers: Headers | None = None,
|
|
2035
|
+
extra_query: Query | None = None,
|
|
2036
|
+
extra_body: Body | None = None,
|
|
2037
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2038
|
+
) -> Run | AsyncStream[AssistantStreamEvent]:
|
|
2039
|
+
if not thread_id:
|
|
2040
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2041
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2042
|
+
return await self._post(
|
|
2043
|
+
f"/threads/{thread_id}/runs",
|
|
2044
|
+
body=await async_maybe_transform(
|
|
2045
|
+
{
|
|
2046
|
+
"assistant_id": assistant_id,
|
|
2047
|
+
"additional_instructions": additional_instructions,
|
|
2048
|
+
"additional_messages": additional_messages,
|
|
2049
|
+
"instructions": instructions,
|
|
2050
|
+
"max_completion_tokens": max_completion_tokens,
|
|
2051
|
+
"max_prompt_tokens": max_prompt_tokens,
|
|
2052
|
+
"metadata": metadata,
|
|
2053
|
+
"model": model,
|
|
2054
|
+
"parallel_tool_calls": parallel_tool_calls,
|
|
2055
|
+
"reasoning_effort": reasoning_effort,
|
|
2056
|
+
"response_format": response_format,
|
|
2057
|
+
"stream": stream,
|
|
2058
|
+
"temperature": temperature,
|
|
2059
|
+
"tool_choice": tool_choice,
|
|
2060
|
+
"tools": tools,
|
|
2061
|
+
"top_p": top_p,
|
|
2062
|
+
"truncation_strategy": truncation_strategy,
|
|
2063
|
+
},
|
|
2064
|
+
run_create_params.RunCreateParamsStreaming if stream else run_create_params.RunCreateParamsNonStreaming,
|
|
2065
|
+
),
|
|
2066
|
+
options=make_request_options(
|
|
2067
|
+
extra_headers=extra_headers,
|
|
2068
|
+
extra_query=extra_query,
|
|
2069
|
+
extra_body=extra_body,
|
|
2070
|
+
timeout=timeout,
|
|
2071
|
+
query=await async_maybe_transform({"include": include}, run_create_params.RunCreateParams),
|
|
2072
|
+
),
|
|
2073
|
+
cast_to=Run,
|
|
2074
|
+
stream=stream or False,
|
|
2075
|
+
stream_cls=AsyncStream[AssistantStreamEvent],
|
|
2076
|
+
)
|
|
2077
|
+
|
|
2078
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2079
|
+
async def retrieve(
|
|
2080
|
+
self,
|
|
2081
|
+
run_id: str,
|
|
2082
|
+
*,
|
|
2083
|
+
thread_id: str,
|
|
2084
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2085
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2086
|
+
extra_headers: Headers | None = None,
|
|
2087
|
+
extra_query: Query | None = None,
|
|
2088
|
+
extra_body: Body | None = None,
|
|
2089
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2090
|
+
) -> Run:
|
|
2091
|
+
"""
|
|
2092
|
+
Retrieves a run.
|
|
2093
|
+
|
|
2094
|
+
Args:
|
|
2095
|
+
extra_headers: Send extra headers
|
|
2096
|
+
|
|
2097
|
+
extra_query: Add additional query parameters to the request
|
|
2098
|
+
|
|
2099
|
+
extra_body: Add additional JSON properties to the request
|
|
2100
|
+
|
|
2101
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2102
|
+
"""
|
|
2103
|
+
if not thread_id:
|
|
2104
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2105
|
+
if not run_id:
|
|
2106
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
2107
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2108
|
+
return await self._get(
|
|
2109
|
+
f"/threads/{thread_id}/runs/{run_id}",
|
|
2110
|
+
options=make_request_options(
|
|
2111
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2112
|
+
),
|
|
2113
|
+
cast_to=Run,
|
|
2114
|
+
)
|
|
2115
|
+
|
|
2116
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2117
|
+
async def update(
|
|
2118
|
+
self,
|
|
2119
|
+
run_id: str,
|
|
2120
|
+
*,
|
|
2121
|
+
thread_id: str,
|
|
2122
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2123
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2124
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2125
|
+
extra_headers: Headers | None = None,
|
|
2126
|
+
extra_query: Query | None = None,
|
|
2127
|
+
extra_body: Body | None = None,
|
|
2128
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2129
|
+
) -> Run:
|
|
2130
|
+
"""
|
|
2131
|
+
Modifies a run.
|
|
2132
|
+
|
|
2133
|
+
Args:
|
|
2134
|
+
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
|
|
2135
|
+
for storing additional information about the object in a structured format, and
|
|
2136
|
+
querying for objects via API or the dashboard.
|
|
2137
|
+
|
|
2138
|
+
Keys are strings with a maximum length of 64 characters. Values are strings with
|
|
2139
|
+
a maximum length of 512 characters.
|
|
2140
|
+
|
|
2141
|
+
extra_headers: Send extra headers
|
|
2142
|
+
|
|
2143
|
+
extra_query: Add additional query parameters to the request
|
|
2144
|
+
|
|
2145
|
+
extra_body: Add additional JSON properties to the request
|
|
2146
|
+
|
|
2147
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2148
|
+
"""
|
|
2149
|
+
if not thread_id:
|
|
2150
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2151
|
+
if not run_id:
|
|
2152
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
2153
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2154
|
+
return await self._post(
|
|
2155
|
+
f"/threads/{thread_id}/runs/{run_id}",
|
|
2156
|
+
body=await async_maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams),
|
|
2157
|
+
options=make_request_options(
|
|
2158
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2159
|
+
),
|
|
2160
|
+
cast_to=Run,
|
|
2161
|
+
)
|
|
2162
|
+
|
|
2163
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2164
|
+
def list(
|
|
2165
|
+
self,
|
|
2166
|
+
thread_id: str,
|
|
2167
|
+
*,
|
|
2168
|
+
after: str | Omit = omit,
|
|
2169
|
+
before: str | Omit = omit,
|
|
2170
|
+
limit: int | Omit = omit,
|
|
2171
|
+
order: Literal["asc", "desc"] | Omit = omit,
|
|
2172
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2173
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2174
|
+
extra_headers: Headers | None = None,
|
|
2175
|
+
extra_query: Query | None = None,
|
|
2176
|
+
extra_body: Body | None = None,
|
|
2177
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2178
|
+
) -> AsyncPaginator[Run, AsyncCursorPage[Run]]:
|
|
2179
|
+
"""
|
|
2180
|
+
Returns a list of runs belonging to a thread.
|
|
2181
|
+
|
|
2182
|
+
Args:
|
|
2183
|
+
after: A cursor for use in pagination. `after` is an object ID that defines your place
|
|
2184
|
+
in the list. For instance, if you make a list request and receive 100 objects,
|
|
2185
|
+
ending with obj_foo, your subsequent call can include after=obj_foo in order to
|
|
2186
|
+
fetch the next page of the list.
|
|
2187
|
+
|
|
2188
|
+
before: A cursor for use in pagination. `before` is an object ID that defines your place
|
|
2189
|
+
in the list. For instance, if you make a list request and receive 100 objects,
|
|
2190
|
+
starting with obj_foo, your subsequent call can include before=obj_foo in order
|
|
2191
|
+
to fetch the previous page of the list.
|
|
2192
|
+
|
|
2193
|
+
limit: A limit on the number of objects to be returned. Limit can range between 1 and
|
|
2194
|
+
100, and the default is 20.
|
|
2195
|
+
|
|
2196
|
+
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
|
|
2197
|
+
order and `desc` for descending order.
|
|
2198
|
+
|
|
2199
|
+
extra_headers: Send extra headers
|
|
2200
|
+
|
|
2201
|
+
extra_query: Add additional query parameters to the request
|
|
2202
|
+
|
|
2203
|
+
extra_body: Add additional JSON properties to the request
|
|
2204
|
+
|
|
2205
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2206
|
+
"""
|
|
2207
|
+
if not thread_id:
|
|
2208
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2209
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2210
|
+
return self._get_api_list(
|
|
2211
|
+
f"/threads/{thread_id}/runs",
|
|
2212
|
+
page=AsyncCursorPage[Run],
|
|
2213
|
+
options=make_request_options(
|
|
2214
|
+
extra_headers=extra_headers,
|
|
2215
|
+
extra_query=extra_query,
|
|
2216
|
+
extra_body=extra_body,
|
|
2217
|
+
timeout=timeout,
|
|
2218
|
+
query=maybe_transform(
|
|
2219
|
+
{
|
|
2220
|
+
"after": after,
|
|
2221
|
+
"before": before,
|
|
2222
|
+
"limit": limit,
|
|
2223
|
+
"order": order,
|
|
2224
|
+
},
|
|
2225
|
+
run_list_params.RunListParams,
|
|
2226
|
+
),
|
|
2227
|
+
),
|
|
2228
|
+
model=Run,
|
|
2229
|
+
)
|
|
2230
|
+
|
|
2231
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2232
|
+
async def cancel(
|
|
2233
|
+
self,
|
|
2234
|
+
run_id: str,
|
|
2235
|
+
*,
|
|
2236
|
+
thread_id: str,
|
|
2237
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2238
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2239
|
+
extra_headers: Headers | None = None,
|
|
2240
|
+
extra_query: Query | None = None,
|
|
2241
|
+
extra_body: Body | None = None,
|
|
2242
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2243
|
+
) -> Run:
|
|
2244
|
+
"""
|
|
2245
|
+
Cancels a run that is `in_progress`.
|
|
2246
|
+
|
|
2247
|
+
Args:
|
|
2248
|
+
extra_headers: Send extra headers
|
|
2249
|
+
|
|
2250
|
+
extra_query: Add additional query parameters to the request
|
|
2251
|
+
|
|
2252
|
+
extra_body: Add additional JSON properties to the request
|
|
2253
|
+
|
|
2254
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
2255
|
+
"""
|
|
2256
|
+
if not thread_id:
|
|
2257
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2258
|
+
if not run_id:
|
|
2259
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
2260
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2261
|
+
return await self._post(
|
|
2262
|
+
f"/threads/{thread_id}/runs/{run_id}/cancel",
|
|
2263
|
+
options=make_request_options(
|
|
2264
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2265
|
+
),
|
|
2266
|
+
cast_to=Run,
|
|
2267
|
+
)
|
|
2268
|
+
|
|
2269
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2270
|
+
async def create_and_poll(
|
|
2271
|
+
self,
|
|
2272
|
+
*,
|
|
2273
|
+
assistant_id: str,
|
|
2274
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
2275
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2276
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2277
|
+
instructions: Optional[str] | Omit = omit,
|
|
2278
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2279
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2280
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2281
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2282
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2283
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2284
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2285
|
+
temperature: Optional[float] | Omit = omit,
|
|
2286
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2287
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2288
|
+
top_p: Optional[float] | Omit = omit,
|
|
2289
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2290
|
+
poll_interval_ms: int | Omit = omit,
|
|
2291
|
+
thread_id: str,
|
|
2292
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2293
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2294
|
+
extra_headers: Headers | None = None,
|
|
2295
|
+
extra_query: Query | None = None,
|
|
2296
|
+
extra_body: Body | None = None,
|
|
2297
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2298
|
+
) -> Run:
|
|
2299
|
+
"""
|
|
2300
|
+
A helper to create a run an poll for a terminal state. More information on Run
|
|
2301
|
+
lifecycles can be found here:
|
|
2302
|
+
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
|
|
2303
|
+
"""
|
|
2304
|
+
run = await self.create( # pyright: ignore[reportDeprecated]
|
|
2305
|
+
thread_id=thread_id,
|
|
2306
|
+
assistant_id=assistant_id,
|
|
2307
|
+
include=include,
|
|
2308
|
+
additional_instructions=additional_instructions,
|
|
2309
|
+
additional_messages=additional_messages,
|
|
2310
|
+
instructions=instructions,
|
|
2311
|
+
max_completion_tokens=max_completion_tokens,
|
|
2312
|
+
max_prompt_tokens=max_prompt_tokens,
|
|
2313
|
+
metadata=metadata,
|
|
2314
|
+
model=model,
|
|
2315
|
+
response_format=response_format,
|
|
2316
|
+
temperature=temperature,
|
|
2317
|
+
tool_choice=tool_choice,
|
|
2318
|
+
parallel_tool_calls=parallel_tool_calls,
|
|
2319
|
+
reasoning_effort=reasoning_effort,
|
|
2320
|
+
# We assume we are not streaming when polling
|
|
2321
|
+
stream=False,
|
|
2322
|
+
tools=tools,
|
|
2323
|
+
truncation_strategy=truncation_strategy,
|
|
2324
|
+
top_p=top_p,
|
|
2325
|
+
extra_headers=extra_headers,
|
|
2326
|
+
extra_query=extra_query,
|
|
2327
|
+
extra_body=extra_body,
|
|
2328
|
+
timeout=timeout,
|
|
2329
|
+
)
|
|
2330
|
+
return await self.poll( # pyright: ignore[reportDeprecated]
|
|
2331
|
+
run.id,
|
|
2332
|
+
thread_id=thread_id,
|
|
2333
|
+
extra_headers=extra_headers,
|
|
2334
|
+
extra_query=extra_query,
|
|
2335
|
+
extra_body=extra_body,
|
|
2336
|
+
poll_interval_ms=poll_interval_ms,
|
|
2337
|
+
timeout=timeout,
|
|
2338
|
+
)
|
|
2339
|
+
|
|
2340
|
+
@overload
|
|
2341
|
+
@typing_extensions.deprecated("use `stream` instead")
|
|
2342
|
+
def create_and_stream(
|
|
2343
|
+
self,
|
|
2344
|
+
*,
|
|
2345
|
+
assistant_id: str,
|
|
2346
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2347
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2348
|
+
instructions: Optional[str] | Omit = omit,
|
|
2349
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2350
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2351
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2352
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2353
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2354
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2355
|
+
temperature: Optional[float] | Omit = omit,
|
|
2356
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2357
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2358
|
+
top_p: Optional[float] | Omit = omit,
|
|
2359
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2360
|
+
thread_id: str,
|
|
2361
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2362
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2363
|
+
extra_headers: Headers | None = None,
|
|
2364
|
+
extra_query: Query | None = None,
|
|
2365
|
+
extra_body: Body | None = None,
|
|
2366
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2367
|
+
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
|
|
2368
|
+
"""Create a Run stream"""
|
|
2369
|
+
...
|
|
2370
|
+
|
|
2371
|
+
@overload
|
|
2372
|
+
@typing_extensions.deprecated("use `stream` instead")
|
|
2373
|
+
def create_and_stream(
|
|
2374
|
+
self,
|
|
2375
|
+
*,
|
|
2376
|
+
assistant_id: str,
|
|
2377
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2378
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2379
|
+
instructions: Optional[str] | Omit = omit,
|
|
2380
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2381
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2382
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2383
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2384
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2385
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2386
|
+
temperature: Optional[float] | Omit = omit,
|
|
2387
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2388
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2389
|
+
top_p: Optional[float] | Omit = omit,
|
|
2390
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2391
|
+
thread_id: str,
|
|
2392
|
+
event_handler: AsyncAssistantEventHandlerT,
|
|
2393
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2394
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2395
|
+
extra_headers: Headers | None = None,
|
|
2396
|
+
extra_query: Query | None = None,
|
|
2397
|
+
extra_body: Body | None = None,
|
|
2398
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2399
|
+
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
|
|
2400
|
+
"""Create a Run stream"""
|
|
2401
|
+
...
|
|
2402
|
+
|
|
2403
|
+
@typing_extensions.deprecated("use `stream` instead")
|
|
2404
|
+
def create_and_stream(
|
|
2405
|
+
self,
|
|
2406
|
+
*,
|
|
2407
|
+
assistant_id: str,
|
|
2408
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2409
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2410
|
+
instructions: Optional[str] | Omit = omit,
|
|
2411
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2412
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2413
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2414
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2415
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2416
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2417
|
+
temperature: Optional[float] | Omit = omit,
|
|
2418
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2419
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2420
|
+
top_p: Optional[float] | Omit = omit,
|
|
2421
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2422
|
+
thread_id: str,
|
|
2423
|
+
event_handler: AsyncAssistantEventHandlerT | None = None,
|
|
2424
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2425
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2426
|
+
extra_headers: Headers | None = None,
|
|
2427
|
+
extra_query: Query | None = None,
|
|
2428
|
+
extra_body: Body | None = None,
|
|
2429
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2430
|
+
) -> (
|
|
2431
|
+
AsyncAssistantStreamManager[AsyncAssistantEventHandler]
|
|
2432
|
+
| AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
|
|
2433
|
+
):
|
|
2434
|
+
"""Create a Run stream"""
|
|
2435
|
+
if not thread_id:
|
|
2436
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2437
|
+
|
|
2438
|
+
extra_headers = {
|
|
2439
|
+
"OpenAI-Beta": "assistants=v2",
|
|
2440
|
+
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
|
|
2441
|
+
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
|
|
2442
|
+
**(extra_headers or {}),
|
|
2443
|
+
}
|
|
2444
|
+
request = self._post(
|
|
2445
|
+
f"/threads/{thread_id}/runs",
|
|
2446
|
+
body=maybe_transform(
|
|
2447
|
+
{
|
|
2448
|
+
"assistant_id": assistant_id,
|
|
2449
|
+
"additional_instructions": additional_instructions,
|
|
2450
|
+
"additional_messages": additional_messages,
|
|
2451
|
+
"instructions": instructions,
|
|
2452
|
+
"max_completion_tokens": max_completion_tokens,
|
|
2453
|
+
"max_prompt_tokens": max_prompt_tokens,
|
|
2454
|
+
"metadata": metadata,
|
|
2455
|
+
"model": model,
|
|
2456
|
+
"response_format": response_format,
|
|
2457
|
+
"temperature": temperature,
|
|
2458
|
+
"tool_choice": tool_choice,
|
|
2459
|
+
"stream": True,
|
|
2460
|
+
"tools": tools,
|
|
2461
|
+
"truncation_strategy": truncation_strategy,
|
|
2462
|
+
"top_p": top_p,
|
|
2463
|
+
"parallel_tool_calls": parallel_tool_calls,
|
|
2464
|
+
},
|
|
2465
|
+
run_create_params.RunCreateParams,
|
|
2466
|
+
),
|
|
2467
|
+
options=make_request_options(
|
|
2468
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2469
|
+
),
|
|
2470
|
+
cast_to=Run,
|
|
2471
|
+
stream=True,
|
|
2472
|
+
stream_cls=AsyncStream[AssistantStreamEvent],
|
|
2473
|
+
)
|
|
2474
|
+
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
|
|
2475
|
+
|
|
2476
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2477
|
+
async def poll(
|
|
2478
|
+
self,
|
|
2479
|
+
run_id: str,
|
|
2480
|
+
thread_id: str,
|
|
2481
|
+
extra_headers: Headers | None = None,
|
|
2482
|
+
extra_query: Query | None = None,
|
|
2483
|
+
extra_body: Body | None = None,
|
|
2484
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2485
|
+
poll_interval_ms: int | Omit = omit,
|
|
2486
|
+
) -> Run:
|
|
2487
|
+
"""
|
|
2488
|
+
A helper to poll a run status until it reaches a terminal state. More
|
|
2489
|
+
information on Run lifecycles can be found here:
|
|
2490
|
+
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
|
|
2491
|
+
"""
|
|
2492
|
+
extra_headers = {"X-Stainless-Poll-Helper": "true", **(extra_headers or {})}
|
|
2493
|
+
|
|
2494
|
+
if is_given(poll_interval_ms):
|
|
2495
|
+
extra_headers["X-Stainless-Custom-Poll-Interval"] = str(poll_interval_ms)
|
|
2496
|
+
|
|
2497
|
+
terminal_states = {"requires_action", "cancelled", "completed", "failed", "expired", "incomplete"}
|
|
2498
|
+
while True:
|
|
2499
|
+
response = await self.with_raw_response.retrieve( # pyright: ignore[reportDeprecated]
|
|
2500
|
+
thread_id=thread_id,
|
|
2501
|
+
run_id=run_id,
|
|
2502
|
+
extra_headers=extra_headers,
|
|
2503
|
+
extra_body=extra_body,
|
|
2504
|
+
extra_query=extra_query,
|
|
2505
|
+
timeout=timeout,
|
|
2506
|
+
)
|
|
2507
|
+
|
|
2508
|
+
run = response.parse()
|
|
2509
|
+
# Return if we reached a terminal state
|
|
2510
|
+
if run.status in terminal_states:
|
|
2511
|
+
return run
|
|
2512
|
+
|
|
2513
|
+
if not is_given(poll_interval_ms):
|
|
2514
|
+
from_header = response.headers.get("openai-poll-after-ms")
|
|
2515
|
+
if from_header is not None:
|
|
2516
|
+
poll_interval_ms = int(from_header)
|
|
2517
|
+
else:
|
|
2518
|
+
poll_interval_ms = 1000
|
|
2519
|
+
|
|
2520
|
+
await self._sleep(poll_interval_ms / 1000)
|
|
2521
|
+
|
|
2522
|
+
@overload
|
|
2523
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2524
|
+
def stream(
|
|
2525
|
+
self,
|
|
2526
|
+
*,
|
|
2527
|
+
assistant_id: str,
|
|
2528
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2529
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2530
|
+
instructions: Optional[str] | Omit = omit,
|
|
2531
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2532
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2533
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2534
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2535
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2536
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2537
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2538
|
+
temperature: Optional[float] | Omit = omit,
|
|
2539
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2540
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2541
|
+
top_p: Optional[float] | Omit = omit,
|
|
2542
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2543
|
+
thread_id: str,
|
|
2544
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2545
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2546
|
+
extra_headers: Headers | None = None,
|
|
2547
|
+
extra_query: Query | None = None,
|
|
2548
|
+
extra_body: Body | None = None,
|
|
2549
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2550
|
+
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
|
|
2551
|
+
"""Create a Run stream"""
|
|
2552
|
+
...
|
|
2553
|
+
|
|
2554
|
+
@overload
|
|
2555
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2556
|
+
def stream(
|
|
2557
|
+
self,
|
|
2558
|
+
*,
|
|
2559
|
+
assistant_id: str,
|
|
2560
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
2561
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2562
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2563
|
+
instructions: Optional[str] | Omit = omit,
|
|
2564
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2565
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2566
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2567
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2568
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2569
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2570
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2571
|
+
temperature: Optional[float] | Omit = omit,
|
|
2572
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2573
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2574
|
+
top_p: Optional[float] | Omit = omit,
|
|
2575
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2576
|
+
thread_id: str,
|
|
2577
|
+
event_handler: AsyncAssistantEventHandlerT,
|
|
2578
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2579
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2580
|
+
extra_headers: Headers | None = None,
|
|
2581
|
+
extra_query: Query | None = None,
|
|
2582
|
+
extra_body: Body | None = None,
|
|
2583
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2584
|
+
) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
|
|
2585
|
+
"""Create a Run stream"""
|
|
2586
|
+
...
|
|
2587
|
+
|
|
2588
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2589
|
+
def stream(
|
|
2590
|
+
self,
|
|
2591
|
+
*,
|
|
2592
|
+
assistant_id: str,
|
|
2593
|
+
include: List[RunStepInclude] | Omit = omit,
|
|
2594
|
+
additional_instructions: Optional[str] | Omit = omit,
|
|
2595
|
+
additional_messages: Optional[Iterable[run_create_params.AdditionalMessage]] | Omit = omit,
|
|
2596
|
+
instructions: Optional[str] | Omit = omit,
|
|
2597
|
+
max_completion_tokens: Optional[int] | Omit = omit,
|
|
2598
|
+
max_prompt_tokens: Optional[int] | Omit = omit,
|
|
2599
|
+
metadata: Optional[Metadata] | Omit = omit,
|
|
2600
|
+
model: Union[str, ChatModel, None] | Omit = omit,
|
|
2601
|
+
parallel_tool_calls: bool | Omit = omit,
|
|
2602
|
+
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
|
|
2603
|
+
response_format: Optional[AssistantResponseFormatOptionParam] | Omit = omit,
|
|
2604
|
+
temperature: Optional[float] | Omit = omit,
|
|
2605
|
+
tool_choice: Optional[AssistantToolChoiceOptionParam] | Omit = omit,
|
|
2606
|
+
tools: Optional[Iterable[AssistantToolParam]] | Omit = omit,
|
|
2607
|
+
top_p: Optional[float] | Omit = omit,
|
|
2608
|
+
truncation_strategy: Optional[run_create_params.TruncationStrategy] | Omit = omit,
|
|
2609
|
+
thread_id: str,
|
|
2610
|
+
event_handler: AsyncAssistantEventHandlerT | None = None,
|
|
2611
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2612
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2613
|
+
extra_headers: Headers | None = None,
|
|
2614
|
+
extra_query: Query | None = None,
|
|
2615
|
+
extra_body: Body | None = None,
|
|
2616
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2617
|
+
) -> (
|
|
2618
|
+
AsyncAssistantStreamManager[AsyncAssistantEventHandler]
|
|
2619
|
+
| AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
|
|
2620
|
+
):
|
|
2621
|
+
"""Create a Run stream"""
|
|
2622
|
+
if not thread_id:
|
|
2623
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2624
|
+
|
|
2625
|
+
extra_headers = {
|
|
2626
|
+
"OpenAI-Beta": "assistants=v2",
|
|
2627
|
+
"X-Stainless-Stream-Helper": "threads.runs.create_and_stream",
|
|
2628
|
+
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
|
|
2629
|
+
**(extra_headers or {}),
|
|
2630
|
+
}
|
|
2631
|
+
request = self._post(
|
|
2632
|
+
f"/threads/{thread_id}/runs",
|
|
2633
|
+
body=maybe_transform(
|
|
2634
|
+
{
|
|
2635
|
+
"assistant_id": assistant_id,
|
|
2636
|
+
"additional_instructions": additional_instructions,
|
|
2637
|
+
"additional_messages": additional_messages,
|
|
2638
|
+
"instructions": instructions,
|
|
2639
|
+
"max_completion_tokens": max_completion_tokens,
|
|
2640
|
+
"max_prompt_tokens": max_prompt_tokens,
|
|
2641
|
+
"metadata": metadata,
|
|
2642
|
+
"model": model,
|
|
2643
|
+
"response_format": response_format,
|
|
2644
|
+
"temperature": temperature,
|
|
2645
|
+
"tool_choice": tool_choice,
|
|
2646
|
+
"stream": True,
|
|
2647
|
+
"tools": tools,
|
|
2648
|
+
"parallel_tool_calls": parallel_tool_calls,
|
|
2649
|
+
"reasoning_effort": reasoning_effort,
|
|
2650
|
+
"truncation_strategy": truncation_strategy,
|
|
2651
|
+
"top_p": top_p,
|
|
2652
|
+
},
|
|
2653
|
+
run_create_params.RunCreateParams,
|
|
2654
|
+
),
|
|
2655
|
+
options=make_request_options(
|
|
2656
|
+
extra_headers=extra_headers,
|
|
2657
|
+
extra_query=extra_query,
|
|
2658
|
+
extra_body=extra_body,
|
|
2659
|
+
timeout=timeout,
|
|
2660
|
+
query=maybe_transform({"include": include}, run_create_params.RunCreateParams),
|
|
2661
|
+
),
|
|
2662
|
+
cast_to=Run,
|
|
2663
|
+
stream=True,
|
|
2664
|
+
stream_cls=AsyncStream[AssistantStreamEvent],
|
|
2665
|
+
)
|
|
2666
|
+
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
|
|
2667
|
+
|
|
2668
|
+
    # Overload: `stream` omitted or False — the call blocks and returns the
    # parsed `Run` once the server responds.
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def submit_tool_outputs(
        self,
        run_id: str,
        *,
        thread_id: str,
        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
        stream: Optional[Literal[False]] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Run:
        """
        When a run has the `status: "requires_action"` and `required_action.type` is
        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
        tool calls once they're all completed. All outputs must be submitted in a single
        request.

        Args:
          tool_outputs: A list of tools for which the outputs are being submitted.

          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
|
|
2707
|
+
    # Overload: `stream=True` — the call returns an async SSE event stream
    # instead of a parsed `Run`.
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def submit_tool_outputs(
        self,
        run_id: str,
        *,
        thread_id: str,
        stream: Literal[True],
        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[AssistantStreamEvent]:
        """
        When a run has the `status: "requires_action"` and `required_action.type` is
        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
        tool calls once they're all completed. All outputs must be submitted in a single
        request.

        Args:
          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          tool_outputs: A list of tools for which the outputs are being submitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
|
|
2746
|
+
    # Overload: `stream` is a runtime bool — result type depends on its value,
    # so the union of the two previous overloads is returned.
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    async def submit_tool_outputs(
        self,
        run_id: str,
        *,
        thread_id: str,
        stream: bool,
        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Run | AsyncStream[AssistantStreamEvent]:
        """
        When a run has the `status: "requires_action"` and `required_action.type` is
        `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
        tool calls once they're all completed. All outputs must be submitted in a single
        request.

        Args:
          stream: If `true`, returns a stream of events that happen during the Run as server-sent
              events, terminating when the Run enters a terminal state with a `data: [DONE]`
              message.

          tool_outputs: A list of tools for which the outputs are being submitted.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
|
|
2785
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2786
|
+
@required_args(["thread_id", "tool_outputs"], ["thread_id", "stream", "tool_outputs"])
|
|
2787
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2788
|
+
async def submit_tool_outputs(
|
|
2789
|
+
self,
|
|
2790
|
+
run_id: str,
|
|
2791
|
+
*,
|
|
2792
|
+
thread_id: str,
|
|
2793
|
+
tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
|
|
2794
|
+
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
|
|
2795
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2796
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2797
|
+
extra_headers: Headers | None = None,
|
|
2798
|
+
extra_query: Query | None = None,
|
|
2799
|
+
extra_body: Body | None = None,
|
|
2800
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
2801
|
+
) -> Run | AsyncStream[AssistantStreamEvent]:
|
|
2802
|
+
if not thread_id:
|
|
2803
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2804
|
+
if not run_id:
|
|
2805
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
2806
|
+
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
|
|
2807
|
+
return await self._post(
|
|
2808
|
+
f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
|
|
2809
|
+
body=await async_maybe_transform(
|
|
2810
|
+
{
|
|
2811
|
+
"tool_outputs": tool_outputs,
|
|
2812
|
+
"stream": stream,
|
|
2813
|
+
},
|
|
2814
|
+
run_submit_tool_outputs_params.RunSubmitToolOutputsParamsStreaming
|
|
2815
|
+
if stream
|
|
2816
|
+
else run_submit_tool_outputs_params.RunSubmitToolOutputsParamsNonStreaming,
|
|
2817
|
+
),
|
|
2818
|
+
options=make_request_options(
|
|
2819
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2820
|
+
),
|
|
2821
|
+
cast_to=Run,
|
|
2822
|
+
stream=stream or False,
|
|
2823
|
+
stream_cls=AsyncStream[AssistantStreamEvent],
|
|
2824
|
+
)
|
|
2825
|
+
|
|
2826
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2827
|
+
async def submit_tool_outputs_and_poll(
|
|
2828
|
+
self,
|
|
2829
|
+
*,
|
|
2830
|
+
tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
|
|
2831
|
+
run_id: str,
|
|
2832
|
+
thread_id: str,
|
|
2833
|
+
poll_interval_ms: int | Omit = omit,
|
|
2834
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2835
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2836
|
+
extra_headers: Headers | None = None,
|
|
2837
|
+
extra_query: Query | None = None,
|
|
2838
|
+
extra_body: Body | None = None,
|
|
2839
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2840
|
+
) -> Run:
|
|
2841
|
+
"""
|
|
2842
|
+
A helper to submit a tool output to a run and poll for a terminal run state.
|
|
2843
|
+
More information on Run lifecycles can be found here:
|
|
2844
|
+
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
|
|
2845
|
+
"""
|
|
2846
|
+
run = await self.submit_tool_outputs( # pyright: ignore[reportDeprecated]
|
|
2847
|
+
run_id=run_id,
|
|
2848
|
+
thread_id=thread_id,
|
|
2849
|
+
tool_outputs=tool_outputs,
|
|
2850
|
+
stream=False,
|
|
2851
|
+
extra_headers=extra_headers,
|
|
2852
|
+
extra_query=extra_query,
|
|
2853
|
+
extra_body=extra_body,
|
|
2854
|
+
timeout=timeout,
|
|
2855
|
+
)
|
|
2856
|
+
return await self.poll( # pyright: ignore[reportDeprecated]
|
|
2857
|
+
run_id=run.id,
|
|
2858
|
+
thread_id=thread_id,
|
|
2859
|
+
extra_headers=extra_headers,
|
|
2860
|
+
extra_query=extra_query,
|
|
2861
|
+
extra_body=extra_body,
|
|
2862
|
+
timeout=timeout,
|
|
2863
|
+
poll_interval_ms=poll_interval_ms,
|
|
2864
|
+
)
|
|
2865
|
+
|
|
2866
|
+
    # Overload: no custom handler — the manager is parameterized on the
    # default `AsyncAssistantEventHandler`.
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def submit_tool_outputs_stream(
        self,
        *,
        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
        run_id: str,
        thread_id: str,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandler]:
        """
        Submit the tool outputs from a previous run and stream the run to a terminal
        state. More information on Run lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        """
        ...
|
|
2888
|
+
    # Overload: caller supplies a custom `event_handler`; the manager is
    # parameterized on that handler's type.
    @overload
    @typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
    def submit_tool_outputs_stream(
        self,
        *,
        tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
        run_id: str,
        thread_id: str,
        event_handler: AsyncAssistantEventHandlerT,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]:
        """
        Submit the tool outputs from a previous run and stream the run to a terminal
        state. More information on Run lifecycles can be found here:
        https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
        """
        ...
|
|
2911
|
+
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
|
|
2912
|
+
def submit_tool_outputs_stream(
|
|
2913
|
+
self,
|
|
2914
|
+
*,
|
|
2915
|
+
tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput],
|
|
2916
|
+
run_id: str,
|
|
2917
|
+
thread_id: str,
|
|
2918
|
+
event_handler: AsyncAssistantEventHandlerT | None = None,
|
|
2919
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
2920
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
2921
|
+
extra_headers: Headers | None = None,
|
|
2922
|
+
extra_query: Query | None = None,
|
|
2923
|
+
extra_body: Body | None = None,
|
|
2924
|
+
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
|
|
2925
|
+
) -> (
|
|
2926
|
+
AsyncAssistantStreamManager[AsyncAssistantEventHandler]
|
|
2927
|
+
| AsyncAssistantStreamManager[AsyncAssistantEventHandlerT]
|
|
2928
|
+
):
|
|
2929
|
+
"""
|
|
2930
|
+
Submit the tool outputs from a previous run and stream the run to a terminal
|
|
2931
|
+
state. More information on Run lifecycles can be found here:
|
|
2932
|
+
https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
|
|
2933
|
+
"""
|
|
2934
|
+
if not run_id:
|
|
2935
|
+
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
|
|
2936
|
+
|
|
2937
|
+
if not thread_id:
|
|
2938
|
+
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
|
|
2939
|
+
|
|
2940
|
+
extra_headers = {
|
|
2941
|
+
"OpenAI-Beta": "assistants=v2",
|
|
2942
|
+
"X-Stainless-Stream-Helper": "threads.runs.submit_tool_outputs_stream",
|
|
2943
|
+
"X-Stainless-Custom-Event-Handler": "true" if event_handler else "false",
|
|
2944
|
+
**(extra_headers or {}),
|
|
2945
|
+
}
|
|
2946
|
+
request = self._post(
|
|
2947
|
+
f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs",
|
|
2948
|
+
body=maybe_transform(
|
|
2949
|
+
{
|
|
2950
|
+
"tool_outputs": tool_outputs,
|
|
2951
|
+
"stream": True,
|
|
2952
|
+
},
|
|
2953
|
+
run_submit_tool_outputs_params.RunSubmitToolOutputsParams,
|
|
2954
|
+
),
|
|
2955
|
+
options=make_request_options(
|
|
2956
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
2957
|
+
),
|
|
2958
|
+
cast_to=Run,
|
|
2959
|
+
stream=True,
|
|
2960
|
+
stream_cls=AsyncStream[AssistantStreamEvent],
|
|
2961
|
+
)
|
|
2962
|
+
return AsyncAssistantStreamManager(request, event_handler=event_handler or AsyncAssistantEventHandler())
|
|
2963
|
+
|
|
2964
|
+
|
|
2965
|
+
class RunsWithRawResponse:
    """View of `Runs` whose endpoint methods return raw responses.

    Each wrapped method returns the unparsed HTTP response object rather
    than the parsed model that the underlying `Runs` method returns.
    """

    def __init__(self, runs: Runs) -> None:
        self._runs = runs

        # One raw-response wrapper per endpoint; the pyright suppressions
        # cover the deprecated Assistants API accesses.
        self.create = _legacy_response.to_raw_response_wrapper(runs.create)  # pyright: ignore[reportDeprecated]
        self.retrieve = _legacy_response.to_raw_response_wrapper(runs.retrieve)  # pyright: ignore[reportDeprecated]
        self.update = _legacy_response.to_raw_response_wrapper(runs.update)  # pyright: ignore[reportDeprecated]
        self.list = _legacy_response.to_raw_response_wrapper(runs.list)  # pyright: ignore[reportDeprecated]
        self.cancel = _legacy_response.to_raw_response_wrapper(runs.cancel)  # pyright: ignore[reportDeprecated]
        self.submit_tool_outputs = _legacy_response.to_raw_response_wrapper(runs.submit_tool_outputs)  # pyright: ignore[reportDeprecated]

    @cached_property
    def steps(self) -> StepsWithRawResponse:
        # Lazily build the sub-resource view the first time it is accessed.
        return StepsWithRawResponse(self._runs.steps)
|
|
3004
|
+
|
|
3005
|
+
class AsyncRunsWithRawResponse:
    """View of `AsyncRuns` whose endpoint methods return raw responses.

    Each wrapped coroutine returns the unparsed HTTP response object rather
    than the parsed model that the underlying `AsyncRuns` method returns.
    """

    def __init__(self, runs: AsyncRuns) -> None:
        self._runs = runs

        # One raw-response wrapper per endpoint; the pyright suppressions
        # cover the deprecated Assistants API accesses.
        self.create = _legacy_response.async_to_raw_response_wrapper(runs.create)  # pyright: ignore[reportDeprecated]
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(runs.retrieve)  # pyright: ignore[reportDeprecated]
        self.update = _legacy_response.async_to_raw_response_wrapper(runs.update)  # pyright: ignore[reportDeprecated]
        self.list = _legacy_response.async_to_raw_response_wrapper(runs.list)  # pyright: ignore[reportDeprecated]
        self.cancel = _legacy_response.async_to_raw_response_wrapper(runs.cancel)  # pyright: ignore[reportDeprecated]
        self.submit_tool_outputs = _legacy_response.async_to_raw_response_wrapper(runs.submit_tool_outputs)  # pyright: ignore[reportDeprecated]

    @cached_property
    def steps(self) -> AsyncStepsWithRawResponse:
        # Lazily build the sub-resource view the first time it is accessed.
        return AsyncStepsWithRawResponse(self._runs.steps)
|
|
3044
|
+
|
|
3045
|
+
class RunsWithStreamingResponse:
    """View of `Runs` whose endpoint methods return streamed responses.

    Each wrapped method yields the response incrementally instead of the
    fully-parsed model that the underlying `Runs` method returns.
    """

    def __init__(self, runs: Runs) -> None:
        self._runs = runs

        # One streamed-response wrapper per endpoint; the pyright suppressions
        # cover the deprecated Assistants API accesses.
        self.create = to_streamed_response_wrapper(runs.create)  # pyright: ignore[reportDeprecated]
        self.retrieve = to_streamed_response_wrapper(runs.retrieve)  # pyright: ignore[reportDeprecated]
        self.update = to_streamed_response_wrapper(runs.update)  # pyright: ignore[reportDeprecated]
        self.list = to_streamed_response_wrapper(runs.list)  # pyright: ignore[reportDeprecated]
        self.cancel = to_streamed_response_wrapper(runs.cancel)  # pyright: ignore[reportDeprecated]
        self.submit_tool_outputs = to_streamed_response_wrapper(runs.submit_tool_outputs)  # pyright: ignore[reportDeprecated]

    @cached_property
    def steps(self) -> StepsWithStreamingResponse:
        # Lazily build the sub-resource view the first time it is accessed.
        return StepsWithStreamingResponse(self._runs.steps)
|
|
3084
|
+
|
|
3085
|
+
class AsyncRunsWithStreamingResponse:
    """View of `AsyncRuns` whose endpoint methods return streamed responses.

    Each wrapped coroutine yields the response incrementally instead of the
    fully-parsed model that the underlying `AsyncRuns` method returns.
    """

    def __init__(self, runs: AsyncRuns) -> None:
        self._runs = runs

        # One streamed-response wrapper per endpoint; the pyright suppressions
        # cover the deprecated Assistants API accesses.
        self.create = async_to_streamed_response_wrapper(runs.create)  # pyright: ignore[reportDeprecated]
        self.retrieve = async_to_streamed_response_wrapper(runs.retrieve)  # pyright: ignore[reportDeprecated]
        self.update = async_to_streamed_response_wrapper(runs.update)  # pyright: ignore[reportDeprecated]
        self.list = async_to_streamed_response_wrapper(runs.list)  # pyright: ignore[reportDeprecated]
        self.cancel = async_to_streamed_response_wrapper(runs.cancel)  # pyright: ignore[reportDeprecated]
        self.submit_tool_outputs = async_to_streamed_response_wrapper(runs.submit_tool_outputs)  # pyright: ignore[reportDeprecated]

    @cached_property
    def steps(self) -> AsyncStepsWithStreamingResponse:
        # Lazily build the sub-resource view the first time it is accessed.
        return AsyncStepsWithStreamingResponse(self._runs.steps)