aimlapi-sdk-python 2.8.1b0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aimlapi/__init__.py +243 -0
- aimlapi/__main__.py +3 -0
- aimlapi/_client.py +368 -0
- aimlapi/_utils/__init__.py +3 -0
- aimlapi/_utils/_compat.py +3 -0
- aimlapi/_utils/_datetime_parse.py +3 -0
- aimlapi/_utils/_logs.py +3 -0
- aimlapi/_utils/_proxy.py +3 -0
- aimlapi/_utils/_reflection.py +3 -0
- aimlapi/_utils/_resources_proxy.py +3 -0
- aimlapi/_utils/_streams.py +3 -0
- aimlapi/_utils/_sync.py +3 -0
- aimlapi/_utils/_transform.py +3 -0
- aimlapi/_utils/_typing.py +3 -0
- aimlapi/_utils/_utils.py +3 -0
- aimlapi/_version.py +9 -0
- aimlapi/cli/__init__.py +3 -0
- aimlapi/cli/_api/__init__.py +3 -0
- aimlapi/cli/_api/_main.py +3 -0
- aimlapi/cli/_api/audio.py +3 -0
- aimlapi/cli/_api/chat/__init__.py +3 -0
- aimlapi/cli/_api/chat/completions.py +3 -0
- aimlapi/cli/_api/completions.py +3 -0
- aimlapi/cli/_api/files.py +3 -0
- aimlapi/cli/_api/fine_tuning/__init__.py +3 -0
- aimlapi/cli/_api/fine_tuning/jobs.py +3 -0
- aimlapi/cli/_api/image.py +3 -0
- aimlapi/cli/_api/models.py +3 -0
- aimlapi/cli/_cli.py +3 -0
- aimlapi/cli/_errors.py +3 -0
- aimlapi/cli/_models.py +3 -0
- aimlapi/cli/_progress.py +3 -0
- aimlapi/cli/_tools/__init__.py +3 -0
- aimlapi/cli/_tools/_main.py +3 -0
- aimlapi/cli/_tools/fine_tunes.py +3 -0
- aimlapi/cli/_tools/migrate.py +3 -0
- aimlapi/cli/_utils.py +3 -0
- aimlapi/helpers/__init__.py +3 -0
- aimlapi/helpers/local_audio_player.py +3 -0
- aimlapi/helpers/microphone.py +3 -0
- aimlapi/lib/__init__.py +3 -0
- aimlapi/lib/_old_api.py +3 -0
- aimlapi/lib/_parsing/__init__.py +3 -0
- aimlapi/lib/_parsing/_completions.py +3 -0
- aimlapi/lib/_parsing/_responses.py +3 -0
- aimlapi/lib/_pydantic.py +3 -0
- aimlapi/lib/_realtime.py +3 -0
- aimlapi/lib/_tools.py +3 -0
- aimlapi/lib/_validators.py +3 -0
- aimlapi/lib/azure.py +3 -0
- aimlapi/lib/streaming/__init__.py +3 -0
- aimlapi/lib/streaming/_assistants.py +3 -0
- aimlapi/lib/streaming/_deltas.py +3 -0
- aimlapi/lib/streaming/chat/__init__.py +3 -0
- aimlapi/lib/streaming/chat/_completions.py +3 -0
- aimlapi/lib/streaming/chat/_events.py +3 -0
- aimlapi/lib/streaming/chat/_types.py +3 -0
- aimlapi/lib/streaming/responses/__init__.py +3 -0
- aimlapi/lib/streaming/responses/_events.py +3 -0
- aimlapi/lib/streaming/responses/_responses.py +3 -0
- aimlapi/lib/streaming/responses/_types.py +3 -0
- aimlapi/pagination.py +3 -0
- aimlapi/resources/__init__.py +3 -0
- aimlapi/resources/audio/__init__.py +47 -0
- aimlapi/resources/audio/_polling.py +129 -0
- aimlapi/resources/audio/audio.py +56 -0
- aimlapi/resources/audio/speech.py +428 -0
- aimlapi/resources/audio/transcriptions.py +219 -0
- aimlapi/resources/audio/translations.py +3 -0
- aimlapi/resources/batches.py +3 -0
- aimlapi/resources/beta/__init__.py +3 -0
- aimlapi/resources/beta/assistants.py +3 -0
- aimlapi/resources/beta/beta.py +3 -0
- aimlapi/resources/beta/chatkit/__init__.py +3 -0
- aimlapi/resources/beta/chatkit/chatkit.py +3 -0
- aimlapi/resources/beta/chatkit/sessions.py +3 -0
- aimlapi/resources/beta/chatkit/threads.py +3 -0
- aimlapi/resources/beta/realtime/__init__.py +3 -0
- aimlapi/resources/beta/realtime/realtime.py +3 -0
- aimlapi/resources/beta/realtime/sessions.py +3 -0
- aimlapi/resources/beta/realtime/transcription_sessions.py +3 -0
- aimlapi/resources/beta/threads/__init__.py +3 -0
- aimlapi/resources/beta/threads/messages.py +3 -0
- aimlapi/resources/beta/threads/runs/__init__.py +3 -0
- aimlapi/resources/beta/threads/runs/runs.py +3 -0
- aimlapi/resources/beta/threads/runs/steps.py +3 -0
- aimlapi/resources/beta/threads/threads.py +3 -0
- aimlapi/resources/chat/__init__.py +3 -0
- aimlapi/resources/chat/chat.py +86 -0
- aimlapi/resources/chat/completions/__init__.py +4 -0
- aimlapi/resources/chat/completions/completions.py +452 -0
- aimlapi/resources/chat/completions/messages.py +3 -0
- aimlapi/resources/completions.py +3 -0
- aimlapi/resources/containers/__init__.py +3 -0
- aimlapi/resources/containers/containers.py +3 -0
- aimlapi/resources/containers/files/__init__.py +3 -0
- aimlapi/resources/containers/files/content.py +3 -0
- aimlapi/resources/containers/files/files.py +3 -0
- aimlapi/resources/conversations/__init__.py +3 -0
- aimlapi/resources/conversations/conversations.py +3 -0
- aimlapi/resources/conversations/items.py +3 -0
- aimlapi/resources/embeddings.py +3 -0
- aimlapi/resources/evals/__init__.py +3 -0
- aimlapi/resources/evals/evals.py +3 -0
- aimlapi/resources/evals/runs/__init__.py +3 -0
- aimlapi/resources/evals/runs/output_items.py +3 -0
- aimlapi/resources/evals/runs/runs.py +3 -0
- aimlapi/resources/files.py +3 -0
- aimlapi/resources/fine_tuning/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/resources/fine_tuning/alpha/alpha.py +3 -0
- aimlapi/resources/fine_tuning/alpha/graders.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/checkpoints/permissions.py +3 -0
- aimlapi/resources/fine_tuning/fine_tuning.py +3 -0
- aimlapi/resources/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/resources/fine_tuning/jobs/checkpoints.py +3 -0
- aimlapi/resources/fine_tuning/jobs/jobs.py +3 -0
- aimlapi/resources/images.py +184 -0
- aimlapi/resources/models.py +3 -0
- aimlapi/resources/moderations.py +3 -0
- aimlapi/resources/realtime/__init__.py +3 -0
- aimlapi/resources/realtime/calls.py +3 -0
- aimlapi/resources/realtime/client_secrets.py +3 -0
- aimlapi/resources/realtime/realtime.py +3 -0
- aimlapi/resources/responses/__init__.py +4 -0
- aimlapi/resources/responses/input_items.py +3 -0
- aimlapi/resources/responses/input_tokens.py +3 -0
- aimlapi/resources/responses/responses.py +229 -0
- aimlapi/resources/uploads/__init__.py +19 -0
- aimlapi/resources/uploads/parts.py +3 -0
- aimlapi/resources/uploads/uploads.py +99 -0
- aimlapi/resources/vector_stores/__init__.py +3 -0
- aimlapi/resources/vector_stores/file_batches.py +3 -0
- aimlapi/resources/vector_stores/files.py +3 -0
- aimlapi/resources/vector_stores/vector_stores.py +3 -0
- aimlapi/resources/videos.py +267 -0
- aimlapi/resources/webhooks.py +3 -0
- aimlapi/types/__init__.py +3 -0
- aimlapi/types/audio/__init__.py +3 -0
- aimlapi/types/audio/speech_create_params.py +3 -0
- aimlapi/types/audio/speech_model.py +3 -0
- aimlapi/types/audio/transcription.py +3 -0
- aimlapi/types/audio/transcription_create_params.py +3 -0
- aimlapi/types/audio/transcription_create_response.py +3 -0
- aimlapi/types/audio/transcription_diarized.py +3 -0
- aimlapi/types/audio/transcription_diarized_segment.py +3 -0
- aimlapi/types/audio/transcription_include.py +3 -0
- aimlapi/types/audio/transcription_segment.py +3 -0
- aimlapi/types/audio/transcription_stream_event.py +3 -0
- aimlapi/types/audio/transcription_text_delta_event.py +3 -0
- aimlapi/types/audio/transcription_text_done_event.py +3 -0
- aimlapi/types/audio/transcription_text_segment_event.py +3 -0
- aimlapi/types/audio/transcription_verbose.py +3 -0
- aimlapi/types/audio/transcription_word.py +3 -0
- aimlapi/types/audio/translation.py +3 -0
- aimlapi/types/audio/translation_create_params.py +3 -0
- aimlapi/types/audio/translation_create_response.py +3 -0
- aimlapi/types/audio/translation_verbose.py +3 -0
- aimlapi/types/audio_model.py +3 -0
- aimlapi/types/audio_response_format.py +3 -0
- aimlapi/types/auto_file_chunking_strategy_param.py +3 -0
- aimlapi/types/batch.py +3 -0
- aimlapi/types/batch_create_params.py +3 -0
- aimlapi/types/batch_error.py +3 -0
- aimlapi/types/batch_list_params.py +3 -0
- aimlapi/types/batch_request_counts.py +3 -0
- aimlapi/types/batch_usage.py +3 -0
- aimlapi/types/beta/__init__.py +3 -0
- aimlapi/types/beta/assistant.py +3 -0
- aimlapi/types/beta/assistant_create_params.py +3 -0
- aimlapi/types/beta/assistant_deleted.py +3 -0
- aimlapi/types/beta/assistant_list_params.py +3 -0
- aimlapi/types/beta/assistant_response_format_option.py +3 -0
- aimlapi/types/beta/assistant_response_format_option_param.py +3 -0
- aimlapi/types/beta/assistant_stream_event.py +3 -0
- aimlapi/types/beta/assistant_tool.py +3 -0
- aimlapi/types/beta/assistant_tool_choice.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_function_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_option_param.py +3 -0
- aimlapi/types/beta/assistant_tool_choice_param.py +3 -0
- aimlapi/types/beta/assistant_tool_param.py +3 -0
- aimlapi/types/beta/assistant_update_params.py +3 -0
- aimlapi/types/beta/chat/__init__.py +3 -0
- aimlapi/types/beta/chatkit/__init__.py +3 -0
- aimlapi/types/beta/chatkit/chat_session.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_automatic_thread_titling.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_chatkit_configuration_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_expires_after_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_file_upload.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_history.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_rate_limits_param.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_status.py +3 -0
- aimlapi/types/beta/chatkit/chat_session_workflow_param.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_attachment.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_response_output_text.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_assistant_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_item_list.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_thread_user_message_item.py +3 -0
- aimlapi/types/beta/chatkit/chatkit_widget_item.py +3 -0
- aimlapi/types/beta/chatkit/session_create_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_delete_response.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_items_params.py +3 -0
- aimlapi/types/beta/chatkit/thread_list_params.py +3 -0
- aimlapi/types/beta/chatkit_workflow.py +3 -0
- aimlapi/types/beta/code_interpreter_tool.py +3 -0
- aimlapi/types/beta/code_interpreter_tool_param.py +3 -0
- aimlapi/types/beta/file_search_tool.py +3 -0
- aimlapi/types/beta/file_search_tool_param.py +3 -0
- aimlapi/types/beta/function_tool.py +3 -0
- aimlapi/types/beta/function_tool_param.py +3 -0
- aimlapi/types/beta/realtime/__init__.py +3 -0
- aimlapi/types/beta/realtime/conversation_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_content_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference.py +3 -0
- aimlapi/types/beta/realtime/conversation_item_with_reference_param.py +3 -0
- aimlapi/types/beta/realtime/error_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/beta/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event.py +3 -0
- aimlapi/types/beta/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/beta/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/beta/realtime/realtime_response.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_status.py +3 -0
- aimlapi/types/beta/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/beta/realtime/realtime_server_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event.py +3 -0
- aimlapi/types/beta/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event.py +3 -0
- aimlapi/types/beta/realtime/response_create_event_param.py +3 -0
- aimlapi/types/beta/realtime/response_created_event.py +3 -0
- aimlapi/types/beta/realtime/response_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/beta/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/beta/realtime/response_text_done_event.py +3 -0
- aimlapi/types/beta/realtime/session.py +3 -0
- aimlapi/types/beta/realtime/session_create_params.py +3 -0
- aimlapi/types/beta/realtime/session_create_response.py +3 -0
- aimlapi/types/beta/realtime/session_created_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event.py +3 -0
- aimlapi/types/beta/realtime/session_update_event_param.py +3 -0
- aimlapi/types/beta/realtime/session_updated_event.py +3 -0
- aimlapi/types/beta/realtime/transcription_session.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_create_params.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_update_param.py +3 -0
- aimlapi/types/beta/realtime/transcription_session_updated_event.py +3 -0
- aimlapi/types/beta/thread.py +3 -0
- aimlapi/types/beta/thread_create_and_run_params.py +3 -0
- aimlapi/types/beta/thread_create_params.py +3 -0
- aimlapi/types/beta/thread_deleted.py +3 -0
- aimlapi/types/beta/thread_update_params.py +3 -0
- aimlapi/types/beta/threads/__init__.py +3 -0
- aimlapi/types/beta/threads/annotation.py +3 -0
- aimlapi/types/beta/threads/annotation_delta.py +3 -0
- aimlapi/types/beta/threads/file_citation_annotation.py +3 -0
- aimlapi/types/beta/threads/file_citation_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_annotation.py +3 -0
- aimlapi/types/beta/threads/file_path_delta_annotation.py +3 -0
- aimlapi/types/beta/threads/image_file.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block.py +3 -0
- aimlapi/types/beta/threads/image_file_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_file_delta.py +3 -0
- aimlapi/types/beta/threads/image_file_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_file_param.py +3 -0
- aimlapi/types/beta/threads/image_url.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block.py +3 -0
- aimlapi/types/beta/threads/image_url_content_block_param.py +3 -0
- aimlapi/types/beta/threads/image_url_delta.py +3 -0
- aimlapi/types/beta/threads/image_url_delta_block.py +3 -0
- aimlapi/types/beta/threads/image_url_param.py +3 -0
- aimlapi/types/beta/threads/message.py +3 -0
- aimlapi/types/beta/threads/message_content.py +3 -0
- aimlapi/types/beta/threads/message_content_delta.py +3 -0
- aimlapi/types/beta/threads/message_content_part_param.py +3 -0
- aimlapi/types/beta/threads/message_create_params.py +3 -0
- aimlapi/types/beta/threads/message_deleted.py +3 -0
- aimlapi/types/beta/threads/message_delta.py +3 -0
- aimlapi/types/beta/threads/message_delta_event.py +3 -0
- aimlapi/types/beta/threads/message_list_params.py +3 -0
- aimlapi/types/beta/threads/message_update_params.py +3 -0
- aimlapi/types/beta/threads/refusal_content_block.py +3 -0
- aimlapi/types/beta/threads/refusal_delta_block.py +3 -0
- aimlapi/types/beta/threads/required_action_function_tool_call.py +3 -0
- aimlapi/types/beta/threads/run.py +3 -0
- aimlapi/types/beta/threads/run_create_params.py +3 -0
- aimlapi/types/beta/threads/run_list_params.py +3 -0
- aimlapi/types/beta/threads/run_status.py +3 -0
- aimlapi/types/beta/threads/run_submit_tool_outputs_params.py +3 -0
- aimlapi/types/beta/threads/run_update_params.py +3 -0
- aimlapi/types/beta/threads/runs/__init__.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_logs.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_output_image.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/code_interpreter_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/file_search_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/function_tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/message_creation_step_details.py +3 -0
- aimlapi/types/beta/threads/runs/run_step.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_event.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_delta_message_delta.py +3 -0
- aimlapi/types/beta/threads/runs/run_step_include.py +3 -0
- aimlapi/types/beta/threads/runs/step_list_params.py +3 -0
- aimlapi/types/beta/threads/runs/step_retrieve_params.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta.py +3 -0
- aimlapi/types/beta/threads/runs/tool_call_delta_object.py +3 -0
- aimlapi/types/beta/threads/runs/tool_calls_step_details.py +3 -0
- aimlapi/types/beta/threads/text.py +3 -0
- aimlapi/types/beta/threads/text_content_block.py +3 -0
- aimlapi/types/beta/threads/text_content_block_param.py +3 -0
- aimlapi/types/beta/threads/text_delta.py +3 -0
- aimlapi/types/beta/threads/text_delta_block.py +3 -0
- aimlapi/types/chat/__init__.py +3 -0
- aimlapi/types/chat/chat_completion.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_allowed_tools_param.py +3 -0
- aimlapi/types/chat/chat_completion_assistant_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_audio.py +3 -0
- aimlapi/types/chat/chat_completion_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_chunk.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_image_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_input_audio_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_refusal_param.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text.py +3 -0
- aimlapi/types/chat/chat_completion_content_part_text_param.py +3 -0
- aimlapi/types/chat/chat_completion_custom_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_deleted.py +3 -0
- aimlapi/types/chat/chat_completion_developer_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_call_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool.py +3 -0
- aimlapi/types/chat/chat_completion_function_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_message.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_custom_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_function_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_param.py +3 -0
- aimlapi/types/chat/chat_completion_message_tool_call_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_modality.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_custom_param.py +3 -0
- aimlapi/types/chat/chat_completion_named_tool_choice_param.py +3 -0
- aimlapi/types/chat/chat_completion_prediction_content_param.py +3 -0
- aimlapi/types/chat/chat_completion_reasoning_effort.py +3 -0
- aimlapi/types/chat/chat_completion_role.py +3 -0
- aimlapi/types/chat/chat_completion_store_message.py +3 -0
- aimlapi/types/chat/chat_completion_stream_options_param.py +3 -0
- aimlapi/types/chat/chat_completion_system_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_token_logprob.py +3 -0
- aimlapi/types/chat/chat_completion_tool_choice_option_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_message_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_param.py +3 -0
- aimlapi/types/chat/chat_completion_tool_union_param.py +3 -0
- aimlapi/types/chat/chat_completion_user_message_param.py +3 -0
- aimlapi/types/chat/completion_create_params.py +3 -0
- aimlapi/types/chat/completion_list_params.py +3 -0
- aimlapi/types/chat/completion_update_params.py +3 -0
- aimlapi/types/chat/completions/__init__.py +3 -0
- aimlapi/types/chat/completions/message_list_params.py +3 -0
- aimlapi/types/chat/parsed_chat_completion.py +3 -0
- aimlapi/types/chat/parsed_function_tool_call.py +3 -0
- aimlapi/types/chat_model.py +3 -0
- aimlapi/types/completion.py +3 -0
- aimlapi/types/completion_choice.py +3 -0
- aimlapi/types/completion_create_params.py +3 -0
- aimlapi/types/completion_usage.py +3 -0
- aimlapi/types/container_create_params.py +3 -0
- aimlapi/types/container_create_response.py +3 -0
- aimlapi/types/container_list_params.py +3 -0
- aimlapi/types/container_list_response.py +3 -0
- aimlapi/types/container_retrieve_response.py +3 -0
- aimlapi/types/containers/__init__.py +3 -0
- aimlapi/types/containers/file_create_params.py +3 -0
- aimlapi/types/containers/file_create_response.py +3 -0
- aimlapi/types/containers/file_list_params.py +3 -0
- aimlapi/types/containers/file_list_response.py +3 -0
- aimlapi/types/containers/file_retrieve_response.py +3 -0
- aimlapi/types/containers/files/__init__.py +3 -0
- aimlapi/types/conversations/__init__.py +3 -0
- aimlapi/types/conversations/computer_screenshot_content.py +3 -0
- aimlapi/types/conversations/conversation.py +3 -0
- aimlapi/types/conversations/conversation_create_params.py +3 -0
- aimlapi/types/conversations/conversation_deleted_resource.py +3 -0
- aimlapi/types/conversations/conversation_item.py +3 -0
- aimlapi/types/conversations/conversation_item_list.py +3 -0
- aimlapi/types/conversations/conversation_update_params.py +3 -0
- aimlapi/types/conversations/input_file_content.py +3 -0
- aimlapi/types/conversations/input_file_content_param.py +3 -0
- aimlapi/types/conversations/input_image_content.py +3 -0
- aimlapi/types/conversations/input_image_content_param.py +3 -0
- aimlapi/types/conversations/input_text_content.py +3 -0
- aimlapi/types/conversations/input_text_content_param.py +3 -0
- aimlapi/types/conversations/item_create_params.py +3 -0
- aimlapi/types/conversations/item_list_params.py +3 -0
- aimlapi/types/conversations/item_retrieve_params.py +3 -0
- aimlapi/types/conversations/message.py +3 -0
- aimlapi/types/conversations/output_text_content.py +3 -0
- aimlapi/types/conversations/output_text_content_param.py +3 -0
- aimlapi/types/conversations/refusal_content.py +3 -0
- aimlapi/types/conversations/refusal_content_param.py +3 -0
- aimlapi/types/conversations/summary_text_content.py +3 -0
- aimlapi/types/conversations/text_content.py +3 -0
- aimlapi/types/create_embedding_response.py +3 -0
- aimlapi/types/embedding.py +3 -0
- aimlapi/types/embedding_create_params.py +3 -0
- aimlapi/types/embedding_model.py +3 -0
- aimlapi/types/eval_create_params.py +3 -0
- aimlapi/types/eval_create_response.py +3 -0
- aimlapi/types/eval_custom_data_source_config.py +3 -0
- aimlapi/types/eval_delete_response.py +3 -0
- aimlapi/types/eval_list_params.py +3 -0
- aimlapi/types/eval_list_response.py +3 -0
- aimlapi/types/eval_retrieve_response.py +3 -0
- aimlapi/types/eval_stored_completions_data_source_config.py +3 -0
- aimlapi/types/eval_update_params.py +3 -0
- aimlapi/types/eval_update_response.py +3 -0
- aimlapi/types/evals/__init__.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_completions_run_data_source_param.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source.py +3 -0
- aimlapi/types/evals/create_eval_jsonl_run_data_source_param.py +3 -0
- aimlapi/types/evals/eval_api_error.py +3 -0
- aimlapi/types/evals/run_cancel_response.py +3 -0
- aimlapi/types/evals/run_create_params.py +3 -0
- aimlapi/types/evals/run_create_response.py +3 -0
- aimlapi/types/evals/run_delete_response.py +3 -0
- aimlapi/types/evals/run_list_params.py +3 -0
- aimlapi/types/evals/run_list_response.py +3 -0
- aimlapi/types/evals/run_retrieve_response.py +3 -0
- aimlapi/types/evals/runs/__init__.py +3 -0
- aimlapi/types/evals/runs/output_item_list_params.py +3 -0
- aimlapi/types/evals/runs/output_item_list_response.py +3 -0
- aimlapi/types/evals/runs/output_item_retrieve_response.py +3 -0
- aimlapi/types/file_chunking_strategy.py +3 -0
- aimlapi/types/file_chunking_strategy_param.py +3 -0
- aimlapi/types/file_content.py +3 -0
- aimlapi/types/file_create_params.py +3 -0
- aimlapi/types/file_deleted.py +3 -0
- aimlapi/types/file_list_params.py +3 -0
- aimlapi/types/file_object.py +3 -0
- aimlapi/types/file_purpose.py +3 -0
- aimlapi/types/fine_tuning/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/__init__.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_run_response.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_params.py +3 -0
- aimlapi/types/fine_tuning/alpha/grader_validate_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/__init__.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_create_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_delete_response.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_params.py +3 -0
- aimlapi/types/fine_tuning/checkpoints/permission_retrieve_response.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/dpo_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/dpo_method.py +3 -0
- aimlapi/types/fine_tuning/dpo_method_param.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_event.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration.py +3 -0
- aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +3 -0
- aimlapi/types/fine_tuning/job_create_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_events_params.py +3 -0
- aimlapi/types/fine_tuning/job_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/__init__.py +3 -0
- aimlapi/types/fine_tuning/jobs/checkpoint_list_params.py +3 -0
- aimlapi/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method.py +3 -0
- aimlapi/types/fine_tuning/reinforcement_method_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters.py +3 -0
- aimlapi/types/fine_tuning/supervised_hyperparameters_param.py +3 -0
- aimlapi/types/fine_tuning/supervised_method.py +3 -0
- aimlapi/types/fine_tuning/supervised_method_param.py +3 -0
- aimlapi/types/graders/__init__.py +3 -0
- aimlapi/types/graders/label_model_grader.py +3 -0
- aimlapi/types/graders/label_model_grader_param.py +3 -0
- aimlapi/types/graders/multi_grader.py +3 -0
- aimlapi/types/graders/multi_grader_param.py +3 -0
- aimlapi/types/graders/python_grader.py +3 -0
- aimlapi/types/graders/python_grader_param.py +3 -0
- aimlapi/types/graders/score_model_grader.py +3 -0
- aimlapi/types/graders/score_model_grader_param.py +3 -0
- aimlapi/types/graders/string_check_grader.py +3 -0
- aimlapi/types/graders/string_check_grader_param.py +3 -0
- aimlapi/types/graders/text_similarity_grader.py +3 -0
- aimlapi/types/graders/text_similarity_grader_param.py +3 -0
- aimlapi/types/image.py +3 -0
- aimlapi/types/image_create_variation_params.py +3 -0
- aimlapi/types/image_edit_completed_event.py +3 -0
- aimlapi/types/image_edit_params.py +3 -0
- aimlapi/types/image_edit_partial_image_event.py +3 -0
- aimlapi/types/image_edit_stream_event.py +3 -0
- aimlapi/types/image_gen_completed_event.py +3 -0
- aimlapi/types/image_gen_partial_image_event.py +3 -0
- aimlapi/types/image_gen_stream_event.py +3 -0
- aimlapi/types/image_generate_params.py +3 -0
- aimlapi/types/image_model.py +3 -0
- aimlapi/types/images_response.py +3 -0
- aimlapi/types/model.py +3 -0
- aimlapi/types/model_deleted.py +3 -0
- aimlapi/types/moderation.py +3 -0
- aimlapi/types/moderation_create_params.py +3 -0
- aimlapi/types/moderation_create_response.py +3 -0
- aimlapi/types/moderation_image_url_input_param.py +3 -0
- aimlapi/types/moderation_model.py +3 -0
- aimlapi/types/moderation_multi_modal_input_param.py +3 -0
- aimlapi/types/moderation_text_input_param.py +3 -0
- aimlapi/types/other_file_chunking_strategy_object.py +3 -0
- aimlapi/types/realtime/__init__.py +3 -0
- aimlapi/types/realtime/audio_transcription.py +3 -0
- aimlapi/types/realtime/audio_transcription_param.py +3 -0
- aimlapi/types/realtime/call_accept_params.py +3 -0
- aimlapi/types/realtime/call_create_params.py +3 -0
- aimlapi/types/realtime/call_refer_params.py +3 -0
- aimlapi/types/realtime/call_reject_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_params.py +3 -0
- aimlapi/types/realtime/client_secret_create_response.py +3 -0
- aimlapi/types/realtime/conversation_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item.py +3 -0
- aimlapi/types/realtime/conversation_item_added.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event.py +3 -0
- aimlapi/types/realtime/conversation_item_create_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_created_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event.py +3 -0
- aimlapi/types/realtime/conversation_item_delete_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_deleted_event.py +3 -0
- aimlapi/types/realtime/conversation_item_done.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
- aimlapi/types/realtime/conversation_item_input_audio_transcription_segment.py +3 -0
- aimlapi/types/realtime/conversation_item_param.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event.py +3 -0
- aimlapi/types/realtime/conversation_item_retrieve_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event.py +3 -0
- aimlapi/types/realtime/conversation_item_truncate_event_param.py +3 -0
- aimlapi/types/realtime/conversation_item_truncated_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_append_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_cleared_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_commit_event_param.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_committed_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_started_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
- aimlapi/types/realtime/input_audio_buffer_timeout_triggered.py +3 -0
- aimlapi/types/realtime/log_prob_properties.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_completed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_failed.py +3 -0
- aimlapi/types/realtime/mcp_list_tools_in_progress.py +3 -0
- aimlapi/types/realtime/noise_reduction_type.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event.py +3 -0
- aimlapi/types/realtime/output_audio_buffer_clear_event_param.py +3 -0
- aimlapi/types/realtime/rate_limits_updated_event.py +3 -0
- aimlapi/types/realtime/realtime_audio_config.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_input_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_output_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_config_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats.py +3 -0
- aimlapi/types/realtime/realtime_audio_formats_param.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_client_event.py +3 -0
- aimlapi/types/realtime/realtime_client_event_param.py +3 -0
- aimlapi/types/realtime/realtime_connect_params.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_assistant_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_output_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_function_call_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_system_message_param.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message.py +3 -0
- aimlapi/types/realtime/realtime_conversation_item_user_message_param.py +3 -0
- aimlapi/types/realtime/realtime_error.py +3 -0
- aimlapi/types/realtime/realtime_error_event.py +3 -0
- aimlapi/types/realtime/realtime_function_tool.py +3 -0
- aimlapi/types/realtime/realtime_function_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_request_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response.py +3 -0
- aimlapi/types/realtime/realtime_mcp_approval_response_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools.py +3 -0
- aimlapi/types/realtime/realtime_mcp_list_tools_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_protocol_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_call_param.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error.py +3 -0
- aimlapi/types/realtime/realtime_mcp_tool_execution_error_param.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error.py +3 -0
- aimlapi/types/realtime/realtime_mcphttp_error_param.py +3 -0
- aimlapi/types/realtime/realtime_response.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output.py +3 -0
- aimlapi/types/realtime/realtime_response_create_audio_output_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool.py +3 -0
- aimlapi/types/realtime/realtime_response_create_mcp_tool_param.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params.py +3 -0
- aimlapi/types/realtime/realtime_response_create_params_param.py +3 -0
- aimlapi/types/realtime/realtime_response_status.py +3 -0
- aimlapi/types/realtime/realtime_response_usage.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_input_token_details.py +3 -0
- aimlapi/types/realtime/realtime_response_usage_output_token_details.py +3 -0
- aimlapi/types/realtime/realtime_server_event.py +3 -0
- aimlapi/types/realtime/realtime_session_client_secret.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config.py +3 -0
- aimlapi/types/realtime/realtime_tool_choice_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_param.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union.py +3 -0
- aimlapi/types/realtime/realtime_tools_config_union_param.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config.py +3 -0
- aimlapi/types/realtime/realtime_tracing_config_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_audio_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_request_param.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_create_response.py +3 -0
- aimlapi/types/realtime/realtime_transcription_session_turn_detection.py +3 -0
- aimlapi/types/realtime/realtime_truncation.py +3 -0
- aimlapi/types/realtime/realtime_truncation_param.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio.py +3 -0
- aimlapi/types/realtime/realtime_truncation_retention_ratio_param.py +3 -0
- aimlapi/types/realtime/response_audio_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_done_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/realtime/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event.py +3 -0
- aimlapi/types/realtime/response_cancel_event_param.py +3 -0
- aimlapi/types/realtime/response_content_part_added_event.py +3 -0
- aimlapi/types/realtime/response_content_part_done_event.py +3 -0
- aimlapi/types/realtime/response_create_event.py +3 -0
- aimlapi/types/realtime/response_create_event_param.py +3 -0
- aimlapi/types/realtime/response_created_event.py +3 -0
- aimlapi/types/realtime/response_done_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/realtime/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_delta.py +3 -0
- aimlapi/types/realtime/response_mcp_call_arguments_done.py +3 -0
- aimlapi/types/realtime/response_mcp_call_completed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_failed.py +3 -0
- aimlapi/types/realtime/response_mcp_call_in_progress.py +3 -0
- aimlapi/types/realtime/response_output_item_added_event.py +3 -0
- aimlapi/types/realtime/response_output_item_done_event.py +3 -0
- aimlapi/types/realtime/response_text_delta_event.py +3 -0
- aimlapi/types/realtime/response_text_done_event.py +3 -0
- aimlapi/types/realtime/session_created_event.py +3 -0
- aimlapi/types/realtime/session_update_event.py +3 -0
- aimlapi/types/realtime/session_update_event_param.py +3 -0
- aimlapi/types/realtime/session_updated_event.py +3 -0
- aimlapi/types/responses/__init__.py +3 -0
- aimlapi/types/responses/computer_tool.py +3 -0
- aimlapi/types/responses/computer_tool_param.py +3 -0
- aimlapi/types/responses/custom_tool.py +3 -0
- aimlapi/types/responses/custom_tool_param.py +3 -0
- aimlapi/types/responses/easy_input_message.py +3 -0
- aimlapi/types/responses/easy_input_message_param.py +3 -0
- aimlapi/types/responses/file_search_tool.py +3 -0
- aimlapi/types/responses/file_search_tool_param.py +3 -0
- aimlapi/types/responses/function_tool.py +3 -0
- aimlapi/types/responses/function_tool_param.py +3 -0
- aimlapi/types/responses/input_item_list_params.py +3 -0
- aimlapi/types/responses/input_token_count_params.py +3 -0
- aimlapi/types/responses/input_token_count_response.py +3 -0
- aimlapi/types/responses/parsed_response.py +3 -0
- aimlapi/types/responses/response.py +3 -0
- aimlapi/types/responses/response_audio_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_done_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_delta_event.py +3 -0
- aimlapi/types/responses/response_audio_transcript_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_delta_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_code_done_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_completed_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_call_interpreting_event.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call.py +3 -0
- aimlapi/types/responses/response_code_interpreter_tool_call_param.py +3 -0
- aimlapi/types/responses/response_completed_event.py +3 -0
- aimlapi/types/responses/response_computer_tool_call.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_output_screenshot_param.py +3 -0
- aimlapi/types/responses/response_computer_tool_call_param.py +3 -0
- aimlapi/types/responses/response_content_part_added_event.py +3 -0
- aimlapi/types/responses/response_content_part_done_event.py +3 -0
- aimlapi/types/responses/response_conversation_param.py +3 -0
- aimlapi/types/responses/response_create_params.py +3 -0
- aimlapi/types/responses/response_created_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_delta_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_input_done_event.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_output_param.py +3 -0
- aimlapi/types/responses/response_custom_tool_call_param.py +3 -0
- aimlapi/types/responses/response_error.py +3 -0
- aimlapi/types/responses/response_error_event.py +3 -0
- aimlapi/types/responses/response_failed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_file_search_call_searching_event.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call.py +3 -0
- aimlapi/types/responses/response_file_search_tool_call_param.py +3 -0
- aimlapi/types/responses/response_format_text_config.py +3 -0
- aimlapi/types/responses/response_format_text_config_param.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config.py +3 -0
- aimlapi/types/responses/response_format_text_json_schema_config_param.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_function_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_function_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_list_param.py +3 -0
- aimlapi/types/responses/response_function_call_output_item_param.py +3 -0
- aimlapi/types/responses/response_function_tool_call.py +3 -0
- aimlapi/types/responses/response_function_tool_call_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_output_item.py +3 -0
- aimlapi/types/responses/response_function_tool_call_param.py +3 -0
- aimlapi/types/responses/response_function_web_search.py +3 -0
- aimlapi/types/responses/response_function_web_search_param.py +3 -0
- aimlapi/types/responses/response_image_gen_call_completed_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_generating_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_image_gen_call_partial_image_event.py +3 -0
- aimlapi/types/responses/response_in_progress_event.py +3 -0
- aimlapi/types/responses/response_includable.py +3 -0
- aimlapi/types/responses/response_incomplete_event.py +3 -0
- aimlapi/types/responses/response_input_audio.py +3 -0
- aimlapi/types/responses/response_input_audio_param.py +3 -0
- aimlapi/types/responses/response_input_content.py +3 -0
- aimlapi/types/responses/response_input_content_param.py +3 -0
- aimlapi/types/responses/response_input_file.py +3 -0
- aimlapi/types/responses/response_input_file_content.py +3 -0
- aimlapi/types/responses/response_input_file_content_param.py +3 -0
- aimlapi/types/responses/response_input_file_param.py +3 -0
- aimlapi/types/responses/response_input_image.py +3 -0
- aimlapi/types/responses/response_input_image_content.py +3 -0
- aimlapi/types/responses/response_input_image_content_param.py +3 -0
- aimlapi/types/responses/response_input_image_param.py +3 -0
- aimlapi/types/responses/response_input_item.py +3 -0
- aimlapi/types/responses/response_input_item_param.py +3 -0
- aimlapi/types/responses/response_input_message_content_list.py +3 -0
- aimlapi/types/responses/response_input_message_content_list_param.py +3 -0
- aimlapi/types/responses/response_input_message_item.py +3 -0
- aimlapi/types/responses/response_input_param.py +3 -0
- aimlapi/types/responses/response_input_text.py +3 -0
- aimlapi/types/responses/response_input_text_content.py +3 -0
- aimlapi/types/responses/response_input_text_content_param.py +3 -0
- aimlapi/types/responses/response_input_text_param.py +3 -0
- aimlapi/types/responses/response_item.py +3 -0
- aimlapi/types/responses/response_item_list.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_delta_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_arguments_done_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_completed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_failed_event.py +3 -0
- aimlapi/types/responses/response_mcp_list_tools_in_progress_event.py +3 -0
- aimlapi/types/responses/response_output_item.py +3 -0
- aimlapi/types/responses/response_output_item_added_event.py +3 -0
- aimlapi/types/responses/response_output_item_done_event.py +3 -0
- aimlapi/types/responses/response_output_message.py +3 -0
- aimlapi/types/responses/response_output_message_param.py +3 -0
- aimlapi/types/responses/response_output_refusal.py +3 -0
- aimlapi/types/responses/response_output_refusal_param.py +3 -0
- aimlapi/types/responses/response_output_text.py +3 -0
- aimlapi/types/responses/response_output_text_annotation_added_event.py +3 -0
- aimlapi/types/responses/response_output_text_param.py +3 -0
- aimlapi/types/responses/response_prompt.py +3 -0
- aimlapi/types/responses/response_prompt_param.py +3 -0
- aimlapi/types/responses/response_queued_event.py +3 -0
- aimlapi/types/responses/response_reasoning_item.py +3 -0
- aimlapi/types/responses/response_reasoning_item_param.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_added_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_part_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_summary_text_done_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_delta_event.py +3 -0
- aimlapi/types/responses/response_reasoning_text_done_event.py +3 -0
- aimlapi/types/responses/response_refusal_delta_event.py +3 -0
- aimlapi/types/responses/response_refusal_done_event.py +3 -0
- aimlapi/types/responses/response_retrieve_params.py +3 -0
- aimlapi/types/responses/response_status.py +3 -0
- aimlapi/types/responses/response_stream_event.py +3 -0
- aimlapi/types/responses/response_text_config.py +3 -0
- aimlapi/types/responses/response_text_config_param.py +3 -0
- aimlapi/types/responses/response_text_delta_event.py +3 -0
- aimlapi/types/responses/response_text_done_event.py +3 -0
- aimlapi/types/responses/response_usage.py +3 -0
- aimlapi/types/responses/response_web_search_call_completed_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_in_progress_event.py +3 -0
- aimlapi/types/responses/response_web_search_call_searching_event.py +3 -0
- aimlapi/types/responses/tool.py +3 -0
- aimlapi/types/responses/tool_choice_allowed.py +3 -0
- aimlapi/types/responses/tool_choice_allowed_param.py +3 -0
- aimlapi/types/responses/tool_choice_custom.py +3 -0
- aimlapi/types/responses/tool_choice_custom_param.py +3 -0
- aimlapi/types/responses/tool_choice_function.py +3 -0
- aimlapi/types/responses/tool_choice_function_param.py +3 -0
- aimlapi/types/responses/tool_choice_mcp.py +3 -0
- aimlapi/types/responses/tool_choice_mcp_param.py +3 -0
- aimlapi/types/responses/tool_choice_options.py +3 -0
- aimlapi/types/responses/tool_choice_types.py +3 -0
- aimlapi/types/responses/tool_choice_types_param.py +3 -0
- aimlapi/types/responses/tool_param.py +3 -0
- aimlapi/types/responses/web_search_preview_tool.py +3 -0
- aimlapi/types/responses/web_search_preview_tool_param.py +3 -0
- aimlapi/types/responses/web_search_tool.py +3 -0
- aimlapi/types/responses/web_search_tool_param.py +3 -0
- aimlapi/types/shared/__init__.py +3 -0
- aimlapi/types/shared/all_models.py +3 -0
- aimlapi/types/shared/chat_model.py +3 -0
- aimlapi/types/shared/comparison_filter.py +3 -0
- aimlapi/types/shared/compound_filter.py +3 -0
- aimlapi/types/shared/custom_tool_input_format.py +3 -0
- aimlapi/types/shared/error_object.py +3 -0
- aimlapi/types/shared/function_definition.py +3 -0
- aimlapi/types/shared/function_parameters.py +3 -0
- aimlapi/types/shared/metadata.py +3 -0
- aimlapi/types/shared/reasoning.py +3 -0
- aimlapi/types/shared/reasoning_effort.py +3 -0
- aimlapi/types/shared/response_format_json_object.py +3 -0
- aimlapi/types/shared/response_format_json_schema.py +3 -0
- aimlapi/types/shared/response_format_text.py +3 -0
- aimlapi/types/shared/response_format_text_grammar.py +3 -0
- aimlapi/types/shared/response_format_text_python.py +3 -0
- aimlapi/types/shared/responses_model.py +3 -0
- aimlapi/types/shared_params/__init__.py +3 -0
- aimlapi/types/shared_params/chat_model.py +3 -0
- aimlapi/types/shared_params/comparison_filter.py +3 -0
- aimlapi/types/shared_params/compound_filter.py +3 -0
- aimlapi/types/shared_params/custom_tool_input_format.py +3 -0
- aimlapi/types/shared_params/function_definition.py +3 -0
- aimlapi/types/shared_params/function_parameters.py +3 -0
- aimlapi/types/shared_params/metadata.py +3 -0
- aimlapi/types/shared_params/reasoning.py +3 -0
- aimlapi/types/shared_params/reasoning_effort.py +3 -0
- aimlapi/types/shared_params/response_format_json_object.py +3 -0
- aimlapi/types/shared_params/response_format_json_schema.py +3 -0
- aimlapi/types/shared_params/response_format_text.py +3 -0
- aimlapi/types/shared_params/responses_model.py +3 -0
- aimlapi/types/static_file_chunking_strategy.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object.py +3 -0
- aimlapi/types/static_file_chunking_strategy_object_param.py +3 -0
- aimlapi/types/static_file_chunking_strategy_param.py +3 -0
- aimlapi/types/upload.py +3 -0
- aimlapi/types/upload_complete_params.py +3 -0
- aimlapi/types/upload_create_params.py +3 -0
- aimlapi/types/uploads/__init__.py +3 -0
- aimlapi/types/uploads/part_create_params.py +3 -0
- aimlapi/types/uploads/upload_part.py +3 -0
- aimlapi/types/vector_store.py +3 -0
- aimlapi/types/vector_store_create_params.py +3 -0
- aimlapi/types/vector_store_deleted.py +3 -0
- aimlapi/types/vector_store_list_params.py +3 -0
- aimlapi/types/vector_store_search_params.py +3 -0
- aimlapi/types/vector_store_search_response.py +3 -0
- aimlapi/types/vector_store_update_params.py +3 -0
- aimlapi/types/vector_stores/__init__.py +3 -0
- aimlapi/types/vector_stores/file_batch_create_params.py +3 -0
- aimlapi/types/vector_stores/file_batch_list_files_params.py +3 -0
- aimlapi/types/vector_stores/file_content_response.py +3 -0
- aimlapi/types/vector_stores/file_create_params.py +3 -0
- aimlapi/types/vector_stores/file_list_params.py +3 -0
- aimlapi/types/vector_stores/file_update_params.py +3 -0
- aimlapi/types/vector_stores/vector_store_file.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_batch.py +3 -0
- aimlapi/types/vector_stores/vector_store_file_deleted.py +3 -0
- aimlapi/types/video.py +3 -0
- aimlapi/types/video_create_error.py +3 -0
- aimlapi/types/video_create_params.py +3 -0
- aimlapi/types/video_delete_response.py +3 -0
- aimlapi/types/video_download_content_params.py +3 -0
- aimlapi/types/video_list_params.py +3 -0
- aimlapi/types/video_model.py +3 -0
- aimlapi/types/video_remix_params.py +3 -0
- aimlapi/types/video_seconds.py +3 -0
- aimlapi/types/video_size.py +3 -0
- aimlapi/types/webhooks/__init__.py +3 -0
- aimlapi/types/webhooks/batch_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_expired_webhook_event.py +3 -0
- aimlapi/types/webhooks/batch_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_canceled_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/eval_run_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +3 -0
- aimlapi/types/webhooks/realtime_call_incoming_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_cancelled_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_completed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_failed_webhook_event.py +3 -0
- aimlapi/types/webhooks/response_incomplete_webhook_event.py +3 -0
- aimlapi/types/webhooks/unwrap_webhook_event.py +3 -0
- aimlapi/types/websocket_connection_options.py +3 -0
- aimlapi/version.py +3 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/METADATA +886 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/RECORD +1958 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/WHEEL +4 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/entry_points.txt +2 -0
- aimlapi_sdk_python-2.8.1b0.dist-info/licenses/LICENSE +201 -0
- openai/__init__.py +395 -0
- openai/__main__.py +3 -0
- openai/_base_client.py +2027 -0
- openai/_client.py +1272 -0
- openai/_compat.py +231 -0
- openai/_constants.py +14 -0
- openai/_exceptions.py +161 -0
- openai/_extras/__init__.py +3 -0
- openai/_extras/_common.py +21 -0
- openai/_extras/numpy_proxy.py +37 -0
- openai/_extras/pandas_proxy.py +28 -0
- openai/_extras/sounddevice_proxy.py +28 -0
- openai/_files.py +123 -0
- openai/_legacy_response.py +488 -0
- openai/_models.py +897 -0
- openai/_module_client.py +173 -0
- openai/_qs.py +150 -0
- openai/_resource.py +43 -0
- openai/_response.py +848 -0
- openai/_streaming.py +408 -0
- openai/_types.py +264 -0
- openai/_utils/__init__.py +67 -0
- openai/_utils/_compat.py +45 -0
- openai/_utils/_datetime_parse.py +136 -0
- openai/_utils/_logs.py +42 -0
- openai/_utils/_proxy.py +65 -0
- openai/_utils/_reflection.py +45 -0
- openai/_utils/_resources_proxy.py +24 -0
- openai/_utils/_streams.py +12 -0
- openai/_utils/_sync.py +58 -0
- openai/_utils/_transform.py +457 -0
- openai/_utils/_typing.py +156 -0
- openai/_utils/_utils.py +437 -0
- openai/_version.py +4 -0
- openai/cli/__init__.py +1 -0
- openai/cli/_api/__init__.py +1 -0
- openai/cli/_api/_main.py +17 -0
- openai/cli/_api/audio.py +108 -0
- openai/cli/_api/chat/__init__.py +13 -0
- openai/cli/_api/chat/completions.py +160 -0
- openai/cli/_api/completions.py +173 -0
- openai/cli/_api/files.py +80 -0
- openai/cli/_api/fine_tuning/__init__.py +13 -0
- openai/cli/_api/fine_tuning/jobs.py +170 -0
- openai/cli/_api/image.py +139 -0
- openai/cli/_api/models.py +45 -0
- openai/cli/_cli.py +233 -0
- openai/cli/_errors.py +21 -0
- openai/cli/_models.py +17 -0
- openai/cli/_progress.py +59 -0
- openai/cli/_tools/__init__.py +1 -0
- openai/cli/_tools/_main.py +17 -0
- openai/cli/_tools/fine_tunes.py +63 -0
- openai/cli/_tools/migrate.py +164 -0
- openai/cli/_utils.py +45 -0
- openai/helpers/__init__.py +4 -0
- openai/helpers/local_audio_player.py +165 -0
- openai/helpers/microphone.py +100 -0
- openai/lib/.keep +4 -0
- openai/lib/__init__.py +2 -0
- openai/lib/_old_api.py +72 -0
- openai/lib/_parsing/__init__.py +12 -0
- openai/lib/_parsing/_completions.py +305 -0
- openai/lib/_parsing/_responses.py +180 -0
- openai/lib/_pydantic.py +155 -0
- openai/lib/_realtime.py +92 -0
- openai/lib/_tools.py +66 -0
- openai/lib/_validators.py +809 -0
- openai/lib/azure.py +647 -0
- openai/lib/streaming/__init__.py +8 -0
- openai/lib/streaming/_assistants.py +1038 -0
- openai/lib/streaming/_deltas.py +64 -0
- openai/lib/streaming/chat/__init__.py +27 -0
- openai/lib/streaming/chat/_completions.py +770 -0
- openai/lib/streaming/chat/_events.py +123 -0
- openai/lib/streaming/chat/_types.py +20 -0
- openai/lib/streaming/responses/__init__.py +13 -0
- openai/lib/streaming/responses/_events.py +148 -0
- openai/lib/streaming/responses/_responses.py +372 -0
- openai/lib/streaming/responses/_types.py +10 -0
- openai/pagination.py +190 -0
- openai/py.typed +0 -0
- openai/resources/__init__.py +229 -0
- openai/resources/audio/__init__.py +61 -0
- openai/resources/audio/audio.py +166 -0
- openai/resources/audio/speech.py +255 -0
- openai/resources/audio/transcriptions.py +980 -0
- openai/resources/audio/translations.py +367 -0
- openai/resources/batches.py +530 -0
- openai/resources/beta/__init__.py +61 -0
- openai/resources/beta/assistants.py +1049 -0
- openai/resources/beta/beta.py +187 -0
- openai/resources/beta/chatkit/__init__.py +47 -0
- openai/resources/beta/chatkit/chatkit.py +134 -0
- openai/resources/beta/chatkit/sessions.py +301 -0
- openai/resources/beta/chatkit/threads.py +521 -0
- openai/resources/beta/realtime/__init__.py +47 -0
- openai/resources/beta/realtime/realtime.py +1094 -0
- openai/resources/beta/realtime/sessions.py +424 -0
- openai/resources/beta/realtime/transcription_sessions.py +282 -0
- openai/resources/beta/threads/__init__.py +47 -0
- openai/resources/beta/threads/messages.py +718 -0
- openai/resources/beta/threads/runs/__init__.py +33 -0
- openai/resources/beta/threads/runs/runs.py +3122 -0
- openai/resources/beta/threads/runs/steps.py +399 -0
- openai/resources/beta/threads/threads.py +1935 -0
- openai/resources/chat/__init__.py +33 -0
- openai/resources/chat/chat.py +102 -0
- openai/resources/chat/completions/__init__.py +33 -0
- openai/resources/chat/completions/completions.py +3143 -0
- openai/resources/chat/completions/messages.py +212 -0
- openai/resources/completions.py +1160 -0
- openai/resources/containers/__init__.py +33 -0
- openai/resources/containers/containers.py +510 -0
- openai/resources/containers/files/__init__.py +33 -0
- openai/resources/containers/files/content.py +173 -0
- openai/resources/containers/files/files.py +545 -0
- openai/resources/conversations/__init__.py +33 -0
- openai/resources/conversations/conversations.py +486 -0
- openai/resources/conversations/items.py +557 -0
- openai/resources/embeddings.py +298 -0
- openai/resources/evals/__init__.py +33 -0
- openai/resources/evals/evals.py +662 -0
- openai/resources/evals/runs/__init__.py +33 -0
- openai/resources/evals/runs/output_items.py +315 -0
- openai/resources/evals/runs/runs.py +634 -0
- openai/resources/files.py +770 -0
- openai/resources/fine_tuning/__init__.py +61 -0
- openai/resources/fine_tuning/alpha/__init__.py +33 -0
- openai/resources/fine_tuning/alpha/alpha.py +102 -0
- openai/resources/fine_tuning/alpha/graders.py +282 -0
- openai/resources/fine_tuning/checkpoints/__init__.py +33 -0
- openai/resources/fine_tuning/checkpoints/checkpoints.py +102 -0
- openai/resources/fine_tuning/checkpoints/permissions.py +418 -0
- openai/resources/fine_tuning/fine_tuning.py +166 -0
- openai/resources/fine_tuning/jobs/__init__.py +33 -0
- openai/resources/fine_tuning/jobs/checkpoints.py +199 -0
- openai/resources/fine_tuning/jobs/jobs.py +918 -0
- openai/resources/images.py +1858 -0
- openai/resources/models.py +306 -0
- openai/resources/moderations.py +197 -0
- openai/resources/realtime/__init__.py +47 -0
- openai/resources/realtime/calls.py +764 -0
- openai/resources/realtime/client_secrets.py +189 -0
- openai/resources/realtime/realtime.py +1079 -0
- openai/resources/responses/__init__.py +47 -0
- openai/resources/responses/input_items.py +226 -0
- openai/resources/responses/input_tokens.py +309 -0
- openai/resources/responses/responses.py +3130 -0
- openai/resources/uploads/__init__.py +33 -0
- openai/resources/uploads/parts.py +205 -0
- openai/resources/uploads/uploads.py +719 -0
- openai/resources/vector_stores/__init__.py +47 -0
- openai/resources/vector_stores/file_batches.py +813 -0
- openai/resources/vector_stores/files.py +939 -0
- openai/resources/vector_stores/vector_stores.py +875 -0
- openai/resources/videos.py +847 -0
- openai/resources/webhooks.py +210 -0
- openai/types/__init__.py +115 -0
- openai/types/audio/__init__.py +23 -0
- openai/types/audio/speech_create_params.py +57 -0
- openai/types/audio/speech_model.py +7 -0
- openai/types/audio/transcription.py +71 -0
- openai/types/audio/transcription_create_params.py +172 -0
- openai/types/audio/transcription_create_response.py +12 -0
- openai/types/audio/transcription_diarized.py +63 -0
- openai/types/audio/transcription_diarized_segment.py +32 -0
- openai/types/audio/transcription_include.py +7 -0
- openai/types/audio/transcription_segment.py +49 -0
- openai/types/audio/transcription_stream_event.py +16 -0
- openai/types/audio/transcription_text_delta_event.py +41 -0
- openai/types/audio/transcription_text_done_event.py +63 -0
- openai/types/audio/transcription_text_segment_event.py +27 -0
- openai/types/audio/transcription_verbose.py +38 -0
- openai/types/audio/transcription_word.py +16 -0
- openai/types/audio/translation.py +9 -0
- openai/types/audio/translation_create_params.py +49 -0
- openai/types/audio/translation_create_response.py +11 -0
- openai/types/audio/translation_verbose.py +22 -0
- openai/types/audio_model.py +7 -0
- openai/types/audio_response_format.py +7 -0
- openai/types/auto_file_chunking_strategy_param.py +12 -0
- openai/types/batch.py +104 -0
- openai/types/batch_create_params.py +72 -0
- openai/types/batch_error.py +21 -0
- openai/types/batch_list_params.py +24 -0
- openai/types/batch_request_counts.py +16 -0
- openai/types/batch_usage.py +35 -0
- openai/types/beta/__init__.py +34 -0
- openai/types/beta/assistant.py +134 -0
- openai/types/beta/assistant_create_params.py +220 -0
- openai/types/beta/assistant_deleted.py +15 -0
- openai/types/beta/assistant_list_params.py +39 -0
- openai/types/beta/assistant_response_format_option.py +14 -0
- openai/types/beta/assistant_response_format_option_param.py +16 -0
- openai/types/beta/assistant_stream_event.py +294 -0
- openai/types/beta/assistant_tool.py +15 -0
- openai/types/beta/assistant_tool_choice.py +16 -0
- openai/types/beta/assistant_tool_choice_function.py +10 -0
- openai/types/beta/assistant_tool_choice_function_param.py +12 -0
- openai/types/beta/assistant_tool_choice_option.py +10 -0
- openai/types/beta/assistant_tool_choice_option_param.py +12 -0
- openai/types/beta/assistant_tool_choice_param.py +16 -0
- openai/types/beta/assistant_tool_param.py +14 -0
- openai/types/beta/assistant_update_params.py +191 -0
- openai/types/beta/chat/__init__.py +3 -0
- openai/types/beta/chatkit/__init__.py +32 -0
- openai/types/beta/chatkit/chat_session.py +43 -0
- openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +10 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration.py +19 -0
- openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +59 -0
- openai/types/beta/chatkit/chat_session_expires_after_param.py +15 -0
- openai/types/beta/chatkit/chat_session_file_upload.py +18 -0
- openai/types/beta/chatkit/chat_session_history.py +18 -0
- openai/types/beta/chatkit/chat_session_rate_limits.py +10 -0
- openai/types/beta/chatkit/chat_session_rate_limits_param.py +12 -0
- openai/types/beta/chatkit/chat_session_status.py +7 -0
- openai/types/beta/chatkit/chat_session_workflow_param.py +34 -0
- openai/types/beta/chatkit/chatkit_attachment.py +25 -0
- openai/types/beta/chatkit/chatkit_response_output_text.py +62 -0
- openai/types/beta/chatkit/chatkit_thread.py +56 -0
- openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +29 -0
- openai/types/beta/chatkit/chatkit_thread_item_list.py +144 -0
- openai/types/beta/chatkit/chatkit_thread_user_message_item.py +77 -0
- openai/types/beta/chatkit/chatkit_widget_item.py +27 -0
- openai/types/beta/chatkit/session_create_params.py +35 -0
- openai/types/beta/chatkit/thread_delete_response.py +18 -0
- openai/types/beta/chatkit/thread_list_items_params.py +27 -0
- openai/types/beta/chatkit/thread_list_params.py +33 -0
- openai/types/beta/chatkit_workflow.py +32 -0
- openai/types/beta/code_interpreter_tool.py +12 -0
- openai/types/beta/code_interpreter_tool_param.py +12 -0
- openai/types/beta/file_search_tool.py +55 -0
- openai/types/beta/file_search_tool_param.py +54 -0
- openai/types/beta/function_tool.py +15 -0
- openai/types/beta/function_tool_param.py +16 -0
- openai/types/beta/realtime/__init__.py +96 -0
- openai/types/beta/realtime/conversation_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item.py +61 -0
- openai/types/beta/realtime/conversation_item_content.py +32 -0
- openai/types/beta/realtime/conversation_item_content_param.py +31 -0
- openai/types/beta/realtime/conversation_item_create_event.py +29 -0
- openai/types/beta/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/beta/realtime/conversation_item_created_event.py +27 -0
- openai/types/beta/realtime/conversation_item_delete_event.py +19 -0
- openai/types/beta/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +87 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +39 -0
- openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/beta/realtime/conversation_item_param.py +62 -0
- openai/types/beta/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/beta/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/beta/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/beta/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/beta/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/beta/realtime/conversation_item_with_reference.py +87 -0
- openai/types/beta/realtime/conversation_item_with_reference_param.py +87 -0
- openai/types/beta/realtime/error_event.py +36 -0
- openai/types/beta/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/beta/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/beta/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/beta/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/beta/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/beta/realtime/rate_limits_updated_event.py +33 -0
- openai/types/beta/realtime/realtime_client_event.py +47 -0
- openai/types/beta/realtime/realtime_client_event_param.py +44 -0
- openai/types/beta/realtime/realtime_connect_params.py +11 -0
- openai/types/beta/realtime/realtime_response.py +87 -0
- openai/types/beta/realtime/realtime_response_status.py +39 -0
- openai/types/beta/realtime/realtime_response_usage.py +52 -0
- openai/types/beta/realtime/realtime_server_event.py +133 -0
- openai/types/beta/realtime/response_audio_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_done_event.py +27 -0
- openai/types/beta/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/beta/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/beta/realtime/response_cancel_event.py +22 -0
- openai/types/beta/realtime/response_cancel_event_param.py +21 -0
- openai/types/beta/realtime/response_content_part_added_event.py +45 -0
- openai/types/beta/realtime/response_content_part_done_event.py +45 -0
- openai/types/beta/realtime/response_create_event.py +121 -0
- openai/types/beta/realtime/response_create_event_param.py +122 -0
- openai/types/beta/realtime/response_created_event.py +19 -0
- openai/types/beta/realtime/response_done_event.py +19 -0
- openai/types/beta/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/beta/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/beta/realtime/response_output_item_added_event.py +25 -0
- openai/types/beta/realtime/response_output_item_done_event.py +25 -0
- openai/types/beta/realtime/response_text_delta_event.py +30 -0
- openai/types/beta/realtime/response_text_done_event.py +30 -0
- openai/types/beta/realtime/session.py +279 -0
- openai/types/beta/realtime/session_create_params.py +298 -0
- openai/types/beta/realtime/session_create_response.py +196 -0
- openai/types/beta/realtime/session_created_event.py +19 -0
- openai/types/beta/realtime/session_update_event.py +312 -0
- openai/types/beta/realtime/session_update_event_param.py +310 -0
- openai/types/beta/realtime/session_updated_event.py +19 -0
- openai/types/beta/realtime/transcription_session.py +100 -0
- openai/types/beta/realtime/transcription_session_create_params.py +173 -0
- openai/types/beta/realtime/transcription_session_update.py +185 -0
- openai/types/beta/realtime/transcription_session_update_param.py +185 -0
- openai/types/beta/realtime/transcription_session_updated_event.py +24 -0
- openai/types/beta/thread.py +63 -0
- openai/types/beta/thread_create_and_run_params.py +397 -0
- openai/types/beta/thread_create_params.py +186 -0
- openai/types/beta/thread_deleted.py +15 -0
- openai/types/beta/thread_update_params.py +56 -0
- openai/types/beta/threads/__init__.py +46 -0
- openai/types/beta/threads/annotation.py +12 -0
- openai/types/beta/threads/annotation_delta.py +14 -0
- openai/types/beta/threads/file_citation_annotation.py +26 -0
- openai/types/beta/threads/file_citation_delta_annotation.py +33 -0
- openai/types/beta/threads/file_path_annotation.py +26 -0
- openai/types/beta/threads/file_path_delta_annotation.py +30 -0
- openai/types/beta/threads/image_file.py +23 -0
- openai/types/beta/threads/image_file_content_block.py +15 -0
- openai/types/beta/threads/image_file_content_block_param.py +16 -0
- openai/types/beta/threads/image_file_delta.py +23 -0
- openai/types/beta/threads/image_file_delta_block.py +19 -0
- openai/types/beta/threads/image_file_param.py +22 -0
- openai/types/beta/threads/image_url.py +23 -0
- openai/types/beta/threads/image_url_content_block.py +15 -0
- openai/types/beta/threads/image_url_content_block_param.py +16 -0
- openai/types/beta/threads/image_url_delta.py +22 -0
- openai/types/beta/threads/image_url_delta_block.py +19 -0
- openai/types/beta/threads/image_url_param.py +22 -0
- openai/types/beta/threads/message.py +103 -0
- openai/types/beta/threads/message_content.py +18 -0
- openai/types/beta/threads/message_content_delta.py +17 -0
- openai/types/beta/threads/message_content_part_param.py +14 -0
- openai/types/beta/threads/message_create_params.py +55 -0
- openai/types/beta/threads/message_deleted.py +15 -0
- openai/types/beta/threads/message_delta.py +17 -0
- openai/types/beta/threads/message_delta_event.py +19 -0
- openai/types/beta/threads/message_list_params.py +42 -0
- openai/types/beta/threads/message_update_params.py +24 -0
- openai/types/beta/threads/refusal_content_block.py +14 -0
- openai/types/beta/threads/refusal_delta_block.py +18 -0
- openai/types/beta/threads/required_action_function_tool_call.py +34 -0
- openai/types/beta/threads/run.py +245 -0
- openai/types/beta/threads/run_create_params.py +268 -0
- openai/types/beta/threads/run_list_params.py +39 -0
- openai/types/beta/threads/run_status.py +17 -0
- openai/types/beta/threads/run_submit_tool_outputs_params.py +52 -0
- openai/types/beta/threads/run_update_params.py +24 -0
- openai/types/beta/threads/runs/__init__.py +24 -0
- openai/types/beta/threads/runs/code_interpreter_logs.py +19 -0
- openai/types/beta/threads/runs/code_interpreter_output_image.py +26 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call.py +70 -0
- openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +44 -0
- openai/types/beta/threads/runs/file_search_tool_call.py +78 -0
- openai/types/beta/threads/runs/file_search_tool_call_delta.py +25 -0
- openai/types/beta/threads/runs/function_tool_call.py +38 -0
- openai/types/beta/threads/runs/function_tool_call_delta.py +41 -0
- openai/types/beta/threads/runs/message_creation_step_details.py +19 -0
- openai/types/beta/threads/runs/run_step.py +115 -0
- openai/types/beta/threads/runs/run_step_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_delta_event.py +19 -0
- openai/types/beta/threads/runs/run_step_delta_message_delta.py +20 -0
- openai/types/beta/threads/runs/run_step_include.py +7 -0
- openai/types/beta/threads/runs/step_list_params.py +56 -0
- openai/types/beta/threads/runs/step_retrieve_params.py +28 -0
- openai/types/beta/threads/runs/tool_call.py +15 -0
- openai/types/beta/threads/runs/tool_call_delta.py +16 -0
- openai/types/beta/threads/runs/tool_call_delta_object.py +21 -0
- openai/types/beta/threads/runs/tool_calls_step_details.py +21 -0
- openai/types/beta/threads/text.py +15 -0
- openai/types/beta/threads/text_content_block.py +15 -0
- openai/types/beta/threads/text_content_block_param.py +15 -0
- openai/types/beta/threads/text_delta.py +15 -0
- openai/types/beta/threads/text_delta_block.py +19 -0
- openai/types/chat/__init__.py +102 -0
- openai/types/chat/chat_completion.py +89 -0
- openai/types/chat/chat_completion_allowed_tool_choice_param.py +17 -0
- openai/types/chat/chat_completion_allowed_tools_param.py +32 -0
- openai/types/chat/chat_completion_assistant_message_param.py +70 -0
- openai/types/chat/chat_completion_audio.py +25 -0
- openai/types/chat/chat_completion_audio_param.py +25 -0
- openai/types/chat/chat_completion_chunk.py +166 -0
- openai/types/chat/chat_completion_content_part_image.py +27 -0
- openai/types/chat/chat_completion_content_part_image_param.py +26 -0
- openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
- openai/types/chat/chat_completion_content_part_param.py +41 -0
- openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
- openai/types/chat/chat_completion_content_part_text.py +15 -0
- openai/types/chat/chat_completion_content_part_text_param.py +15 -0
- openai/types/chat/chat_completion_custom_tool_param.py +58 -0
- openai/types/chat/chat_completion_deleted.py +18 -0
- openai/types/chat/chat_completion_developer_message_param.py +25 -0
- openai/types/chat/chat_completion_function_call_option_param.py +12 -0
- openai/types/chat/chat_completion_function_message_param.py +19 -0
- openai/types/chat/chat_completion_function_tool.py +15 -0
- openai/types/chat/chat_completion_function_tool_param.py +16 -0
- openai/types/chat/chat_completion_message.py +79 -0
- openai/types/chat/chat_completion_message_custom_tool_call.py +26 -0
- openai/types/chat/chat_completion_message_custom_tool_call_param.py +26 -0
- openai/types/chat/chat_completion_message_function_tool_call.py +31 -0
- openai/types/chat/chat_completion_message_function_tool_call_param.py +31 -0
- openai/types/chat/chat_completion_message_param.py +24 -0
- openai/types/chat/chat_completion_message_tool_call.py +17 -0
- openai/types/chat/chat_completion_message_tool_call_param.py +14 -0
- openai/types/chat/chat_completion_message_tool_call_union_param.py +15 -0
- openai/types/chat/chat_completion_modality.py +7 -0
- openai/types/chat/chat_completion_named_tool_choice_custom_param.py +19 -0
- openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
- openai/types/chat/chat_completion_prediction_content_param.py +25 -0
- openai/types/chat/chat_completion_reasoning_effort.py +7 -0
- openai/types/chat/chat_completion_role.py +7 -0
- openai/types/chat/chat_completion_store_message.py +23 -0
- openai/types/chat/chat_completion_stream_options_param.py +31 -0
- openai/types/chat/chat_completion_system_message_param.py +25 -0
- openai/types/chat/chat_completion_token_logprob.py +57 -0
- openai/types/chat/chat_completion_tool_choice_option_param.py +19 -0
- openai/types/chat/chat_completion_tool_message_param.py +21 -0
- openai/types/chat/chat_completion_tool_param.py +14 -0
- openai/types/chat/chat_completion_tool_union_param.py +13 -0
- openai/types/chat/chat_completion_user_message_param.py +25 -0
- openai/types/chat/completion_create_params.py +450 -0
- openai/types/chat/completion_list_params.py +37 -0
- openai/types/chat/completion_update_params.py +22 -0
- openai/types/chat/completions/__init__.py +5 -0
- openai/types/chat/completions/message_list_params.py +21 -0
- openai/types/chat/parsed_chat_completion.py +40 -0
- openai/types/chat/parsed_function_tool_call.py +29 -0
- openai/types/chat_model.py +7 -0
- openai/types/completion.py +37 -0
- openai/types/completion_choice.py +35 -0
- openai/types/completion_create_params.py +189 -0
- openai/types/completion_usage.py +54 -0
- openai/types/container_create_params.py +30 -0
- openai/types/container_create_response.py +40 -0
- openai/types/container_list_params.py +30 -0
- openai/types/container_list_response.py +40 -0
- openai/types/container_retrieve_response.py +40 -0
- openai/types/containers/__init__.py +9 -0
- openai/types/containers/file_create_params.py +17 -0
- openai/types/containers/file_create_response.py +30 -0
- openai/types/containers/file_list_params.py +30 -0
- openai/types/containers/file_list_response.py +30 -0
- openai/types/containers/file_retrieve_response.py +30 -0
- openai/types/containers/files/__init__.py +3 -0
- openai/types/conversations/__init__.py +27 -0
- openai/types/conversations/computer_screenshot_content.py +22 -0
- openai/types/conversations/conversation.py +30 -0
- openai/types/conversations/conversation_create_params.py +29 -0
- openai/types/conversations/conversation_deleted_resource.py +15 -0
- openai/types/conversations/conversation_item.py +230 -0
- openai/types/conversations/conversation_item_list.py +26 -0
- openai/types/conversations/conversation_update_params.py +22 -0
- openai/types/conversations/input_file_content.py +7 -0
- openai/types/conversations/input_file_content_param.py +7 -0
- openai/types/conversations/input_image_content.py +7 -0
- openai/types/conversations/input_image_content_param.py +7 -0
- openai/types/conversations/input_text_content.py +7 -0
- openai/types/conversations/input_text_content_param.py +7 -0
- openai/types/conversations/item_create_params.py +24 -0
- openai/types/conversations/item_list_params.py +50 -0
- openai/types/conversations/item_retrieve_params.py +22 -0
- openai/types/conversations/message.py +66 -0
- openai/types/conversations/output_text_content.py +7 -0
- openai/types/conversations/output_text_content_param.py +7 -0
- openai/types/conversations/refusal_content.py +7 -0
- openai/types/conversations/refusal_content_param.py +7 -0
- openai/types/conversations/summary_text_content.py +15 -0
- openai/types/conversations/text_content.py +13 -0
- openai/types/create_embedding_response.py +31 -0
- openai/types/embedding.py +23 -0
- openai/types/embedding_create_params.py +55 -0
- openai/types/embedding_model.py +7 -0
- openai/types/eval_create_params.py +202 -0
- openai/types/eval_create_response.py +111 -0
- openai/types/eval_custom_data_source_config.py +21 -0
- openai/types/eval_delete_response.py +13 -0
- openai/types/eval_list_params.py +27 -0
- openai/types/eval_list_response.py +111 -0
- openai/types/eval_retrieve_response.py +111 -0
- openai/types/eval_stored_completions_data_source_config.py +32 -0
- openai/types/eval_update_params.py +25 -0
- openai/types/eval_update_response.py +111 -0
- openai/types/evals/__init__.py +22 -0
- openai/types/evals/create_eval_completions_run_data_source.py +236 -0
- openai/types/evals/create_eval_completions_run_data_source_param.py +232 -0
- openai/types/evals/create_eval_jsonl_run_data_source.py +42 -0
- openai/types/evals/create_eval_jsonl_run_data_source_param.py +47 -0
- openai/types/evals/eval_api_error.py +13 -0
- openai/types/evals/run_cancel_response.py +417 -0
- openai/types/evals/run_create_params.py +340 -0
- openai/types/evals/run_create_response.py +417 -0
- openai/types/evals/run_delete_response.py +15 -0
- openai/types/evals/run_list_params.py +27 -0
- openai/types/evals/run_list_response.py +417 -0
- openai/types/evals/run_retrieve_response.py +417 -0
- openai/types/evals/runs/__init__.py +7 -0
- openai/types/evals/runs/output_item_list_params.py +30 -0
- openai/types/evals/runs/output_item_list_response.py +134 -0
- openai/types/evals/runs/output_item_retrieve_response.py +134 -0
- openai/types/file_chunking_strategy.py +14 -0
- openai/types/file_chunking_strategy_param.py +13 -0
- openai/types/file_content.py +7 -0
- openai/types/file_create_params.py +45 -0
- openai/types/file_deleted.py +15 -0
- openai/types/file_list_params.py +33 -0
- openai/types/file_object.py +58 -0
- openai/types/file_purpose.py +7 -0
- openai/types/fine_tuning/__init__.py +26 -0
- openai/types/fine_tuning/alpha/__init__.py +8 -0
- openai/types/fine_tuning/alpha/grader_run_params.py +40 -0
- openai/types/fine_tuning/alpha/grader_run_response.py +67 -0
- openai/types/fine_tuning/alpha/grader_validate_params.py +24 -0
- openai/types/fine_tuning/alpha/grader_validate_response.py +20 -0
- openai/types/fine_tuning/checkpoints/__init__.py +9 -0
- openai/types/fine_tuning/checkpoints/permission_create_params.py +14 -0
- openai/types/fine_tuning/checkpoints/permission_create_response.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_delete_response.py +18 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_params.py +21 -0
- openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +34 -0
- openai/types/fine_tuning/dpo_hyperparameters.py +36 -0
- openai/types/fine_tuning/dpo_hyperparameters_param.py +36 -0
- openai/types/fine_tuning/dpo_method.py +13 -0
- openai/types/fine_tuning/dpo_method_param.py +14 -0
- openai/types/fine_tuning/fine_tuning_job.py +161 -0
- openai/types/fine_tuning/fine_tuning_job_event.py +32 -0
- openai/types/fine_tuning/fine_tuning_job_integration.py +5 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +33 -0
- openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +21 -0
- openai/types/fine_tuning/job_create_params.py +176 -0
- openai/types/fine_tuning/job_list_events_params.py +15 -0
- openai/types/fine_tuning/job_list_params.py +23 -0
- openai/types/fine_tuning/jobs/__init__.py +6 -0
- openai/types/fine_tuning/jobs/checkpoint_list_params.py +15 -0
- openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
- openai/types/fine_tuning/reinforcement_hyperparameters.py +43 -0
- openai/types/fine_tuning/reinforcement_hyperparameters_param.py +43 -0
- openai/types/fine_tuning/reinforcement_method.py +24 -0
- openai/types/fine_tuning/reinforcement_method_param.py +27 -0
- openai/types/fine_tuning/supervised_hyperparameters.py +29 -0
- openai/types/fine_tuning/supervised_hyperparameters_param.py +29 -0
- openai/types/fine_tuning/supervised_method.py +13 -0
- openai/types/fine_tuning/supervised_method_param.py +14 -0
- openai/types/graders/__init__.py +16 -0
- openai/types/graders/label_model_grader.py +70 -0
- openai/types/graders/label_model_grader_param.py +77 -0
- openai/types/graders/multi_grader.py +32 -0
- openai/types/graders/multi_grader_param.py +35 -0
- openai/types/graders/python_grader.py +22 -0
- openai/types/graders/python_grader_param.py +21 -0
- openai/types/graders/score_model_grader.py +109 -0
- openai/types/graders/score_model_grader_param.py +115 -0
- openai/types/graders/string_check_grader.py +24 -0
- openai/types/graders/string_check_grader_param.py +24 -0
- openai/types/graders/text_similarity_grader.py +40 -0
- openai/types/graders/text_similarity_grader_param.py +42 -0
- openai/types/image.py +26 -0
- openai/types/image_create_variation_params.py +48 -0
- openai/types/image_edit_completed_event.py +55 -0
- openai/types/image_edit_params.py +145 -0
- openai/types/image_edit_partial_image_event.py +33 -0
- openai/types/image_edit_stream_event.py +14 -0
- openai/types/image_gen_completed_event.py +55 -0
- openai/types/image_gen_partial_image_event.py +33 -0
- openai/types/image_gen_stream_event.py +14 -0
- openai/types/image_generate_params.py +143 -0
- openai/types/image_model.py +7 -0
- openai/types/images_response.py +60 -0
- openai/types/model.py +21 -0
- openai/types/model_deleted.py +13 -0
- openai/types/moderation.py +186 -0
- openai/types/moderation_create_params.py +30 -0
- openai/types/moderation_create_response.py +19 -0
- openai/types/moderation_image_url_input_param.py +20 -0
- openai/types/moderation_model.py +9 -0
- openai/types/moderation_multi_modal_input_param.py +13 -0
- openai/types/moderation_text_input_param.py +15 -0
- openai/types/other_file_chunking_strategy_object.py +12 -0
- openai/types/realtime/__init__.py +237 -0
- openai/types/realtime/audio_transcription.py +37 -0
- openai/types/realtime/audio_transcription_param.py +34 -0
- openai/types/realtime/call_accept_params.py +122 -0
- openai/types/realtime/call_create_params.py +17 -0
- openai/types/realtime/call_refer_params.py +15 -0
- openai/types/realtime/call_reject_params.py +15 -0
- openai/types/realtime/client_secret_create_params.py +46 -0
- openai/types/realtime/client_secret_create_response.py +26 -0
- openai/types/realtime/conversation_created_event.py +27 -0
- openai/types/realtime/conversation_item.py +32 -0
- openai/types/realtime/conversation_item_added.py +26 -0
- openai/types/realtime/conversation_item_create_event.py +29 -0
- openai/types/realtime/conversation_item_create_event_param.py +29 -0
- openai/types/realtime/conversation_item_created_event.py +27 -0
- openai/types/realtime/conversation_item_delete_event.py +19 -0
- openai/types/realtime/conversation_item_delete_event_param.py +18 -0
- openai/types/realtime/conversation_item_deleted_event.py +18 -0
- openai/types/realtime/conversation_item_done.py +26 -0
- openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +79 -0
- openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +36 -0
- openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
- openai/types/realtime/conversation_item_input_audio_transcription_segment.py +36 -0
- openai/types/realtime/conversation_item_param.py +30 -0
- openai/types/realtime/conversation_item_retrieve_event.py +19 -0
- openai/types/realtime/conversation_item_retrieve_event_param.py +18 -0
- openai/types/realtime/conversation_item_truncate_event.py +32 -0
- openai/types/realtime/conversation_item_truncate_event_param.py +31 -0
- openai/types/realtime/conversation_item_truncated_event.py +24 -0
- openai/types/realtime/input_audio_buffer_append_event.py +23 -0
- openai/types/realtime/input_audio_buffer_append_event_param.py +22 -0
- openai/types/realtime/input_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/input_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_cleared_event.py +15 -0
- openai/types/realtime/input_audio_buffer_commit_event.py +16 -0
- openai/types/realtime/input_audio_buffer_commit_event_param.py +15 -0
- openai/types/realtime/input_audio_buffer_committed_event.py +25 -0
- openai/types/realtime/input_audio_buffer_speech_started_event.py +26 -0
- openai/types/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
- openai/types/realtime/input_audio_buffer_timeout_triggered.py +30 -0
- openai/types/realtime/log_prob_properties.py +18 -0
- openai/types/realtime/mcp_list_tools_completed.py +18 -0
- openai/types/realtime/mcp_list_tools_failed.py +18 -0
- openai/types/realtime/mcp_list_tools_in_progress.py +18 -0
- openai/types/realtime/noise_reduction_type.py +7 -0
- openai/types/realtime/output_audio_buffer_clear_event.py +16 -0
- openai/types/realtime/output_audio_buffer_clear_event_param.py +15 -0
- openai/types/realtime/rate_limits_updated_event.py +33 -0
- openai/types/realtime/realtime_audio_config.py +15 -0
- openai/types/realtime/realtime_audio_config_input.py +63 -0
- openai/types/realtime/realtime_audio_config_input_param.py +65 -0
- openai/types/realtime/realtime_audio_config_output.py +36 -0
- openai/types/realtime/realtime_audio_config_output_param.py +35 -0
- openai/types/realtime/realtime_audio_config_param.py +16 -0
- openai/types/realtime/realtime_audio_formats.py +30 -0
- openai/types/realtime/realtime_audio_formats_param.py +29 -0
- openai/types/realtime/realtime_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_client_event.py +36 -0
- openai/types/realtime/realtime_client_event_param.py +34 -0
- openai/types/realtime/realtime_connect_params.py +13 -0
- openai/types/realtime/realtime_conversation_item_assistant_message.py +58 -0
- openai/types/realtime/realtime_conversation_item_assistant_message_param.py +58 -0
- openai/types/realtime/realtime_conversation_item_function_call.py +41 -0
- openai/types/realtime/realtime_conversation_item_function_call_output.py +37 -0
- openai/types/realtime/realtime_conversation_item_function_call_output_param.py +36 -0
- openai/types/realtime/realtime_conversation_item_function_call_param.py +40 -0
- openai/types/realtime/realtime_conversation_item_system_message.py +42 -0
- openai/types/realtime/realtime_conversation_item_system_message_param.py +42 -0
- openai/types/realtime/realtime_conversation_item_user_message.py +69 -0
- openai/types/realtime/realtime_conversation_item_user_message_param.py +69 -0
- openai/types/realtime/realtime_error.py +24 -0
- openai/types/realtime/realtime_error_event.py +19 -0
- openai/types/realtime/realtime_function_tool.py +25 -0
- openai/types/realtime/realtime_function_tool_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request.py +24 -0
- openai/types/realtime/realtime_mcp_approval_request_param.py +24 -0
- openai/types/realtime/realtime_mcp_approval_response.py +25 -0
- openai/types/realtime/realtime_mcp_approval_response_param.py +25 -0
- openai/types/realtime/realtime_mcp_list_tools.py +36 -0
- openai/types/realtime/realtime_mcp_list_tools_param.py +36 -0
- openai/types/realtime/realtime_mcp_protocol_error.py +15 -0
- openai/types/realtime/realtime_mcp_protocol_error_param.py +15 -0
- openai/types/realtime/realtime_mcp_tool_call.py +43 -0
- openai/types/realtime/realtime_mcp_tool_call_param.py +40 -0
- openai/types/realtime/realtime_mcp_tool_execution_error.py +13 -0
- openai/types/realtime/realtime_mcp_tool_execution_error_param.py +13 -0
- openai/types/realtime/realtime_mcphttp_error.py +15 -0
- openai/types/realtime/realtime_mcphttp_error_param.py +15 -0
- openai/types/realtime/realtime_response.py +98 -0
- openai/types/realtime/realtime_response_create_audio_output.py +29 -0
- openai/types/realtime/realtime_response_create_audio_output_param.py +28 -0
- openai/types/realtime/realtime_response_create_mcp_tool.py +135 -0
- openai/types/realtime/realtime_response_create_mcp_tool_param.py +135 -0
- openai/types/realtime/realtime_response_create_params.py +98 -0
- openai/types/realtime/realtime_response_create_params_param.py +99 -0
- openai/types/realtime/realtime_response_status.py +39 -0
- openai/types/realtime/realtime_response_usage.py +41 -0
- openai/types/realtime/realtime_response_usage_input_token_details.py +35 -0
- openai/types/realtime/realtime_response_usage_output_token_details.py +15 -0
- openai/types/realtime/realtime_server_event.py +155 -0
- openai/types/realtime/realtime_session_client_secret.py +20 -0
- openai/types/realtime/realtime_session_create_request.py +122 -0
- openai/types/realtime/realtime_session_create_request_param.py +122 -0
- openai/types/realtime/realtime_session_create_response.py +475 -0
- openai/types/realtime/realtime_tool_choice_config.py +12 -0
- openai/types/realtime/realtime_tool_choice_config_param.py +14 -0
- openai/types/realtime/realtime_tools_config.py +10 -0
- openai/types/realtime/realtime_tools_config_param.py +143 -0
- openai/types/realtime/realtime_tools_config_union.py +141 -0
- openai/types/realtime/realtime_tools_config_union_param.py +140 -0
- openai/types/realtime/realtime_tracing_config.py +31 -0
- openai/types/realtime/realtime_tracing_config_param.py +31 -0
- openai/types/realtime/realtime_transcription_session_audio.py +12 -0
- openai/types/realtime/realtime_transcription_session_audio_input.py +65 -0
- openai/types/realtime/realtime_transcription_session_audio_input_param.py +67 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +98 -0
- openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +95 -0
- openai/types/realtime/realtime_transcription_session_audio_param.py +13 -0
- openai/types/realtime/realtime_transcription_session_create_request.py +27 -0
- openai/types/realtime/realtime_transcription_session_create_request_param.py +28 -0
- openai/types/realtime/realtime_transcription_session_create_response.py +68 -0
- openai/types/realtime/realtime_transcription_session_turn_detection.py +32 -0
- openai/types/realtime/realtime_truncation.py +10 -0
- openai/types/realtime/realtime_truncation_param.py +12 -0
- openai/types/realtime/realtime_truncation_retention_ratio.py +38 -0
- openai/types/realtime/realtime_truncation_retention_ratio_param.py +37 -0
- openai/types/realtime/response_audio_delta_event.py +30 -0
- openai/types/realtime/response_audio_done_event.py +27 -0
- openai/types/realtime/response_audio_transcript_delta_event.py +30 -0
- openai/types/realtime/response_audio_transcript_done_event.py +30 -0
- openai/types/realtime/response_cancel_event.py +22 -0
- openai/types/realtime/response_cancel_event_param.py +21 -0
- openai/types/realtime/response_content_part_added_event.py +45 -0
- openai/types/realtime/response_content_part_done_event.py +45 -0
- openai/types/realtime/response_create_event.py +20 -0
- openai/types/realtime/response_create_event_param.py +20 -0
- openai/types/realtime/response_created_event.py +19 -0
- openai/types/realtime/response_done_event.py +19 -0
- openai/types/realtime/response_function_call_arguments_delta_event.py +30 -0
- openai/types/realtime/response_function_call_arguments_done_event.py +30 -0
- openai/types/realtime/response_mcp_call_arguments_delta.py +31 -0
- openai/types/realtime/response_mcp_call_arguments_done.py +27 -0
- openai/types/realtime/response_mcp_call_completed.py +21 -0
- openai/types/realtime/response_mcp_call_failed.py +21 -0
- openai/types/realtime/response_mcp_call_in_progress.py +21 -0
- openai/types/realtime/response_output_item_added_event.py +25 -0
- openai/types/realtime/response_output_item_done_event.py +25 -0
- openai/types/realtime/response_text_delta_event.py +30 -0
- openai/types/realtime/response_text_done_event.py +30 -0
- openai/types/realtime/session_created_event.py +23 -0
- openai/types/realtime/session_update_event.py +31 -0
- openai/types/realtime/session_update_event_param.py +32 -0
- openai/types/realtime/session_updated_event.py +23 -0
- openai/types/responses/__init__.py +270 -0
- openai/types/responses/apply_patch_tool.py +12 -0
- openai/types/responses/apply_patch_tool_param.py +12 -0
- openai/types/responses/computer_tool.py +21 -0
- openai/types/responses/computer_tool_param.py +21 -0
- openai/types/responses/custom_tool.py +23 -0
- openai/types/responses/custom_tool_param.py +23 -0
- openai/types/responses/easy_input_message.py +26 -0
- openai/types/responses/easy_input_message_param.py +27 -0
- openai/types/responses/file_search_tool.py +58 -0
- openai/types/responses/file_search_tool_param.py +60 -0
- openai/types/responses/function_shell_tool.py +12 -0
- openai/types/responses/function_shell_tool_param.py +12 -0
- openai/types/responses/function_tool.py +28 -0
- openai/types/responses/function_tool_param.py +28 -0
- openai/types/responses/input_item_list_params.py +34 -0
- openai/types/responses/input_token_count_params.py +142 -0
- openai/types/responses/input_token_count_response.py +13 -0
- openai/types/responses/parsed_response.py +105 -0
- openai/types/responses/response.py +307 -0
- openai/types/responses/response_apply_patch_tool_call.py +76 -0
- openai/types/responses/response_apply_patch_tool_call_output.py +31 -0
- openai/types/responses/response_audio_delta_event.py +18 -0
- openai/types/responses/response_audio_done_event.py +15 -0
- openai/types/responses/response_audio_transcript_delta_event.py +18 -0
- openai/types/responses/response_audio_transcript_done_event.py +15 -0
- openai/types/responses/response_code_interpreter_call_code_delta_event.py +27 -0
- openai/types/responses/response_code_interpreter_call_code_done_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_completed_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_in_progress_event.py +24 -0
- openai/types/responses/response_code_interpreter_call_interpreting_event.py +24 -0
- openai/types/responses/response_code_interpreter_tool_call.py +55 -0
- openai/types/responses/response_code_interpreter_tool_call_param.py +54 -0
- openai/types/responses/response_completed_event.py +19 -0
- openai/types/responses/response_computer_tool_call.py +209 -0
- openai/types/responses/response_computer_tool_call_output_item.py +47 -0
- openai/types/responses/response_computer_tool_call_output_screenshot.py +22 -0
- openai/types/responses/response_computer_tool_call_output_screenshot_param.py +21 -0
- openai/types/responses/response_computer_tool_call_param.py +207 -0
- openai/types/responses/response_content_part_added_event.py +44 -0
- openai/types/responses/response_content_part_done_event.py +44 -0
- openai/types/responses/response_conversation_param.py +12 -0
- openai/types/responses/response_create_params.py +334 -0
- openai/types/responses/response_created_event.py +19 -0
- openai/types/responses/response_custom_tool_call.py +25 -0
- openai/types/responses/response_custom_tool_call_input_delta_event.py +24 -0
- openai/types/responses/response_custom_tool_call_input_done_event.py +24 -0
- openai/types/responses/response_custom_tool_call_output.py +33 -0
- openai/types/responses/response_custom_tool_call_output_param.py +31 -0
- openai/types/responses/response_custom_tool_call_param.py +24 -0
- openai/types/responses/response_error.py +34 -0
- openai/types/responses/response_error_event.py +25 -0
- openai/types/responses/response_failed_event.py +19 -0
- openai/types/responses/response_file_search_call_completed_event.py +21 -0
- openai/types/responses/response_file_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_file_search_call_searching_event.py +21 -0
- openai/types/responses/response_file_search_tool_call.py +51 -0
- openai/types/responses/response_file_search_tool_call_param.py +53 -0
- openai/types/responses/response_format_text_config.py +16 -0
- openai/types/responses/response_format_text_config_param.py +16 -0
- openai/types/responses/response_format_text_json_schema_config.py +43 -0
- openai/types/responses/response_format_text_json_schema_config_param.py +41 -0
- openai/types/responses/response_function_call_arguments_delta_event.py +26 -0
- openai/types/responses/response_function_call_arguments_done_event.py +26 -0
- openai/types/responses/response_function_call_output_item.py +16 -0
- openai/types/responses/response_function_call_output_item_list.py +10 -0
- openai/types/responses/response_function_call_output_item_list_param.py +18 -0
- openai/types/responses/response_function_call_output_item_param.py +16 -0
- openai/types/responses/response_function_shell_call_output_content.py +36 -0
- openai/types/responses/response_function_shell_call_output_content_param.py +35 -0
- openai/types/responses/response_function_shell_tool_call.py +44 -0
- openai/types/responses/response_function_shell_tool_call_output.py +70 -0
- openai/types/responses/response_function_tool_call.py +32 -0
- openai/types/responses/response_function_tool_call_item.py +10 -0
- openai/types/responses/response_function_tool_call_output_item.py +40 -0
- openai/types/responses/response_function_tool_call_param.py +31 -0
- openai/types/responses/response_function_web_search.py +67 -0
- openai/types/responses/response_function_web_search_param.py +73 -0
- openai/types/responses/response_image_gen_call_completed_event.py +21 -0
- openai/types/responses/response_image_gen_call_generating_event.py +21 -0
- openai/types/responses/response_image_gen_call_in_progress_event.py +21 -0
- openai/types/responses/response_image_gen_call_partial_image_event.py +30 -0
- openai/types/responses/response_in_progress_event.py +19 -0
- openai/types/responses/response_includable.py +16 -0
- openai/types/responses/response_incomplete_event.py +19 -0
- openai/types/responses/response_input_audio.py +22 -0
- openai/types/responses/response_input_audio_param.py +22 -0
- openai/types/responses/response_input_content.py +15 -0
- openai/types/responses/response_input_content_param.py +14 -0
- openai/types/responses/response_input_file.py +25 -0
- openai/types/responses/response_input_file_content.py +25 -0
- openai/types/responses/response_input_file_content_param.py +25 -0
- openai/types/responses/response_input_file_param.py +25 -0
- openai/types/responses/response_input_image.py +28 -0
- openai/types/responses/response_input_image_content.py +28 -0
- openai/types/responses/response_input_image_content_param.py +28 -0
- openai/types/responses/response_input_image_param.py +28 -0
- openai/types/responses/response_input_item.py +482 -0
- openai/types/responses/response_input_item_param.py +479 -0
- openai/types/responses/response_input_message_content_list.py +10 -0
- openai/types/responses/response_input_message_content_list_param.py +16 -0
- openai/types/responses/response_input_message_item.py +33 -0
- openai/types/responses/response_input_param.py +482 -0
- openai/types/responses/response_input_text.py +15 -0
- openai/types/responses/response_input_text_content.py +15 -0
- openai/types/responses/response_input_text_content_param.py +15 -0
- openai/types/responses/response_input_text_param.py +15 -0
- openai/types/responses/response_item.py +226 -0
- openai/types/responses/response_item_list.py +26 -0
- openai/types/responses/response_mcp_call_arguments_delta_event.py +27 -0
- openai/types/responses/response_mcp_call_arguments_done_event.py +24 -0
- openai/types/responses/response_mcp_call_completed_event.py +21 -0
- openai/types/responses/response_mcp_call_failed_event.py +21 -0
- openai/types/responses/response_mcp_call_in_progress_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_completed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_failed_event.py +21 -0
- openai/types/responses/response_mcp_list_tools_in_progress_event.py +21 -0
- openai/types/responses/response_output_item.py +189 -0
- openai/types/responses/response_output_item_added_event.py +22 -0
- openai/types/responses/response_output_item_done_event.py +22 -0
- openai/types/responses/response_output_message.py +34 -0
- openai/types/responses/response_output_message_param.py +34 -0
- openai/types/responses/response_output_refusal.py +15 -0
- openai/types/responses/response_output_refusal_param.py +15 -0
- openai/types/responses/response_output_text.py +117 -0
- openai/types/responses/response_output_text_annotation_added_event.py +30 -0
- openai/types/responses/response_output_text_param.py +115 -0
- openai/types/responses/response_prompt.py +28 -0
- openai/types/responses/response_prompt_param.py +29 -0
- openai/types/responses/response_queued_event.py +19 -0
- openai/types/responses/response_reasoning_item.py +51 -0
- openai/types/responses/response_reasoning_item_param.py +51 -0
- openai/types/responses/response_reasoning_summary_part_added_event.py +35 -0
- openai/types/responses/response_reasoning_summary_part_done_event.py +35 -0
- openai/types/responses/response_reasoning_summary_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_summary_text_done_event.py +27 -0
- openai/types/responses/response_reasoning_text_delta_event.py +27 -0
- openai/types/responses/response_reasoning_text_done_event.py +27 -0
- openai/types/responses/response_refusal_delta_event.py +27 -0
- openai/types/responses/response_refusal_done_event.py +27 -0
- openai/types/responses/response_retrieve_params.py +59 -0
- openai/types/responses/response_status.py +7 -0
- openai/types/responses/response_stream_event.py +120 -0
- openai/types/responses/response_text_config.py +35 -0
- openai/types/responses/response_text_config_param.py +36 -0
- openai/types/responses/response_text_delta_event.py +50 -0
- openai/types/responses/response_text_done_event.py +50 -0
- openai/types/responses/response_usage.py +35 -0
- openai/types/responses/response_web_search_call_completed_event.py +21 -0
- openai/types/responses/response_web_search_call_in_progress_event.py +21 -0
- openai/types/responses/response_web_search_call_searching_event.py +21 -0
- openai/types/responses/tool.py +271 -0
- openai/types/responses/tool_choice_allowed.py +36 -0
- openai/types/responses/tool_choice_allowed_param.py +36 -0
- openai/types/responses/tool_choice_apply_patch.py +12 -0
- openai/types/responses/tool_choice_apply_patch_param.py +12 -0
- openai/types/responses/tool_choice_custom.py +15 -0
- openai/types/responses/tool_choice_custom_param.py +15 -0
- openai/types/responses/tool_choice_function.py +15 -0
- openai/types/responses/tool_choice_function_param.py +15 -0
- openai/types/responses/tool_choice_mcp.py +19 -0
- openai/types/responses/tool_choice_mcp_param.py +19 -0
- openai/types/responses/tool_choice_options.py +7 -0
- openai/types/responses/tool_choice_shell.py +12 -0
- openai/types/responses/tool_choice_shell_param.py +12 -0
- openai/types/responses/tool_choice_types.py +31 -0
- openai/types/responses/tool_choice_types_param.py +33 -0
- openai/types/responses/tool_param.py +271 -0
- openai/types/responses/web_search_preview_tool.py +49 -0
- openai/types/responses/web_search_preview_tool_param.py +49 -0
- openai/types/responses/web_search_tool.py +63 -0
- openai/types/responses/web_search_tool_param.py +65 -0
- openai/types/shared/__init__.py +19 -0
- openai/types/shared/all_models.py +28 -0
- openai/types/shared/chat_model.py +75 -0
- openai/types/shared/comparison_filter.py +34 -0
- openai/types/shared/compound_filter.py +22 -0
- openai/types/shared/custom_tool_input_format.py +28 -0
- openai/types/shared/error_object.py +17 -0
- openai/types/shared/function_definition.py +43 -0
- openai/types/shared/function_parameters.py +8 -0
- openai/types/shared/metadata.py +8 -0
- openai/types/shared/reasoning.py +44 -0
- openai/types/shared/reasoning_effort.py +8 -0
- openai/types/shared/response_format_json_object.py +12 -0
- openai/types/shared/response_format_json_schema.py +48 -0
- openai/types/shared/response_format_text.py +12 -0
- openai/types/shared/response_format_text_grammar.py +15 -0
- openai/types/shared/response_format_text_python.py +12 -0
- openai/types/shared/responses_model.py +28 -0
- openai/types/shared_params/__init__.py +15 -0
- openai/types/shared_params/chat_model.py +77 -0
- openai/types/shared_params/comparison_filter.py +36 -0
- openai/types/shared_params/compound_filter.py +23 -0
- openai/types/shared_params/custom_tool_input_format.py +27 -0
- openai/types/shared_params/function_definition.py +45 -0
- openai/types/shared_params/function_parameters.py +10 -0
- openai/types/shared_params/metadata.py +10 -0
- openai/types/shared_params/reasoning.py +45 -0
- openai/types/shared_params/reasoning_effort.py +10 -0
- openai/types/shared_params/response_format_json_object.py +12 -0
- openai/types/shared_params/response_format_json_schema.py +46 -0
- openai/types/shared_params/response_format_text.py +12 -0
- openai/types/shared_params/responses_model.py +30 -0
- openai/types/static_file_chunking_strategy.py +20 -0
- openai/types/static_file_chunking_strategy_object.py +15 -0
- openai/types/static_file_chunking_strategy_object_param.py +16 -0
- openai/types/static_file_chunking_strategy_param.py +22 -0
- openai/types/upload.py +42 -0
- openai/types/upload_complete_params.py +20 -0
- openai/types/upload_create_params.py +52 -0
- openai/types/uploads/__init__.py +6 -0
- openai/types/uploads/part_create_params.py +14 -0
- openai/types/uploads/upload_part.py +21 -0
- openai/types/vector_store.py +82 -0
- openai/types/vector_store_create_params.py +61 -0
- openai/types/vector_store_deleted.py +15 -0
- openai/types/vector_store_list_params.py +39 -0
- openai/types/vector_store_search_params.py +42 -0
- openai/types/vector_store_search_response.py +39 -0
- openai/types/vector_store_update_params.py +39 -0
- openai/types/vector_stores/__init__.py +13 -0
- openai/types/vector_stores/file_batch_create_params.py +70 -0
- openai/types/vector_stores/file_batch_list_files_params.py +47 -0
- openai/types/vector_stores/file_content_response.py +15 -0
- openai/types/vector_stores/file_create_params.py +35 -0
- openai/types/vector_stores/file_list_params.py +45 -0
- openai/types/vector_stores/file_update_params.py +21 -0
- openai/types/vector_stores/vector_store_file.py +67 -0
- openai/types/vector_stores/vector_store_file_batch.py +54 -0
- openai/types/vector_stores/vector_store_file_deleted.py +15 -0
- openai/types/video.py +53 -0
- openai/types/video_create_error.py +11 -0
- openai/types/video_create_params.py +29 -0
- openai/types/video_delete_response.py +18 -0
- openai/types/video_download_content_params.py +12 -0
- openai/types/video_list_params.py +21 -0
- openai/types/video_model.py +7 -0
- openai/types/video_remix_params.py +12 -0
- openai/types/video_seconds.py +7 -0
- openai/types/video_size.py +7 -0
- openai/types/webhooks/__init__.py +24 -0
- openai/types/webhooks/batch_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/batch_completed_webhook_event.py +30 -0
- openai/types/webhooks/batch_expired_webhook_event.py +30 -0
- openai/types/webhooks/batch_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_canceled_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_failed_webhook_event.py +30 -0
- openai/types/webhooks/eval_run_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +30 -0
- openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +30 -0
- openai/types/webhooks/realtime_call_incoming_webhook_event.py +41 -0
- openai/types/webhooks/response_cancelled_webhook_event.py +30 -0
- openai/types/webhooks/response_completed_webhook_event.py +30 -0
- openai/types/webhooks/response_failed_webhook_event.py +30 -0
- openai/types/webhooks/response_incomplete_webhook_event.py +30 -0
- openai/types/webhooks/unwrap_webhook_event.py +44 -0
- openai/types/websocket_connection_options.py +36 -0
- openai/version.py +3 -0
|
@@ -0,0 +1,1858 @@
|
|
|
1
|
+
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Union, Mapping, Optional, cast
|
|
6
|
+
from typing_extensions import Literal, overload
|
|
7
|
+
|
|
8
|
+
import httpx
|
|
9
|
+
|
|
10
|
+
from .. import _legacy_response
|
|
11
|
+
from ..types import image_edit_params, image_generate_params, image_create_variation_params
|
|
12
|
+
from .._types import Body, Omit, Query, Headers, NotGiven, FileTypes, SequenceNotStr, omit, not_given
|
|
13
|
+
from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
|
|
14
|
+
from .._compat import cached_property
|
|
15
|
+
from .._resource import SyncAPIResource, AsyncAPIResource
|
|
16
|
+
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
|
|
17
|
+
from .._streaming import Stream, AsyncStream
|
|
18
|
+
from .._base_client import make_request_options
|
|
19
|
+
from ..types.image_model import ImageModel
|
|
20
|
+
from ..types.images_response import ImagesResponse
|
|
21
|
+
from ..types.image_gen_stream_event import ImageGenStreamEvent
|
|
22
|
+
from ..types.image_edit_stream_event import ImageEditStreamEvent
|
|
23
|
+
|
|
24
|
+
__all__ = ["Images", "AsyncImages"]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class Images(SyncAPIResource):
|
|
28
|
+
@cached_property
def with_raw_response(self) -> ImagesWithRawResponse:
    """Prefix property exposing raw-response variants of every HTTP method.

    Calls made through the returned wrapper yield the raw response object
    instead of the parsed content.

    For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
    """
    wrapped = ImagesWithRawResponse(self)
    return wrapped
|
|
37
|
+
|
|
38
|
+
@cached_property
def with_streaming_response(self) -> ImagesWithStreamingResponse:
    """Prefix property like `.with_raw_response`, but lazy.

    The response body is not read eagerly; it can be streamed instead.

    For more information, see https://www.github.com/openai/openai-python#with_streaming_response
    """
    wrapped = ImagesWithStreamingResponse(self)
    return wrapped
|
|
46
|
+
|
|
47
|
+
def create_variation(
    self,
    *,
    image: FileTypes,
    model: Union[str, ImageModel, None] | Omit = omit,
    n: Optional[int] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
    """Create a variation of a given image.

    Only `dall-e-2` is supported by this endpoint.

    Args:
      image: The image used as the basis for the variation(s). Must be a valid
          PNG file, less than 4MB, and square.

      model: The model to use for image generation. Only `dall-e-2` is
          supported at this time.

      n: The number of images to generate. Must be between 1 and 10.

      response_format: The format in which the generated images are returned.
          Must be one of `url` or `b64_json`. URLs are only valid for 60
          minutes after the image has been generated.

      size: The size of the generated images. Must be one of `256x256`,
          `512x512`, or `1024x1024`.

      user: A unique identifier representing your end-user, which can help
          OpenAI to monitor and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    payload = deepcopy_minimal(
        {
            "image": image,
            "model": model,
            "n": n,
            "response_format": response_format,
            "size": size,
            "user": user,
        }
    )
    extracted_files = extract_files(cast(Mapping[str, object], payload), paths=[["image"]])
    # The Content-Type actually sent to the server will additionally carry a
    # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
    extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        "/images/variations",
        body=maybe_transform(payload, image_create_variation_params.ImageCreateVariationParams),
        files=extracted_files,
        options=request_options,
        cast_to=ImagesResponse,
    )
|
|
119
|
+
|
|
120
|
+
@overload
def edit(
    self,
    *,
    image: Union[FileTypes, SequenceNotStr[FileTypes]],
    prompt: str,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
    mask: FileTypes | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
    stream: Optional[Literal[False]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
    """Create an edited or extended image from one or more source images and a
    prompt (non-streaming overload: `stream` is omitted or `False`).

    Only `gpt-image-1` and `dall-e-2` are supported by this endpoint.

    Args:
      image: The image(s) to edit. Must be a supported image file or an array
          of images. For `gpt-image-1`, each image should be a `png`, `webp`,
          or `jpg` file less than 50MB; up to 16 images may be provided. For
          `dall-e-2`, only one image may be provided, and it should be a
          square `png` file less than 4MB.

      prompt: A text description of the desired image(s). The maximum length
          is 1000 characters for `dall-e-2`, and 32000 characters for
          `gpt-image-1`.

      background: Allows to set transparency for the background of the
          generated image(s). Only supported for `gpt-image-1`. Must be one of
          `transparent`, `opaque` or `auto` (default value). When `auto` is
          used, the model will automatically determine the best background for
          the image. If `transparent`, the output format needs to support
          transparency, so it should be set to either `png` (default value) or
          `webp`.

      input_fidelity: Control how much effort the model will exert to match
          the style and features, especially facial features, of input images.
          Only supported for `gpt-image-1`; unsupported for
          `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`.

      mask: An additional image whose fully transparent areas (e.g. where
          alpha is zero) indicate where `image` should be edited. With
          multiple input images, the mask applies to the first one. Must be a
          valid PNG file, less than 4MB, and have the same dimensions as
          `image`.

      model: The model to use for image generation. Only `dall-e-2` and
          `gpt-image-1` are supported. Defaults to `dall-e-2` unless a
          parameter specific to `gpt-image-1` is used.

      n: The number of images to generate. Must be between 1 and 10.

      output_compression: The compression level (0-100%) for the generated
          images. Only supported for `gpt-image-1` with the `webp` or `jpeg`
          output formats; defaults to 100.

      output_format: The format in which the generated images are returned.
          Only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or
          `webp`. The default value is `png`.

      partial_images: The number of partial images to generate for streaming
          responses. Value must be between 0 and 3. When set to 0, the
          response will be a single image sent in one streaming event. Note
          that the final image may be sent before the full number of partial
          images are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated. `high`,
          `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2`
          only supports `standard` quality. Defaults to `auto`.

      response_format: The format in which the generated images are returned.
          Must be one of `url` or `b64_json`. URLs are only valid for 60
          minutes after the image has been generated. Only supported for
          `dall-e-2`; `gpt-image-1` will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`,
          `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default
          value) for `gpt-image-1`, and one of `256x256`, `512x512`, or
          `1024x1024` for `dall-e-2`.

      stream: Edit the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information.

      user: A unique identifier representing your end-user, which can help
          OpenAI to monitor and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
232
|
+
|
|
233
|
+
@overload
|
|
234
|
+
def edit(
|
|
235
|
+
self,
|
|
236
|
+
*,
|
|
237
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
238
|
+
prompt: str,
|
|
239
|
+
stream: Literal[True],
|
|
240
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
241
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
242
|
+
mask: FileTypes | Omit = omit,
|
|
243
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
244
|
+
n: Optional[int] | Omit = omit,
|
|
245
|
+
output_compression: Optional[int] | Omit = omit,
|
|
246
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
247
|
+
partial_images: Optional[int] | Omit = omit,
|
|
248
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
249
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
250
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
251
|
+
user: str | Omit = omit,
|
|
252
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
253
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
254
|
+
extra_headers: Headers | None = None,
|
|
255
|
+
extra_query: Query | None = None,
|
|
256
|
+
extra_body: Body | None = None,
|
|
257
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
258
|
+
) -> Stream[ImageEditStreamEvent]:
|
|
259
|
+
"""Creates an edited or extended image given one or more source images and a
|
|
260
|
+
prompt.
|
|
261
|
+
|
|
262
|
+
This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
263
|
+
|
|
264
|
+
Args:
|
|
265
|
+
image: The image(s) to edit. Must be a supported image file or an array of images.
|
|
266
|
+
|
|
267
|
+
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
|
|
268
|
+
50MB. You can provide up to 16 images.
|
|
269
|
+
|
|
270
|
+
For `dall-e-2`, you can only provide one image, and it should be a square `png`
|
|
271
|
+
file less than 4MB.
|
|
272
|
+
|
|
273
|
+
prompt: A text description of the desired image(s). The maximum length is 1000
|
|
274
|
+
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
|
|
275
|
+
|
|
276
|
+
stream: Edit the image in streaming mode. Defaults to `false`. See the
|
|
277
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
278
|
+
for more information.
|
|
279
|
+
|
|
280
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
281
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
282
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
283
|
+
automatically determine the best background for the image.
|
|
284
|
+
|
|
285
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
286
|
+
be set to either `png` (default value) or `webp`.
|
|
287
|
+
|
|
288
|
+
input_fidelity: Control how much effort the model will exert to match the style and features,
|
|
289
|
+
especially facial features, of input images. This parameter is only supported
|
|
290
|
+
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
|
|
291
|
+
`low`. Defaults to `low`.
|
|
292
|
+
|
|
293
|
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
|
|
294
|
+
indicate where `image` should be edited. If there are multiple images provided,
|
|
295
|
+
the mask will be applied on the first image. Must be a valid PNG file, less than
|
|
296
|
+
4MB, and have the same dimensions as `image`.
|
|
297
|
+
|
|
298
|
+
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
|
|
299
|
+
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
|
|
300
|
+
is used.
|
|
301
|
+
|
|
302
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
303
|
+
|
|
304
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
305
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
306
|
+
defaults to 100.
|
|
307
|
+
|
|
308
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
309
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
|
|
310
|
+
default value is `png`.
|
|
311
|
+
|
|
312
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
313
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
314
|
+
0, the response will be a single image sent in one streaming event.
|
|
315
|
+
|
|
316
|
+
Note that the final image may be sent before the full number of partial images
|
|
317
|
+
are generated if the full image is generated more quickly.
|
|
318
|
+
|
|
319
|
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
|
|
320
|
+
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
|
|
321
|
+
Defaults to `auto`.
|
|
322
|
+
|
|
323
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
324
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
325
|
+
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
|
|
326
|
+
will always return base64-encoded images.
|
|
327
|
+
|
|
328
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
329
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
330
|
+
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
|
|
331
|
+
|
|
332
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
333
|
+
and detect abuse.
|
|
334
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
335
|
+
|
|
336
|
+
extra_headers: Send extra headers
|
|
337
|
+
|
|
338
|
+
extra_query: Add additional query parameters to the request
|
|
339
|
+
|
|
340
|
+
extra_body: Add additional JSON properties to the request
|
|
341
|
+
|
|
342
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
343
|
+
"""
|
|
344
|
+
...
|
|
345
|
+
|
|
346
|
+
@overload
|
|
347
|
+
def edit(
|
|
348
|
+
self,
|
|
349
|
+
*,
|
|
350
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
351
|
+
prompt: str,
|
|
352
|
+
stream: bool,
|
|
353
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
354
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
355
|
+
mask: FileTypes | Omit = omit,
|
|
356
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
357
|
+
n: Optional[int] | Omit = omit,
|
|
358
|
+
output_compression: Optional[int] | Omit = omit,
|
|
359
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
360
|
+
partial_images: Optional[int] | Omit = omit,
|
|
361
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
362
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
363
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
364
|
+
user: str | Omit = omit,
|
|
365
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
366
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
367
|
+
extra_headers: Headers | None = None,
|
|
368
|
+
extra_query: Query | None = None,
|
|
369
|
+
extra_body: Body | None = None,
|
|
370
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
371
|
+
) -> ImagesResponse | Stream[ImageEditStreamEvent]:
|
|
372
|
+
"""Creates an edited or extended image given one or more source images and a
|
|
373
|
+
prompt.
|
|
374
|
+
|
|
375
|
+
This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
376
|
+
|
|
377
|
+
Args:
|
|
378
|
+
image: The image(s) to edit. Must be a supported image file or an array of images.
|
|
379
|
+
|
|
380
|
+
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
|
|
381
|
+
50MB. You can provide up to 16 images.
|
|
382
|
+
|
|
383
|
+
For `dall-e-2`, you can only provide one image, and it should be a square `png`
|
|
384
|
+
file less than 4MB.
|
|
385
|
+
|
|
386
|
+
prompt: A text description of the desired image(s). The maximum length is 1000
|
|
387
|
+
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
|
|
388
|
+
|
|
389
|
+
stream: Edit the image in streaming mode. Defaults to `false`. See the
|
|
390
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
391
|
+
for more information.
|
|
392
|
+
|
|
393
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
394
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
395
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
396
|
+
automatically determine the best background for the image.
|
|
397
|
+
|
|
398
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
399
|
+
be set to either `png` (default value) or `webp`.
|
|
400
|
+
|
|
401
|
+
input_fidelity: Control how much effort the model will exert to match the style and features,
|
|
402
|
+
especially facial features, of input images. This parameter is only supported
|
|
403
|
+
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
|
|
404
|
+
`low`. Defaults to `low`.
|
|
405
|
+
|
|
406
|
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
|
|
407
|
+
indicate where `image` should be edited. If there are multiple images provided,
|
|
408
|
+
the mask will be applied on the first image. Must be a valid PNG file, less than
|
|
409
|
+
4MB, and have the same dimensions as `image`.
|
|
410
|
+
|
|
411
|
+
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
|
|
412
|
+
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
|
|
413
|
+
is used.
|
|
414
|
+
|
|
415
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
416
|
+
|
|
417
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
418
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
419
|
+
defaults to 100.
|
|
420
|
+
|
|
421
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
422
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
|
|
423
|
+
default value is `png`.
|
|
424
|
+
|
|
425
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
426
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
427
|
+
0, the response will be a single image sent in one streaming event.
|
|
428
|
+
|
|
429
|
+
Note that the final image may be sent before the full number of partial images
|
|
430
|
+
are generated if the full image is generated more quickly.
|
|
431
|
+
|
|
432
|
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
|
|
433
|
+
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
|
|
434
|
+
Defaults to `auto`.
|
|
435
|
+
|
|
436
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
437
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
438
|
+
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
|
|
439
|
+
will always return base64-encoded images.
|
|
440
|
+
|
|
441
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
442
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
443
|
+
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
|
|
444
|
+
|
|
445
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
446
|
+
and detect abuse.
|
|
447
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
448
|
+
|
|
449
|
+
extra_headers: Send extra headers
|
|
450
|
+
|
|
451
|
+
extra_query: Add additional query parameters to the request
|
|
452
|
+
|
|
453
|
+
extra_body: Add additional JSON properties to the request
|
|
454
|
+
|
|
455
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
456
|
+
"""
|
|
457
|
+
...
|
|
458
|
+
|
|
459
|
+
@required_args(["image", "prompt"], ["image", "prompt", "stream"])
|
|
460
|
+
def edit(
|
|
461
|
+
self,
|
|
462
|
+
*,
|
|
463
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
464
|
+
prompt: str,
|
|
465
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
466
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
467
|
+
mask: FileTypes | Omit = omit,
|
|
468
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
469
|
+
n: Optional[int] | Omit = omit,
|
|
470
|
+
output_compression: Optional[int] | Omit = omit,
|
|
471
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
472
|
+
partial_images: Optional[int] | Omit = omit,
|
|
473
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
474
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
475
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
476
|
+
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
|
|
477
|
+
user: str | Omit = omit,
|
|
478
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
479
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
480
|
+
extra_headers: Headers | None = None,
|
|
481
|
+
extra_query: Query | None = None,
|
|
482
|
+
extra_body: Body | None = None,
|
|
483
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
484
|
+
) -> ImagesResponse | Stream[ImageEditStreamEvent]:
|
|
485
|
+
body = deepcopy_minimal(
|
|
486
|
+
{
|
|
487
|
+
"image": image,
|
|
488
|
+
"prompt": prompt,
|
|
489
|
+
"background": background,
|
|
490
|
+
"input_fidelity": input_fidelity,
|
|
491
|
+
"mask": mask,
|
|
492
|
+
"model": model,
|
|
493
|
+
"n": n,
|
|
494
|
+
"output_compression": output_compression,
|
|
495
|
+
"output_format": output_format,
|
|
496
|
+
"partial_images": partial_images,
|
|
497
|
+
"quality": quality,
|
|
498
|
+
"response_format": response_format,
|
|
499
|
+
"size": size,
|
|
500
|
+
"stream": stream,
|
|
501
|
+
"user": user,
|
|
502
|
+
}
|
|
503
|
+
)
|
|
504
|
+
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
|
|
505
|
+
# It should be noted that the actual Content-Type header that will be
|
|
506
|
+
# sent to the server will contain a `boundary` parameter, e.g.
|
|
507
|
+
# multipart/form-data; boundary=---abc--
|
|
508
|
+
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
|
|
509
|
+
return self._post(
|
|
510
|
+
"/images/edits",
|
|
511
|
+
body=maybe_transform(
|
|
512
|
+
body,
|
|
513
|
+
image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
|
|
514
|
+
),
|
|
515
|
+
files=files,
|
|
516
|
+
options=make_request_options(
|
|
517
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
518
|
+
),
|
|
519
|
+
cast_to=ImagesResponse,
|
|
520
|
+
stream=stream or False,
|
|
521
|
+
stream_cls=Stream[ImageEditStreamEvent],
|
|
522
|
+
)
|
|
523
|
+
|
|
524
|
+
@overload
|
|
525
|
+
def generate(
|
|
526
|
+
self,
|
|
527
|
+
*,
|
|
528
|
+
prompt: str,
|
|
529
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
530
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
531
|
+
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
|
|
532
|
+
n: Optional[int] | Omit = omit,
|
|
533
|
+
output_compression: Optional[int] | Omit = omit,
|
|
534
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
535
|
+
partial_images: Optional[int] | Omit = omit,
|
|
536
|
+
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
537
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
538
|
+
size: Optional[
|
|
539
|
+
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
|
|
540
|
+
]
|
|
541
|
+
| Omit = omit,
|
|
542
|
+
stream: Optional[Literal[False]] | Omit = omit,
|
|
543
|
+
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
|
|
544
|
+
user: str | Omit = omit,
|
|
545
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
546
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
547
|
+
extra_headers: Headers | None = None,
|
|
548
|
+
extra_query: Query | None = None,
|
|
549
|
+
extra_body: Body | None = None,
|
|
550
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
551
|
+
) -> ImagesResponse:
|
|
552
|
+
"""
|
|
553
|
+
Creates an image given a prompt.
|
|
554
|
+
[Learn more](https://platform.openai.com/docs/guides/images).
|
|
555
|
+
|
|
556
|
+
Args:
|
|
557
|
+
prompt: A text description of the desired image(s). The maximum length is 32000
|
|
558
|
+
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
|
|
559
|
+
for `dall-e-3`.
|
|
560
|
+
|
|
561
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
562
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
563
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
564
|
+
automatically determine the best background for the image.
|
|
565
|
+
|
|
566
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
567
|
+
be set to either `png` (default value) or `webp`.
|
|
568
|
+
|
|
569
|
+
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
|
|
570
|
+
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
|
|
571
|
+
`gpt-image-1` is used.
|
|
572
|
+
|
|
573
|
+
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
|
|
574
|
+
be either `low` for less restrictive filtering or `auto` (default value).
|
|
575
|
+
|
|
576
|
+
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
|
|
577
|
+
`n=1` is supported.
|
|
578
|
+
|
|
579
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
580
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
581
|
+
defaults to 100.
|
|
582
|
+
|
|
583
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
584
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
|
|
585
|
+
|
|
586
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
587
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
588
|
+
0, the response will be a single image sent in one streaming event.
|
|
589
|
+
|
|
590
|
+
Note that the final image may be sent before the full number of partial images
|
|
591
|
+
are generated if the full image is generated more quickly.
|
|
592
|
+
|
|
593
|
+
quality: The quality of the image that will be generated.
|
|
594
|
+
|
|
595
|
+
- `auto` (default value) will automatically select the best quality for the
|
|
596
|
+
given model.
|
|
597
|
+
- `high`, `medium` and `low` are supported for `gpt-image-1`.
|
|
598
|
+
- `hd` and `standard` are supported for `dall-e-3`.
|
|
599
|
+
- `standard` is the only option for `dall-e-2`.
|
|
600
|
+
|
|
601
|
+
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
|
|
602
|
+
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
|
|
603
|
+
after the image has been generated. This parameter isn't supported for
|
|
604
|
+
`gpt-image-1` which will always return base64-encoded images.
|
|
605
|
+
|
|
606
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
607
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
608
|
+
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
|
|
609
|
+
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
|
|
610
|
+
|
|
611
|
+
stream: Generate the image in streaming mode. Defaults to `false`. See the
|
|
612
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
613
|
+
for more information. This parameter is only supported for `gpt-image-1`.
|
|
614
|
+
|
|
615
|
+
style: The style of the generated images. This parameter is only supported for
|
|
616
|
+
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
|
|
617
|
+
towards generating hyper-real and dramatic images. Natural causes the model to
|
|
618
|
+
produce more natural, less hyper-real looking images.
|
|
619
|
+
|
|
620
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
621
|
+
and detect abuse.
|
|
622
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
623
|
+
|
|
624
|
+
extra_headers: Send extra headers
|
|
625
|
+
|
|
626
|
+
extra_query: Add additional query parameters to the request
|
|
627
|
+
|
|
628
|
+
extra_body: Add additional JSON properties to the request
|
|
629
|
+
|
|
630
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
631
|
+
"""
|
|
632
|
+
...
|
|
633
|
+
|
|
634
|
+
@overload
|
|
635
|
+
def generate(
|
|
636
|
+
self,
|
|
637
|
+
*,
|
|
638
|
+
prompt: str,
|
|
639
|
+
stream: Literal[True],
|
|
640
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
641
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
642
|
+
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
|
|
643
|
+
n: Optional[int] | Omit = omit,
|
|
644
|
+
output_compression: Optional[int] | Omit = omit,
|
|
645
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
646
|
+
partial_images: Optional[int] | Omit = omit,
|
|
647
|
+
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
648
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
649
|
+
size: Optional[
|
|
650
|
+
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
|
|
651
|
+
]
|
|
652
|
+
| Omit = omit,
|
|
653
|
+
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
|
|
654
|
+
user: str | Omit = omit,
|
|
655
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
656
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
657
|
+
extra_headers: Headers | None = None,
|
|
658
|
+
extra_query: Query | None = None,
|
|
659
|
+
extra_body: Body | None = None,
|
|
660
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
661
|
+
) -> Stream[ImageGenStreamEvent]:
|
|
662
|
+
"""
|
|
663
|
+
Creates an image given a prompt.
|
|
664
|
+
[Learn more](https://platform.openai.com/docs/guides/images).
|
|
665
|
+
|
|
666
|
+
Args:
|
|
667
|
+
prompt: A text description of the desired image(s). The maximum length is 32000
|
|
668
|
+
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
|
|
669
|
+
for `dall-e-3`.
|
|
670
|
+
|
|
671
|
+
stream: Generate the image in streaming mode. Defaults to `false`. See the
|
|
672
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
673
|
+
for more information. This parameter is only supported for `gpt-image-1`.
|
|
674
|
+
|
|
675
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
676
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
677
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
678
|
+
automatically determine the best background for the image.
|
|
679
|
+
|
|
680
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
681
|
+
be set to either `png` (default value) or `webp`.
|
|
682
|
+
|
|
683
|
+
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
|
|
684
|
+
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
|
|
685
|
+
`gpt-image-1` is used.
|
|
686
|
+
|
|
687
|
+
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
|
|
688
|
+
be either `low` for less restrictive filtering or `auto` (default value).
|
|
689
|
+
|
|
690
|
+
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
|
|
691
|
+
`n=1` is supported.
|
|
692
|
+
|
|
693
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
694
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
695
|
+
defaults to 100.
|
|
696
|
+
|
|
697
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
698
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
|
|
699
|
+
|
|
700
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
701
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
702
|
+
0, the response will be a single image sent in one streaming event.
|
|
703
|
+
|
|
704
|
+
Note that the final image may be sent before the full number of partial images
|
|
705
|
+
are generated if the full image is generated more quickly.
|
|
706
|
+
|
|
707
|
+
quality: The quality of the image that will be generated.
|
|
708
|
+
|
|
709
|
+
- `auto` (default value) will automatically select the best quality for the
|
|
710
|
+
given model.
|
|
711
|
+
- `high`, `medium` and `low` are supported for `gpt-image-1`.
|
|
712
|
+
- `hd` and `standard` are supported for `dall-e-3`.
|
|
713
|
+
- `standard` is the only option for `dall-e-2`.
|
|
714
|
+
|
|
715
|
+
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
|
|
716
|
+
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
|
|
717
|
+
after the image has been generated. This parameter isn't supported for
|
|
718
|
+
`gpt-image-1` which will always return base64-encoded images.
|
|
719
|
+
|
|
720
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
721
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
722
|
+
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
|
|
723
|
+
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
|
|
724
|
+
|
|
725
|
+
style: The style of the generated images. This parameter is only supported for
|
|
726
|
+
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
|
|
727
|
+
towards generating hyper-real and dramatic images. Natural causes the model to
|
|
728
|
+
produce more natural, less hyper-real looking images.
|
|
729
|
+
|
|
730
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
731
|
+
and detect abuse.
|
|
732
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
733
|
+
|
|
734
|
+
extra_headers: Send extra headers
|
|
735
|
+
|
|
736
|
+
extra_query: Add additional query parameters to the request
|
|
737
|
+
|
|
738
|
+
extra_body: Add additional JSON properties to the request
|
|
739
|
+
|
|
740
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
741
|
+
"""
|
|
742
|
+
...
|
|
743
|
+
|
|
744
|
+
@overload
|
|
745
|
+
def generate(
|
|
746
|
+
self,
|
|
747
|
+
*,
|
|
748
|
+
prompt: str,
|
|
749
|
+
stream: bool,
|
|
750
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
751
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
752
|
+
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
|
|
753
|
+
n: Optional[int] | Omit = omit,
|
|
754
|
+
output_compression: Optional[int] | Omit = omit,
|
|
755
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
756
|
+
partial_images: Optional[int] | Omit = omit,
|
|
757
|
+
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
758
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
759
|
+
size: Optional[
|
|
760
|
+
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
|
|
761
|
+
]
|
|
762
|
+
| Omit = omit,
|
|
763
|
+
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
|
|
764
|
+
user: str | Omit = omit,
|
|
765
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
766
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
767
|
+
extra_headers: Headers | None = None,
|
|
768
|
+
extra_query: Query | None = None,
|
|
769
|
+
extra_body: Body | None = None,
|
|
770
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
771
|
+
) -> ImagesResponse | Stream[ImageGenStreamEvent]:
|
|
772
|
+
"""
|
|
773
|
+
Creates an image given a prompt.
|
|
774
|
+
[Learn more](https://platform.openai.com/docs/guides/images).
|
|
775
|
+
|
|
776
|
+
Args:
|
|
777
|
+
prompt: A text description of the desired image(s). The maximum length is 32000
|
|
778
|
+
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
|
|
779
|
+
for `dall-e-3`.
|
|
780
|
+
|
|
781
|
+
stream: Generate the image in streaming mode. Defaults to `false`. See the
|
|
782
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
783
|
+
for more information. This parameter is only supported for `gpt-image-1`.
|
|
784
|
+
|
|
785
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
786
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
787
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
788
|
+
automatically determine the best background for the image.
|
|
789
|
+
|
|
790
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
791
|
+
be set to either `png` (default value) or `webp`.
|
|
792
|
+
|
|
793
|
+
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
|
|
794
|
+
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
|
|
795
|
+
`gpt-image-1` is used.
|
|
796
|
+
|
|
797
|
+
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
|
|
798
|
+
be either `low` for less restrictive filtering or `auto` (default value).
|
|
799
|
+
|
|
800
|
+
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
|
|
801
|
+
`n=1` is supported.
|
|
802
|
+
|
|
803
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
804
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
805
|
+
defaults to 100.
|
|
806
|
+
|
|
807
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
808
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
|
|
809
|
+
|
|
810
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
811
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
812
|
+
0, the response will be a single image sent in one streaming event.
|
|
813
|
+
|
|
814
|
+
Note that the final image may be sent before the full number of partial images
|
|
815
|
+
are generated if the full image is generated more quickly.
|
|
816
|
+
|
|
817
|
+
quality: The quality of the image that will be generated.
|
|
818
|
+
|
|
819
|
+
- `auto` (default value) will automatically select the best quality for the
|
|
820
|
+
given model.
|
|
821
|
+
- `high`, `medium` and `low` are supported for `gpt-image-1`.
|
|
822
|
+
- `hd` and `standard` are supported for `dall-e-3`.
|
|
823
|
+
- `standard` is the only option for `dall-e-2`.
|
|
824
|
+
|
|
825
|
+
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
|
|
826
|
+
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
|
|
827
|
+
after the image has been generated. This parameter isn't supported for
|
|
828
|
+
`gpt-image-1` which will always return base64-encoded images.
|
|
829
|
+
|
|
830
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
831
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
832
|
+
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
|
|
833
|
+
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
|
|
834
|
+
|
|
835
|
+
style: The style of the generated images. This parameter is only supported for
|
|
836
|
+
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
|
|
837
|
+
towards generating hyper-real and dramatic images. Natural causes the model to
|
|
838
|
+
produce more natural, less hyper-real looking images.
|
|
839
|
+
|
|
840
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
841
|
+
and detect abuse.
|
|
842
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
843
|
+
|
|
844
|
+
extra_headers: Send extra headers
|
|
845
|
+
|
|
846
|
+
extra_query: Add additional query parameters to the request
|
|
847
|
+
|
|
848
|
+
extra_body: Add additional JSON properties to the request
|
|
849
|
+
|
|
850
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
851
|
+
"""
|
|
852
|
+
...
|
|
853
|
+
|
|
854
|
+
@required_args(["prompt"], ["prompt", "stream"])
|
|
855
|
+
def generate(
|
|
856
|
+
self,
|
|
857
|
+
*,
|
|
858
|
+
prompt: str,
|
|
859
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
860
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
861
|
+
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
|
|
862
|
+
n: Optional[int] | Omit = omit,
|
|
863
|
+
output_compression: Optional[int] | Omit = omit,
|
|
864
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
865
|
+
partial_images: Optional[int] | Omit = omit,
|
|
866
|
+
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
867
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
868
|
+
size: Optional[
|
|
869
|
+
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
|
|
870
|
+
]
|
|
871
|
+
| Omit = omit,
|
|
872
|
+
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
|
|
873
|
+
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
|
|
874
|
+
user: str | Omit = omit,
|
|
875
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
876
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
877
|
+
extra_headers: Headers | None = None,
|
|
878
|
+
extra_query: Query | None = None,
|
|
879
|
+
extra_body: Body | None = None,
|
|
880
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
881
|
+
) -> ImagesResponse | Stream[ImageGenStreamEvent]:
|
|
882
|
+
return self._post(
|
|
883
|
+
"/images/generations",
|
|
884
|
+
body=maybe_transform(
|
|
885
|
+
{
|
|
886
|
+
"prompt": prompt,
|
|
887
|
+
"background": background,
|
|
888
|
+
"model": model,
|
|
889
|
+
"moderation": moderation,
|
|
890
|
+
"n": n,
|
|
891
|
+
"output_compression": output_compression,
|
|
892
|
+
"output_format": output_format,
|
|
893
|
+
"partial_images": partial_images,
|
|
894
|
+
"quality": quality,
|
|
895
|
+
"response_format": response_format,
|
|
896
|
+
"size": size,
|
|
897
|
+
"stream": stream,
|
|
898
|
+
"style": style,
|
|
899
|
+
"user": user,
|
|
900
|
+
},
|
|
901
|
+
image_generate_params.ImageGenerateParamsStreaming
|
|
902
|
+
if stream
|
|
903
|
+
else image_generate_params.ImageGenerateParamsNonStreaming,
|
|
904
|
+
),
|
|
905
|
+
options=make_request_options(
|
|
906
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
907
|
+
),
|
|
908
|
+
cast_to=ImagesResponse,
|
|
909
|
+
stream=stream or False,
|
|
910
|
+
stream_cls=Stream[ImageGenStreamEvent],
|
|
911
|
+
)
|
|
912
|
+
|
|
913
|
+
|
|
914
|
+
class AsyncImages(AsyncAPIResource):
|
|
915
|
+
@cached_property
|
|
916
|
+
def with_raw_response(self) -> AsyncImagesWithRawResponse:
|
|
917
|
+
"""
|
|
918
|
+
This property can be used as a prefix for any HTTP method call to return
|
|
919
|
+
the raw response object instead of the parsed content.
|
|
920
|
+
|
|
921
|
+
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
|
|
922
|
+
"""
|
|
923
|
+
return AsyncImagesWithRawResponse(self)
|
|
924
|
+
|
|
925
|
+
@cached_property
|
|
926
|
+
def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
|
|
927
|
+
"""
|
|
928
|
+
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
|
|
929
|
+
|
|
930
|
+
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
|
|
931
|
+
"""
|
|
932
|
+
return AsyncImagesWithStreamingResponse(self)
|
|
933
|
+
|
|
934
|
+
async def create_variation(
|
|
935
|
+
self,
|
|
936
|
+
*,
|
|
937
|
+
image: FileTypes,
|
|
938
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
939
|
+
n: Optional[int] | Omit = omit,
|
|
940
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
941
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
|
|
942
|
+
user: str | Omit = omit,
|
|
943
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
944
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
945
|
+
extra_headers: Headers | None = None,
|
|
946
|
+
extra_query: Query | None = None,
|
|
947
|
+
extra_body: Body | None = None,
|
|
948
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
949
|
+
) -> ImagesResponse:
|
|
950
|
+
"""Creates a variation of a given image.
|
|
951
|
+
|
|
952
|
+
This endpoint only supports `dall-e-2`.
|
|
953
|
+
|
|
954
|
+
Args:
|
|
955
|
+
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
|
|
956
|
+
less than 4MB, and square.
|
|
957
|
+
|
|
958
|
+
model: The model to use for image generation. Only `dall-e-2` is supported at this
|
|
959
|
+
time.
|
|
960
|
+
|
|
961
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
962
|
+
|
|
963
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
964
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
965
|
+
generated.
|
|
966
|
+
|
|
967
|
+
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
|
|
968
|
+
`1024x1024`.
|
|
969
|
+
|
|
970
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
971
|
+
and detect abuse.
|
|
972
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
973
|
+
|
|
974
|
+
extra_headers: Send extra headers
|
|
975
|
+
|
|
976
|
+
extra_query: Add additional query parameters to the request
|
|
977
|
+
|
|
978
|
+
extra_body: Add additional JSON properties to the request
|
|
979
|
+
|
|
980
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
981
|
+
"""
|
|
982
|
+
body = deepcopy_minimal(
|
|
983
|
+
{
|
|
984
|
+
"image": image,
|
|
985
|
+
"model": model,
|
|
986
|
+
"n": n,
|
|
987
|
+
"response_format": response_format,
|
|
988
|
+
"size": size,
|
|
989
|
+
"user": user,
|
|
990
|
+
}
|
|
991
|
+
)
|
|
992
|
+
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
|
|
993
|
+
# It should be noted that the actual Content-Type header that will be
|
|
994
|
+
# sent to the server will contain a `boundary` parameter, e.g.
|
|
995
|
+
# multipart/form-data; boundary=---abc--
|
|
996
|
+
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
|
|
997
|
+
return await self._post(
|
|
998
|
+
"/images/variations",
|
|
999
|
+
body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
|
|
1000
|
+
files=files,
|
|
1001
|
+
options=make_request_options(
|
|
1002
|
+
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
|
|
1003
|
+
),
|
|
1004
|
+
cast_to=ImagesResponse,
|
|
1005
|
+
)
|
|
1006
|
+
|
|
1007
|
+
@overload
|
|
1008
|
+
async def edit(
|
|
1009
|
+
self,
|
|
1010
|
+
*,
|
|
1011
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
1012
|
+
prompt: str,
|
|
1013
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
1014
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
1015
|
+
mask: FileTypes | Omit = omit,
|
|
1016
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
1017
|
+
n: Optional[int] | Omit = omit,
|
|
1018
|
+
output_compression: Optional[int] | Omit = omit,
|
|
1019
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
1020
|
+
partial_images: Optional[int] | Omit = omit,
|
|
1021
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
1022
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
1023
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
1024
|
+
stream: Optional[Literal[False]] | Omit = omit,
|
|
1025
|
+
user: str | Omit = omit,
|
|
1026
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1027
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1028
|
+
extra_headers: Headers | None = None,
|
|
1029
|
+
extra_query: Query | None = None,
|
|
1030
|
+
extra_body: Body | None = None,
|
|
1031
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1032
|
+
) -> ImagesResponse:
|
|
1033
|
+
"""Creates an edited or extended image given one or more source images and a
|
|
1034
|
+
prompt.
|
|
1035
|
+
|
|
1036
|
+
This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
1037
|
+
|
|
1038
|
+
Args:
|
|
1039
|
+
image: The image(s) to edit. Must be a supported image file or an array of images.
|
|
1040
|
+
|
|
1041
|
+
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
|
|
1042
|
+
50MB. You can provide up to 16 images.
|
|
1043
|
+
|
|
1044
|
+
For `dall-e-2`, you can only provide one image, and it should be a square `png`
|
|
1045
|
+
file less than 4MB.
|
|
1046
|
+
|
|
1047
|
+
prompt: A text description of the desired image(s). The maximum length is 1000
|
|
1048
|
+
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
|
|
1049
|
+
|
|
1050
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
1051
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
1052
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
1053
|
+
automatically determine the best background for the image.
|
|
1054
|
+
|
|
1055
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
1056
|
+
be set to either `png` (default value) or `webp`.
|
|
1057
|
+
|
|
1058
|
+
input_fidelity: Control how much effort the model will exert to match the style and features,
|
|
1059
|
+
especially facial features, of input images. This parameter is only supported
|
|
1060
|
+
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
|
|
1061
|
+
`low`. Defaults to `low`.
|
|
1062
|
+
|
|
1063
|
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
|
|
1064
|
+
indicate where `image` should be edited. If there are multiple images provided,
|
|
1065
|
+
the mask will be applied on the first image. Must be a valid PNG file, less than
|
|
1066
|
+
4MB, and have the same dimensions as `image`.
|
|
1067
|
+
|
|
1068
|
+
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
|
|
1069
|
+
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
|
|
1070
|
+
is used.
|
|
1071
|
+
|
|
1072
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
1073
|
+
|
|
1074
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
1075
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
1076
|
+
defaults to 100.
|
|
1077
|
+
|
|
1078
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
1079
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
|
|
1080
|
+
default value is `png`.
|
|
1081
|
+
|
|
1082
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
1083
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
1084
|
+
0, the response will be a single image sent in one streaming event.
|
|
1085
|
+
|
|
1086
|
+
Note that the final image may be sent before the full number of partial images
|
|
1087
|
+
are generated if the full image is generated more quickly.
|
|
1088
|
+
|
|
1089
|
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
|
|
1090
|
+
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
|
|
1091
|
+
Defaults to `auto`.
|
|
1092
|
+
|
|
1093
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
1094
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
1095
|
+
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
|
|
1096
|
+
will always return base64-encoded images.
|
|
1097
|
+
|
|
1098
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
1099
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
1100
|
+
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
|
|
1101
|
+
|
|
1102
|
+
stream: Edit the image in streaming mode. Defaults to `false`. See the
|
|
1103
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
1104
|
+
for more information.
|
|
1105
|
+
|
|
1106
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
1107
|
+
and detect abuse.
|
|
1108
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
1109
|
+
|
|
1110
|
+
extra_headers: Send extra headers
|
|
1111
|
+
|
|
1112
|
+
extra_query: Add additional query parameters to the request
|
|
1113
|
+
|
|
1114
|
+
extra_body: Add additional JSON properties to the request
|
|
1115
|
+
|
|
1116
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1117
|
+
"""
|
|
1118
|
+
...
|
|
1119
|
+
|
|
1120
|
+
@overload
|
|
1121
|
+
async def edit(
|
|
1122
|
+
self,
|
|
1123
|
+
*,
|
|
1124
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
1125
|
+
prompt: str,
|
|
1126
|
+
stream: Literal[True],
|
|
1127
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
1128
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
1129
|
+
mask: FileTypes | Omit = omit,
|
|
1130
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
1131
|
+
n: Optional[int] | Omit = omit,
|
|
1132
|
+
output_compression: Optional[int] | Omit = omit,
|
|
1133
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
1134
|
+
partial_images: Optional[int] | Omit = omit,
|
|
1135
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
1136
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
1137
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
1138
|
+
user: str | Omit = omit,
|
|
1139
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1140
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1141
|
+
extra_headers: Headers | None = None,
|
|
1142
|
+
extra_query: Query | None = None,
|
|
1143
|
+
extra_body: Body | None = None,
|
|
1144
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1145
|
+
) -> AsyncStream[ImageEditStreamEvent]:
|
|
1146
|
+
"""Creates an edited or extended image given one or more source images and a
|
|
1147
|
+
prompt.
|
|
1148
|
+
|
|
1149
|
+
This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
1150
|
+
|
|
1151
|
+
Args:
|
|
1152
|
+
image: The image(s) to edit. Must be a supported image file or an array of images.
|
|
1153
|
+
|
|
1154
|
+
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
|
|
1155
|
+
50MB. You can provide up to 16 images.
|
|
1156
|
+
|
|
1157
|
+
For `dall-e-2`, you can only provide one image, and it should be a square `png`
|
|
1158
|
+
file less than 4MB.
|
|
1159
|
+
|
|
1160
|
+
prompt: A text description of the desired image(s). The maximum length is 1000
|
|
1161
|
+
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
|
|
1162
|
+
|
|
1163
|
+
stream: Edit the image in streaming mode. Defaults to `false`. See the
|
|
1164
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
1165
|
+
for more information.
|
|
1166
|
+
|
|
1167
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
1168
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
1169
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
1170
|
+
automatically determine the best background for the image.
|
|
1171
|
+
|
|
1172
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
1173
|
+
be set to either `png` (default value) or `webp`.
|
|
1174
|
+
|
|
1175
|
+
input_fidelity: Control how much effort the model will exert to match the style and features,
|
|
1176
|
+
especially facial features, of input images. This parameter is only supported
|
|
1177
|
+
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
|
|
1178
|
+
`low`. Defaults to `low`.
|
|
1179
|
+
|
|
1180
|
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
|
|
1181
|
+
indicate where `image` should be edited. If there are multiple images provided,
|
|
1182
|
+
the mask will be applied on the first image. Must be a valid PNG file, less than
|
|
1183
|
+
4MB, and have the same dimensions as `image`.
|
|
1184
|
+
|
|
1185
|
+
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
|
|
1186
|
+
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
|
|
1187
|
+
is used.
|
|
1188
|
+
|
|
1189
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
1190
|
+
|
|
1191
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
1192
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
1193
|
+
defaults to 100.
|
|
1194
|
+
|
|
1195
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
1196
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
|
|
1197
|
+
default value is `png`.
|
|
1198
|
+
|
|
1199
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
1200
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
1201
|
+
0, the response will be a single image sent in one streaming event.
|
|
1202
|
+
|
|
1203
|
+
Note that the final image may be sent before the full number of partial images
|
|
1204
|
+
are generated if the full image is generated more quickly.
|
|
1205
|
+
|
|
1206
|
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
|
|
1207
|
+
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
|
|
1208
|
+
Defaults to `auto`.
|
|
1209
|
+
|
|
1210
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
1211
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
1212
|
+
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
|
|
1213
|
+
will always return base64-encoded images.
|
|
1214
|
+
|
|
1215
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
1216
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
1217
|
+
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
|
|
1218
|
+
|
|
1219
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
1220
|
+
and detect abuse.
|
|
1221
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
1222
|
+
|
|
1223
|
+
extra_headers: Send extra headers
|
|
1224
|
+
|
|
1225
|
+
extra_query: Add additional query parameters to the request
|
|
1226
|
+
|
|
1227
|
+
extra_body: Add additional JSON properties to the request
|
|
1228
|
+
|
|
1229
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1230
|
+
"""
|
|
1231
|
+
...
|
|
1232
|
+
|
|
1233
|
+
@overload
|
|
1234
|
+
async def edit(
|
|
1235
|
+
self,
|
|
1236
|
+
*,
|
|
1237
|
+
image: Union[FileTypes, SequenceNotStr[FileTypes]],
|
|
1238
|
+
prompt: str,
|
|
1239
|
+
stream: bool,
|
|
1240
|
+
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
|
|
1241
|
+
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
|
|
1242
|
+
mask: FileTypes | Omit = omit,
|
|
1243
|
+
model: Union[str, ImageModel, None] | Omit = omit,
|
|
1244
|
+
n: Optional[int] | Omit = omit,
|
|
1245
|
+
output_compression: Optional[int] | Omit = omit,
|
|
1246
|
+
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
|
|
1247
|
+
partial_images: Optional[int] | Omit = omit,
|
|
1248
|
+
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
|
|
1249
|
+
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
|
|
1250
|
+
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
|
|
1251
|
+
user: str | Omit = omit,
|
|
1252
|
+
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
|
|
1253
|
+
# The extra values given here take precedence over values defined on the client or passed to this method.
|
|
1254
|
+
extra_headers: Headers | None = None,
|
|
1255
|
+
extra_query: Query | None = None,
|
|
1256
|
+
extra_body: Body | None = None,
|
|
1257
|
+
timeout: float | httpx.Timeout | None | NotGiven = not_given,
|
|
1258
|
+
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
|
|
1259
|
+
"""Creates an edited or extended image given one or more source images and a
|
|
1260
|
+
prompt.
|
|
1261
|
+
|
|
1262
|
+
This endpoint only supports `gpt-image-1` and `dall-e-2`.
|
|
1263
|
+
|
|
1264
|
+
Args:
|
|
1265
|
+
image: The image(s) to edit. Must be a supported image file or an array of images.
|
|
1266
|
+
|
|
1267
|
+
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
|
|
1268
|
+
50MB. You can provide up to 16 images.
|
|
1269
|
+
|
|
1270
|
+
For `dall-e-2`, you can only provide one image, and it should be a square `png`
|
|
1271
|
+
file less than 4MB.
|
|
1272
|
+
|
|
1273
|
+
prompt: A text description of the desired image(s). The maximum length is 1000
|
|
1274
|
+
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
|
|
1275
|
+
|
|
1276
|
+
stream: Edit the image in streaming mode. Defaults to `false`. See the
|
|
1277
|
+
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
|
|
1278
|
+
for more information.
|
|
1279
|
+
|
|
1280
|
+
background: Allows to set transparency for the background of the generated image(s). This
|
|
1281
|
+
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
|
|
1282
|
+
`opaque` or `auto` (default value). When `auto` is used, the model will
|
|
1283
|
+
automatically determine the best background for the image.
|
|
1284
|
+
|
|
1285
|
+
If `transparent`, the output format needs to support transparency, so it should
|
|
1286
|
+
be set to either `png` (default value) or `webp`.
|
|
1287
|
+
|
|
1288
|
+
input_fidelity: Control how much effort the model will exert to match the style and features,
|
|
1289
|
+
especially facial features, of input images. This parameter is only supported
|
|
1290
|
+
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
|
|
1291
|
+
`low`. Defaults to `low`.
|
|
1292
|
+
|
|
1293
|
+
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
|
|
1294
|
+
indicate where `image` should be edited. If there are multiple images provided,
|
|
1295
|
+
the mask will be applied on the first image. Must be a valid PNG file, less than
|
|
1296
|
+
4MB, and have the same dimensions as `image`.
|
|
1297
|
+
|
|
1298
|
+
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
|
|
1299
|
+
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
|
|
1300
|
+
is used.
|
|
1301
|
+
|
|
1302
|
+
n: The number of images to generate. Must be between 1 and 10.
|
|
1303
|
+
|
|
1304
|
+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
|
|
1305
|
+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
|
|
1306
|
+
defaults to 100.
|
|
1307
|
+
|
|
1308
|
+
output_format: The format in which the generated images are returned. This parameter is only
|
|
1309
|
+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
|
|
1310
|
+
default value is `png`.
|
|
1311
|
+
|
|
1312
|
+
partial_images: The number of partial images to generate. This parameter is used for streaming
|
|
1313
|
+
responses that return partial images. Value must be between 0 and 3. When set to
|
|
1314
|
+
0, the response will be a single image sent in one streaming event.
|
|
1315
|
+
|
|
1316
|
+
Note that the final image may be sent before the full number of partial images
|
|
1317
|
+
are generated if the full image is generated more quickly.
|
|
1318
|
+
|
|
1319
|
+
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
|
|
1320
|
+
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
|
|
1321
|
+
Defaults to `auto`.
|
|
1322
|
+
|
|
1323
|
+
response_format: The format in which the generated images are returned. Must be one of `url` or
|
|
1324
|
+
`b64_json`. URLs are only valid for 60 minutes after the image has been
|
|
1325
|
+
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
|
|
1326
|
+
will always return base64-encoded images.
|
|
1327
|
+
|
|
1328
|
+
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
|
|
1329
|
+
(landscape), `1024x1536` (portrait), or `auto` (default value) for
|
|
1330
|
+
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
|
|
1331
|
+
|
|
1332
|
+
user: A unique identifier representing your end-user, which can help OpenAI to monitor
|
|
1333
|
+
and detect abuse.
|
|
1334
|
+
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
|
|
1335
|
+
|
|
1336
|
+
extra_headers: Send extra headers
|
|
1337
|
+
|
|
1338
|
+
extra_query: Add additional query parameters to the request
|
|
1339
|
+
|
|
1340
|
+
extra_body: Add additional JSON properties to the request
|
|
1341
|
+
|
|
1342
|
+
timeout: Override the client-level default timeout for this request, in seconds
|
|
1343
|
+
"""
|
|
1344
|
+
...
|
|
1345
|
+
|
|
1346
|
+
@required_args(["image", "prompt"], ["image", "prompt", "stream"])
async def edit(
    self,
    *,
    image: Union[FileTypes, SequenceNotStr[FileTypes]],
    prompt: str,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
    mask: FileTypes | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
    stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
    """Dispatch an image-edit request; see the typed overloads for full parameter docs."""
    payload = deepcopy_minimal(
        {
            "image": image,
            "prompt": prompt,
            "background": background,
            "input_fidelity": input_fidelity,
            "mask": mask,
            "model": model,
            "n": n,
            "output_compression": output_compression,
            "output_format": output_format,
            "partial_images": partial_images,
            "quality": quality,
            "response_format": response_format,
            "size": size,
            "stream": stream,
            "user": user,
        }
    )
    # Pull file-like values out of the payload so they can be sent as multipart parts.
    uploads = extract_files(cast(Mapping[str, object], payload), paths=[["image"], ["image", "<array>"], ["mask"]])
    # The Content-Type actually sent on the wire will additionally carry a
    # `boundary` parameter, e.g. `multipart/form-data; boundary=---abc--`.
    extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
    # Pick the params model that matches the streaming/non-streaming call shape.
    params_model = (
        image_edit_params.ImageEditParamsStreaming
        if stream
        else image_edit_params.ImageEditParamsNonStreaming
    )
    return await self._post(
        "/images/edits",
        body=await async_maybe_transform(payload, params_model),
        files=uploads,
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=ImagesResponse,
        stream=stream or False,
        stream_cls=AsyncStream[ImageEditStreamEvent],
    )
|
|
1410
|
+
|
|
1411
|
+
# Typing-only overload: non-streaming variant (`stream` omitted or `False`),
# which resolves to a plain `ImagesResponse`.
@overload
async def generate(
    self,
    *,
    prompt: str,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    stream: Optional[Literal[False]] | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1520
|
+
|
|
1521
|
+
# Typing-only overload: streaming variant (`stream=True`), which resolves to an
# `AsyncStream[ImageGenStreamEvent]`.
@overload
async def generate(
    self,
    *,
    prompt: str,
    stream: Literal[True],
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[ImageGenStreamEvent]:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1630
|
+
|
|
1631
|
+
# Typing-only overload: `stream` passed as a runtime `bool`, so the result may be
# either an `ImagesResponse` or an `AsyncStream[ImageGenStreamEvent]`.
@overload
async def generate(
    self,
    *,
    prompt: str,
    stream: bool,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
    """
    Creates an image given a prompt.
    [Learn more](https://platform.openai.com/docs/guides/images).

    Args:
      prompt: A text description of the desired image(s). The maximum length is 32000
          characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
          for `dall-e-3`.

      stream: Generate the image in streaming mode. Defaults to `false`. See the
          [Image generation guide](https://platform.openai.com/docs/guides/image-generation)
          for more information. This parameter is only supported for `gpt-image-1`.

      background: Allows to set transparency for the background of the generated image(s). This
          parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
          `opaque` or `auto` (default value). When `auto` is used, the model will
          automatically determine the best background for the image.

          If `transparent`, the output format needs to support transparency, so it should
          be set to either `png` (default value) or `webp`.

      model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
          `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
          `gpt-image-1` is used.

      moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
          be either `low` for less restrictive filtering or `auto` (default value).

      n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          `n=1` is supported.

      output_compression: The compression level (0-100%) for the generated images. This parameter is only
          supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
          defaults to 100.

      output_format: The format in which the generated images are returned. This parameter is only
          supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.

      partial_images: The number of partial images to generate. This parameter is used for streaming
          responses that return partial images. Value must be between 0 and 3. When set to
          0, the response will be a single image sent in one streaming event.

          Note that the final image may be sent before the full number of partial images
          are generated if the full image is generated more quickly.

      quality: The quality of the image that will be generated.

          - `auto` (default value) will automatically select the best quality for the
            given model.
          - `high`, `medium` and `low` are supported for `gpt-image-1`.
          - `hd` and `standard` are supported for `dall-e-3`.
          - `standard` is the only option for `dall-e-2`.

      response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
          returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
          after the image has been generated. This parameter isn't supported for
          `gpt-image-1` which will always return base64-encoded images.

      size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
          (landscape), `1024x1536` (portrait), or `auto` (default value) for
          `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
          one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.

      style: The style of the generated images. This parameter is only supported for
          `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
          towards generating hyper-real and dramatic images. Natural causes the model to
          produce more natural, less hyper-real looking images.

      user: A unique identifier representing your end-user, which can help OpenAI to monitor
          and detect abuse.
          [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    ...
|
|
1740
|
+
|
|
1741
|
+
@required_args(["prompt"], ["prompt", "stream"])
async def generate(
    self,
    *,
    prompt: str,
    background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
    model: Union[str, ImageModel, None] | Omit = omit,
    moderation: Optional[Literal["low", "auto"]] | Omit = omit,
    n: Optional[int] | Omit = omit,
    output_compression: Optional[int] | Omit = omit,
    output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
    partial_images: Optional[int] | Omit = omit,
    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
    response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
    size: Optional[
        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
    ]
    | Omit = omit,
    stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
    style: Optional[Literal["vivid", "natural"]] | Omit = omit,
    user: str | Omit = omit,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
    """Dispatch an image-generation request; see the typed overloads for full parameter docs."""
    payload = {
        "prompt": prompt,
        "background": background,
        "model": model,
        "moderation": moderation,
        "n": n,
        "output_compression": output_compression,
        "output_format": output_format,
        "partial_images": partial_images,
        "quality": quality,
        "response_format": response_format,
        "size": size,
        "stream": stream,
        "style": style,
        "user": user,
    }
    # Pick the params model that matches the streaming/non-streaming call shape.
    params_model = (
        image_generate_params.ImageGenerateParamsStreaming
        if stream
        else image_generate_params.ImageGenerateParamsNonStreaming
    )
    return await self._post(
        "/images/generations",
        body=await async_maybe_transform(payload, params_model),
        options=make_request_options(
            extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
        ),
        cast_to=ImagesResponse,
        stream=stream or False,
        stream_cls=AsyncStream[ImageGenStreamEvent],
    )
|
|
1799
|
+
|
|
1800
|
+
|
|
1801
|
+
class ImagesWithRawResponse:
    """Mirror of `Images` whose methods return the raw HTTP response."""

    def __init__(self, images: Images) -> None:
        self._images = images

        # Wrap each resource method so callers get the un-parsed response object.
        self.create_variation = _legacy_response.to_raw_response_wrapper(images.create_variation)
        self.edit = _legacy_response.to_raw_response_wrapper(images.edit)
        self.generate = _legacy_response.to_raw_response_wrapper(images.generate)
|
|
1814
|
+
|
|
1815
|
+
|
|
1816
|
+
class AsyncImagesWithRawResponse:
    """Mirror of `AsyncImages` whose methods return the raw HTTP response."""

    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        # Wrap each resource method so callers get the un-parsed response object.
        self.create_variation = _legacy_response.async_to_raw_response_wrapper(images.create_variation)
        self.edit = _legacy_response.async_to_raw_response_wrapper(images.edit)
        self.generate = _legacy_response.async_to_raw_response_wrapper(images.generate)
|
|
1829
|
+
|
|
1830
|
+
|
|
1831
|
+
class ImagesWithStreamingResponse:
    """Mirror of `Images` whose methods return a streamed-response wrapper."""

    def __init__(self, images: Images) -> None:
        self._images = images

        # Wrap each resource method so the response body can be consumed incrementally.
        self.create_variation = to_streamed_response_wrapper(images.create_variation)
        self.edit = to_streamed_response_wrapper(images.edit)
        self.generate = to_streamed_response_wrapper(images.generate)
|
|
1844
|
+
|
|
1845
|
+
|
|
1846
|
+
class AsyncImagesWithStreamingResponse:
    """Mirror of `AsyncImages` whose methods return a streamed-response wrapper."""

    def __init__(self, images: AsyncImages) -> None:
        self._images = images

        # Wrap each resource method so the response body can be consumed incrementally.
        self.create_variation = async_to_streamed_response_wrapper(images.create_variation)
        self.edit = async_to_streamed_response_wrapper(images.edit)
        self.generate = async_to_streamed_response_wrapper(images.generate)
|