aimlapi-sdk-python 2.8.1b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1958)
  1. aimlapi/__init__.py +243 -0
  2. aimlapi/__main__.py +3 -0
  3. aimlapi/_client.py +368 -0
  4. aimlapi/_utils/__init__.py +3 -0
  5. aimlapi/_utils/_compat.py +3 -0
  6. aimlapi/_utils/_datetime_parse.py +3 -0
  7. aimlapi/_utils/_logs.py +3 -0
  8. aimlapi/_utils/_proxy.py +3 -0
  9. aimlapi/_utils/_reflection.py +3 -0
  10. aimlapi/_utils/_resources_proxy.py +3 -0
  11. aimlapi/_utils/_streams.py +3 -0
  12. aimlapi/_utils/_sync.py +3 -0
  13. aimlapi/_utils/_transform.py +3 -0
  14. aimlapi/_utils/_typing.py +3 -0
  15. aimlapi/_utils/_utils.py +3 -0
  16. aimlapi/_version.py +9 -0
  17. aimlapi/cli/__init__.py +3 -0
  18. aimlapi/cli/_api/__init__.py +3 -0
  19. aimlapi/cli/_api/_main.py +3 -0
  20. aimlapi/cli/_api/audio.py +3 -0
  21. aimlapi/cli/_api/chat/__init__.py +3 -0
  22. aimlapi/cli/_api/chat/completions.py +3 -0
  23. aimlapi/cli/_api/completions.py +3 -0
  24. aimlapi/cli/_api/files.py +3 -0
  25. aimlapi/cli/_api/fine_tuning/__init__.py +3 -0
  26. aimlapi/cli/_api/fine_tuning/jobs.py +3 -0
  27. aimlapi/cli/_api/image.py +3 -0
  28. aimlapi/cli/_api/models.py +3 -0
  29. aimlapi/cli/_cli.py +3 -0
  30. aimlapi/cli/_errors.py +3 -0
  31. aimlapi/cli/_models.py +3 -0
  32. aimlapi/cli/_progress.py +3 -0
  33. aimlapi/cli/_tools/__init__.py +3 -0
  34. aimlapi/cli/_tools/_main.py +3 -0
  35. aimlapi/cli/_tools/fine_tunes.py +3 -0
  36. aimlapi/cli/_tools/migrate.py +3 -0
  37. aimlapi/cli/_utils.py +3 -0
  38. aimlapi/helpers/__init__.py +3 -0
  39. aimlapi/helpers/local_audio_player.py +3 -0
  40. aimlapi/helpers/microphone.py +3 -0
  41. aimlapi/lib/__init__.py +3 -0
  42. aimlapi/lib/_old_api.py +3 -0
  43. aimlapi/lib/_parsing/__init__.py +3 -0
  44. aimlapi/lib/_parsing/_completions.py +3 -0
  45. aimlapi/lib/_parsing/_responses.py +3 -0
  46. aimlapi/lib/_pydantic.py +3 -0
  47. aimlapi/lib/_realtime.py +3 -0
  48. aimlapi/lib/_tools.py +3 -0
  49. aimlapi/lib/_validators.py +3 -0
  50. aimlapi/lib/azure.py +3 -0
  51. aimlapi/lib/streaming/__init__.py +3 -0
  52. aimlapi/lib/streaming/_assistants.py +3 -0
  53. aimlapi/lib/streaming/_deltas.py +3 -0
  54. aimlapi/lib/streaming/chat/__init__.py +3 -0
  55. aimlapi/lib/streaming/chat/_completions.py +3 -0
  56. aimlapi/lib/streaming/chat/_events.py +3 -0
  57. aimlapi/lib/streaming/chat/_types.py +3 -0
  58. aimlapi/lib/streaming/responses/__init__.py +3 -0
  59. aimlapi/lib/streaming/responses/_events.py +3 -0
  60. aimlapi/lib/streaming/responses/_responses.py +3 -0
  61. aimlapi/lib/streaming/responses/_types.py +3 -0
  62. aimlapi/pagination.py +3 -0
  63. aimlapi/resources/__init__.py +3 -0
  64. aimlapi/resources/audio/__init__.py +47 -0
  65. aimlapi/resources/audio/_polling.py +129 -0
  66. aimlapi/resources/audio/audio.py +56 -0
  67. aimlapi/resources/audio/speech.py +428 -0
  68. aimlapi/resources/audio/transcriptions.py +219 -0
  69. aimlapi/resources/audio/translations.py +3 -0
  70. aimlapi/resources/batches.py +3 -0
  71. aimlapi/resources/beta/__init__.py +3 -0
  72. aimlapi/resources/beta/assistants.py +3 -0
  73. aimlapi/resources/beta/beta.py +3 -0
  74. aimlapi/resources/beta/chatkit/__init__.py +3 -0
  75. aimlapi/resources/beta/chatkit/chatkit.py +3 -0
  76. aimlapi/resources/beta/chatkit/sessions.py +3 -0
  77. aimlapi/resources/beta/chatkit/threads.py +3 -0
  78. aimlapi/resources/beta/realtime/__init__.py +3 -0
  79. aimlapi/resources/beta/realtime/realtime.py +3 -0
  80. aimlapi/resources/beta/realtime/sessions.py +3 -0
  81. aimlapi/resources/beta/realtime/transcription_sessions.py +3 -0
  82. aimlapi/resources/beta/threads/__init__.py +3 -0
  83. aimlapi/resources/beta/threads/messages.py +3 -0
  84. aimlapi/resources/beta/threads/runs/__init__.py +3 -0
  85. aimlapi/resources/beta/threads/runs/runs.py +3 -0
  86. aimlapi/resources/beta/threads/runs/steps.py +3 -0
  87. aimlapi/resources/beta/threads/threads.py +3 -0
  88. aimlapi/resources/chat/__init__.py +3 -0
  89. aimlapi/resources/chat/chat.py +86 -0
  90. aimlapi/resources/chat/completions/__init__.py +4 -0
  91. aimlapi/resources/chat/completions/completions.py +452 -0
  92. aimlapi/resources/chat/completions/messages.py +3 -0
  93. aimlapi/resources/completions.py +3 -0
  94. aimlapi/resources/containers/__init__.py +3 -0
  95. aimlapi/resources/containers/containers.py +3 -0
  96. aimlapi/resources/containers/files/__init__.py +3 -0
  97. aimlapi/resources/containers/files/content.py +3 -0
  98. aimlapi/resources/containers/files/files.py +3 -0
  99. aimlapi/resources/conversations/__init__.py +3 -0
  100. aimlapi/resources/conversations/conversations.py +3 -0
  101. aimlapi/resources/conversations/items.py +3 -0
  102. aimlapi/resources/embeddings.py +3 -0
  103. aimlapi/resources/evals/__init__.py +3 -0
  104. aimlapi/resources/evals/evals.py +3 -0
  105. aimlapi/resources/evals/runs/__init__.py +3 -0
  106. aimlapi/resources/evals/runs/output_items.py +3 -0
  107. aimlapi/resources/evals/runs/runs.py +3 -0
  108. aimlapi/resources/files.py +3 -0
  109. aimlapi/resources/fine_tuning/__init__.py +3 -0
  110. aimlapi/resources/fine_tuning/alpha/__init__.py +3 -0
  111. aimlapi/resources/fine_tuning/alpha/alpha.py +3 -0
  112. aimlapi/resources/fine_tuning/alpha/graders.py +3 -0
  113. aimlapi/resources/fine_tuning/checkpoints/__init__.py +3 -0
  114. aimlapi/resources/fine_tuning/checkpoints/checkpoints.py +3 -0
  115. aimlapi/resources/fine_tuning/checkpoints/permissions.py +3 -0
  116. aimlapi/resources/fine_tuning/fine_tuning.py +3 -0
  117. aimlapi/resources/fine_tuning/jobs/__init__.py +3 -0
  118. aimlapi/resources/fine_tuning/jobs/checkpoints.py +3 -0
  119. aimlapi/resources/fine_tuning/jobs/jobs.py +3 -0
  120. aimlapi/resources/images.py +184 -0
  121. aimlapi/resources/models.py +3 -0
  122. aimlapi/resources/moderations.py +3 -0
  123. aimlapi/resources/realtime/__init__.py +3 -0
  124. aimlapi/resources/realtime/calls.py +3 -0
  125. aimlapi/resources/realtime/client_secrets.py +3 -0
  126. aimlapi/resources/realtime/realtime.py +3 -0
  127. aimlapi/resources/responses/__init__.py +4 -0
  128. aimlapi/resources/responses/input_items.py +3 -0
  129. aimlapi/resources/responses/input_tokens.py +3 -0
  130. aimlapi/resources/responses/responses.py +229 -0
  131. aimlapi/resources/uploads/__init__.py +19 -0
  132. aimlapi/resources/uploads/parts.py +3 -0
  133. aimlapi/resources/uploads/uploads.py +99 -0
  134. aimlapi/resources/vector_stores/__init__.py +3 -0
  135. aimlapi/resources/vector_stores/file_batches.py +3 -0
  136. aimlapi/resources/vector_stores/files.py +3 -0
  137. aimlapi/resources/vector_stores/vector_stores.py +3 -0
  138. aimlapi/resources/videos.py +267 -0
  139. aimlapi/resources/webhooks.py +3 -0
  140. aimlapi/types/__init__.py +3 -0
  141. aimlapi/types/audio/__init__.py +3 -0
  142. aimlapi/types/audio/speech_create_params.py +3 -0
  143. aimlapi/types/audio/speech_model.py +3 -0
  144. aimlapi/types/audio/transcription.py +3 -0
  145. aimlapi/types/audio/transcription_create_params.py +3 -0
  146. aimlapi/types/audio/transcription_create_response.py +3 -0
  147. aimlapi/types/audio/transcription_diarized.py +3 -0
  148. aimlapi/types/audio/transcription_diarized_segment.py +3 -0
  149. aimlapi/types/audio/transcription_include.py +3 -0
  150. aimlapi/types/audio/transcription_segment.py +3 -0
  151. aimlapi/types/audio/transcription_stream_event.py +3 -0
  152. aimlapi/types/audio/transcription_text_delta_event.py +3 -0
  153. aimlapi/types/audio/transcription_text_done_event.py +3 -0
  154. aimlapi/types/audio/transcription_text_segment_event.py +3 -0
  155. aimlapi/types/audio/transcription_verbose.py +3 -0
  156. aimlapi/types/audio/transcription_word.py +3 -0
  157. aimlapi/types/audio/translation.py +3 -0
  158. aimlapi/types/audio/translation_create_params.py +3 -0
  159. aimlapi/types/audio/translation_create_response.py +3 -0
  160. aimlapi/types/audio/translation_verbose.py +3 -0
  161. aimlapi/types/audio_model.py +3 -0
  162. aimlapi/types/audio_response_format.py +3 -0
  163. aimlapi/types/auto_file_chunking_strategy_param.py +3 -0
  164. aimlapi/types/batch.py +3 -0
  165. aimlapi/types/batch_create_params.py +3 -0
  166. aimlapi/types/batch_error.py +3 -0
  167. aimlapi/types/batch_list_params.py +3 -0
  168. aimlapi/types/batch_request_counts.py +3 -0
  169. aimlapi/types/batch_usage.py +3 -0
  170. aimlapi/types/beta/__init__.py +3 -0
  171. aimlapi/types/beta/assistant.py +3 -0
  172. aimlapi/types/beta/assistant_create_params.py +3 -0
  173. aimlapi/types/beta/assistant_deleted.py +3 -0
  174. aimlapi/types/beta/assistant_list_params.py +3 -0
  175. aimlapi/types/beta/assistant_response_format_option.py +3 -0
  176. aimlapi/types/beta/assistant_response_format_option_param.py +3 -0
  177. aimlapi/types/beta/assistant_stream_event.py +3 -0
  178. aimlapi/types/beta/assistant_tool.py +3 -0
  179. aimlapi/types/beta/assistant_tool_choice.py +3 -0
  180. aimlapi/types/beta/assistant_tool_choice_function.py +3 -0
  181. aimlapi/types/beta/assistant_tool_choice_function_param.py +3 -0
  182. aimlapi/types/beta/assistant_tool_choice_option.py +3 -0
  183. aimlapi/types/beta/assistant_tool_choice_option_param.py +3 -0
  184. aimlapi/types/beta/assistant_tool_choice_param.py +3 -0
  185. aimlapi/types/beta/assistant_tool_param.py +3 -0
  186. aimlapi/types/beta/assistant_update_params.py +3 -0
  187. aimlapi/types/beta/chat/__init__.py +3 -0
  188. aimlapi/types/beta/chatkit/__init__.py +3 -0
  189. aimlapi/types/beta/chatkit/chat_session.py +3 -0
  190. aimlapi/types/beta/chatkit/chat_session_automatic_thread_titling.py +3 -0
  191. aimlapi/types/beta/chatkit/chat_session_chatkit_configuration.py +3 -0
  192. aimlapi/types/beta/chatkit/chat_session_chatkit_configuration_param.py +3 -0
  193. aimlapi/types/beta/chatkit/chat_session_expires_after_param.py +3 -0
  194. aimlapi/types/beta/chatkit/chat_session_file_upload.py +3 -0
  195. aimlapi/types/beta/chatkit/chat_session_history.py +3 -0
  196. aimlapi/types/beta/chatkit/chat_session_rate_limits.py +3 -0
  197. aimlapi/types/beta/chatkit/chat_session_rate_limits_param.py +3 -0
  198. aimlapi/types/beta/chatkit/chat_session_status.py +3 -0
  199. aimlapi/types/beta/chatkit/chat_session_workflow_param.py +3 -0
  200. aimlapi/types/beta/chatkit/chatkit_attachment.py +3 -0
  201. aimlapi/types/beta/chatkit/chatkit_response_output_text.py +3 -0
  202. aimlapi/types/beta/chatkit/chatkit_thread.py +3 -0
  203. aimlapi/types/beta/chatkit/chatkit_thread_assistant_message_item.py +3 -0
  204. aimlapi/types/beta/chatkit/chatkit_thread_item_list.py +3 -0
  205. aimlapi/types/beta/chatkit/chatkit_thread_user_message_item.py +3 -0
  206. aimlapi/types/beta/chatkit/chatkit_widget_item.py +3 -0
  207. aimlapi/types/beta/chatkit/session_create_params.py +3 -0
  208. aimlapi/types/beta/chatkit/thread_delete_response.py +3 -0
  209. aimlapi/types/beta/chatkit/thread_list_items_params.py +3 -0
  210. aimlapi/types/beta/chatkit/thread_list_params.py +3 -0
  211. aimlapi/types/beta/chatkit_workflow.py +3 -0
  212. aimlapi/types/beta/code_interpreter_tool.py +3 -0
  213. aimlapi/types/beta/code_interpreter_tool_param.py +3 -0
  214. aimlapi/types/beta/file_search_tool.py +3 -0
  215. aimlapi/types/beta/file_search_tool_param.py +3 -0
  216. aimlapi/types/beta/function_tool.py +3 -0
  217. aimlapi/types/beta/function_tool_param.py +3 -0
  218. aimlapi/types/beta/realtime/__init__.py +3 -0
  219. aimlapi/types/beta/realtime/conversation_created_event.py +3 -0
  220. aimlapi/types/beta/realtime/conversation_item.py +3 -0
  221. aimlapi/types/beta/realtime/conversation_item_content.py +3 -0
  222. aimlapi/types/beta/realtime/conversation_item_content_param.py +3 -0
  223. aimlapi/types/beta/realtime/conversation_item_create_event.py +3 -0
  224. aimlapi/types/beta/realtime/conversation_item_create_event_param.py +3 -0
  225. aimlapi/types/beta/realtime/conversation_item_created_event.py +3 -0
  226. aimlapi/types/beta/realtime/conversation_item_delete_event.py +3 -0
  227. aimlapi/types/beta/realtime/conversation_item_delete_event_param.py +3 -0
  228. aimlapi/types/beta/realtime/conversation_item_deleted_event.py +3 -0
  229. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
  230. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
  231. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
  232. aimlapi/types/beta/realtime/conversation_item_param.py +3 -0
  233. aimlapi/types/beta/realtime/conversation_item_retrieve_event.py +3 -0
  234. aimlapi/types/beta/realtime/conversation_item_retrieve_event_param.py +3 -0
  235. aimlapi/types/beta/realtime/conversation_item_truncate_event.py +3 -0
  236. aimlapi/types/beta/realtime/conversation_item_truncate_event_param.py +3 -0
  237. aimlapi/types/beta/realtime/conversation_item_truncated_event.py +3 -0
  238. aimlapi/types/beta/realtime/conversation_item_with_reference.py +3 -0
  239. aimlapi/types/beta/realtime/conversation_item_with_reference_param.py +3 -0
  240. aimlapi/types/beta/realtime/error_event.py +3 -0
  241. aimlapi/types/beta/realtime/input_audio_buffer_append_event.py +3 -0
  242. aimlapi/types/beta/realtime/input_audio_buffer_append_event_param.py +3 -0
  243. aimlapi/types/beta/realtime/input_audio_buffer_clear_event.py +3 -0
  244. aimlapi/types/beta/realtime/input_audio_buffer_clear_event_param.py +3 -0
  245. aimlapi/types/beta/realtime/input_audio_buffer_cleared_event.py +3 -0
  246. aimlapi/types/beta/realtime/input_audio_buffer_commit_event.py +3 -0
  247. aimlapi/types/beta/realtime/input_audio_buffer_commit_event_param.py +3 -0
  248. aimlapi/types/beta/realtime/input_audio_buffer_committed_event.py +3 -0
  249. aimlapi/types/beta/realtime/input_audio_buffer_speech_started_event.py +3 -0
  250. aimlapi/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
  251. aimlapi/types/beta/realtime/rate_limits_updated_event.py +3 -0
  252. aimlapi/types/beta/realtime/realtime_client_event.py +3 -0
  253. aimlapi/types/beta/realtime/realtime_client_event_param.py +3 -0
  254. aimlapi/types/beta/realtime/realtime_connect_params.py +3 -0
  255. aimlapi/types/beta/realtime/realtime_response.py +3 -0
  256. aimlapi/types/beta/realtime/realtime_response_status.py +3 -0
  257. aimlapi/types/beta/realtime/realtime_response_usage.py +3 -0
  258. aimlapi/types/beta/realtime/realtime_server_event.py +3 -0
  259. aimlapi/types/beta/realtime/response_audio_delta_event.py +3 -0
  260. aimlapi/types/beta/realtime/response_audio_done_event.py +3 -0
  261. aimlapi/types/beta/realtime/response_audio_transcript_delta_event.py +3 -0
  262. aimlapi/types/beta/realtime/response_audio_transcript_done_event.py +3 -0
  263. aimlapi/types/beta/realtime/response_cancel_event.py +3 -0
  264. aimlapi/types/beta/realtime/response_cancel_event_param.py +3 -0
  265. aimlapi/types/beta/realtime/response_content_part_added_event.py +3 -0
  266. aimlapi/types/beta/realtime/response_content_part_done_event.py +3 -0
  267. aimlapi/types/beta/realtime/response_create_event.py +3 -0
  268. aimlapi/types/beta/realtime/response_create_event_param.py +3 -0
  269. aimlapi/types/beta/realtime/response_created_event.py +3 -0
  270. aimlapi/types/beta/realtime/response_done_event.py +3 -0
  271. aimlapi/types/beta/realtime/response_function_call_arguments_delta_event.py +3 -0
  272. aimlapi/types/beta/realtime/response_function_call_arguments_done_event.py +3 -0
  273. aimlapi/types/beta/realtime/response_output_item_added_event.py +3 -0
  274. aimlapi/types/beta/realtime/response_output_item_done_event.py +3 -0
  275. aimlapi/types/beta/realtime/response_text_delta_event.py +3 -0
  276. aimlapi/types/beta/realtime/response_text_done_event.py +3 -0
  277. aimlapi/types/beta/realtime/session.py +3 -0
  278. aimlapi/types/beta/realtime/session_create_params.py +3 -0
  279. aimlapi/types/beta/realtime/session_create_response.py +3 -0
  280. aimlapi/types/beta/realtime/session_created_event.py +3 -0
  281. aimlapi/types/beta/realtime/session_update_event.py +3 -0
  282. aimlapi/types/beta/realtime/session_update_event_param.py +3 -0
  283. aimlapi/types/beta/realtime/session_updated_event.py +3 -0
  284. aimlapi/types/beta/realtime/transcription_session.py +3 -0
  285. aimlapi/types/beta/realtime/transcription_session_create_params.py +3 -0
  286. aimlapi/types/beta/realtime/transcription_session_update.py +3 -0
  287. aimlapi/types/beta/realtime/transcription_session_update_param.py +3 -0
  288. aimlapi/types/beta/realtime/transcription_session_updated_event.py +3 -0
  289. aimlapi/types/beta/thread.py +3 -0
  290. aimlapi/types/beta/thread_create_and_run_params.py +3 -0
  291. aimlapi/types/beta/thread_create_params.py +3 -0
  292. aimlapi/types/beta/thread_deleted.py +3 -0
  293. aimlapi/types/beta/thread_update_params.py +3 -0
  294. aimlapi/types/beta/threads/__init__.py +3 -0
  295. aimlapi/types/beta/threads/annotation.py +3 -0
  296. aimlapi/types/beta/threads/annotation_delta.py +3 -0
  297. aimlapi/types/beta/threads/file_citation_annotation.py +3 -0
  298. aimlapi/types/beta/threads/file_citation_delta_annotation.py +3 -0
  299. aimlapi/types/beta/threads/file_path_annotation.py +3 -0
  300. aimlapi/types/beta/threads/file_path_delta_annotation.py +3 -0
  301. aimlapi/types/beta/threads/image_file.py +3 -0
  302. aimlapi/types/beta/threads/image_file_content_block.py +3 -0
  303. aimlapi/types/beta/threads/image_file_content_block_param.py +3 -0
  304. aimlapi/types/beta/threads/image_file_delta.py +3 -0
  305. aimlapi/types/beta/threads/image_file_delta_block.py +3 -0
  306. aimlapi/types/beta/threads/image_file_param.py +3 -0
  307. aimlapi/types/beta/threads/image_url.py +3 -0
  308. aimlapi/types/beta/threads/image_url_content_block.py +3 -0
  309. aimlapi/types/beta/threads/image_url_content_block_param.py +3 -0
  310. aimlapi/types/beta/threads/image_url_delta.py +3 -0
  311. aimlapi/types/beta/threads/image_url_delta_block.py +3 -0
  312. aimlapi/types/beta/threads/image_url_param.py +3 -0
  313. aimlapi/types/beta/threads/message.py +3 -0
  314. aimlapi/types/beta/threads/message_content.py +3 -0
  315. aimlapi/types/beta/threads/message_content_delta.py +3 -0
  316. aimlapi/types/beta/threads/message_content_part_param.py +3 -0
  317. aimlapi/types/beta/threads/message_create_params.py +3 -0
  318. aimlapi/types/beta/threads/message_deleted.py +3 -0
  319. aimlapi/types/beta/threads/message_delta.py +3 -0
  320. aimlapi/types/beta/threads/message_delta_event.py +3 -0
  321. aimlapi/types/beta/threads/message_list_params.py +3 -0
  322. aimlapi/types/beta/threads/message_update_params.py +3 -0
  323. aimlapi/types/beta/threads/refusal_content_block.py +3 -0
  324. aimlapi/types/beta/threads/refusal_delta_block.py +3 -0
  325. aimlapi/types/beta/threads/required_action_function_tool_call.py +3 -0
  326. aimlapi/types/beta/threads/run.py +3 -0
  327. aimlapi/types/beta/threads/run_create_params.py +3 -0
  328. aimlapi/types/beta/threads/run_list_params.py +3 -0
  329. aimlapi/types/beta/threads/run_status.py +3 -0
  330. aimlapi/types/beta/threads/run_submit_tool_outputs_params.py +3 -0
  331. aimlapi/types/beta/threads/run_update_params.py +3 -0
  332. aimlapi/types/beta/threads/runs/__init__.py +3 -0
  333. aimlapi/types/beta/threads/runs/code_interpreter_logs.py +3 -0
  334. aimlapi/types/beta/threads/runs/code_interpreter_output_image.py +3 -0
  335. aimlapi/types/beta/threads/runs/code_interpreter_tool_call.py +3 -0
  336. aimlapi/types/beta/threads/runs/code_interpreter_tool_call_delta.py +3 -0
  337. aimlapi/types/beta/threads/runs/file_search_tool_call.py +3 -0
  338. aimlapi/types/beta/threads/runs/file_search_tool_call_delta.py +3 -0
  339. aimlapi/types/beta/threads/runs/function_tool_call.py +3 -0
  340. aimlapi/types/beta/threads/runs/function_tool_call_delta.py +3 -0
  341. aimlapi/types/beta/threads/runs/message_creation_step_details.py +3 -0
  342. aimlapi/types/beta/threads/runs/run_step.py +3 -0
  343. aimlapi/types/beta/threads/runs/run_step_delta.py +3 -0
  344. aimlapi/types/beta/threads/runs/run_step_delta_event.py +3 -0
  345. aimlapi/types/beta/threads/runs/run_step_delta_message_delta.py +3 -0
  346. aimlapi/types/beta/threads/runs/run_step_include.py +3 -0
  347. aimlapi/types/beta/threads/runs/step_list_params.py +3 -0
  348. aimlapi/types/beta/threads/runs/step_retrieve_params.py +3 -0
  349. aimlapi/types/beta/threads/runs/tool_call.py +3 -0
  350. aimlapi/types/beta/threads/runs/tool_call_delta.py +3 -0
  351. aimlapi/types/beta/threads/runs/tool_call_delta_object.py +3 -0
  352. aimlapi/types/beta/threads/runs/tool_calls_step_details.py +3 -0
  353. aimlapi/types/beta/threads/text.py +3 -0
  354. aimlapi/types/beta/threads/text_content_block.py +3 -0
  355. aimlapi/types/beta/threads/text_content_block_param.py +3 -0
  356. aimlapi/types/beta/threads/text_delta.py +3 -0
  357. aimlapi/types/beta/threads/text_delta_block.py +3 -0
  358. aimlapi/types/chat/__init__.py +3 -0
  359. aimlapi/types/chat/chat_completion.py +3 -0
  360. aimlapi/types/chat/chat_completion_allowed_tool_choice_param.py +3 -0
  361. aimlapi/types/chat/chat_completion_allowed_tools_param.py +3 -0
  362. aimlapi/types/chat/chat_completion_assistant_message_param.py +3 -0
  363. aimlapi/types/chat/chat_completion_audio.py +3 -0
  364. aimlapi/types/chat/chat_completion_audio_param.py +3 -0
  365. aimlapi/types/chat/chat_completion_chunk.py +3 -0
  366. aimlapi/types/chat/chat_completion_content_part_image.py +3 -0
  367. aimlapi/types/chat/chat_completion_content_part_image_param.py +3 -0
  368. aimlapi/types/chat/chat_completion_content_part_input_audio_param.py +3 -0
  369. aimlapi/types/chat/chat_completion_content_part_param.py +3 -0
  370. aimlapi/types/chat/chat_completion_content_part_refusal_param.py +3 -0
  371. aimlapi/types/chat/chat_completion_content_part_text.py +3 -0
  372. aimlapi/types/chat/chat_completion_content_part_text_param.py +3 -0
  373. aimlapi/types/chat/chat_completion_custom_tool_param.py +3 -0
  374. aimlapi/types/chat/chat_completion_deleted.py +3 -0
  375. aimlapi/types/chat/chat_completion_developer_message_param.py +3 -0
  376. aimlapi/types/chat/chat_completion_function_call_option_param.py +3 -0
  377. aimlapi/types/chat/chat_completion_function_message_param.py +3 -0
  378. aimlapi/types/chat/chat_completion_function_tool.py +3 -0
  379. aimlapi/types/chat/chat_completion_function_tool_param.py +3 -0
  380. aimlapi/types/chat/chat_completion_message.py +3 -0
  381. aimlapi/types/chat/chat_completion_message_custom_tool_call.py +3 -0
  382. aimlapi/types/chat/chat_completion_message_custom_tool_call_param.py +3 -0
  383. aimlapi/types/chat/chat_completion_message_function_tool_call.py +3 -0
  384. aimlapi/types/chat/chat_completion_message_function_tool_call_param.py +3 -0
  385. aimlapi/types/chat/chat_completion_message_param.py +3 -0
  386. aimlapi/types/chat/chat_completion_message_tool_call.py +3 -0
  387. aimlapi/types/chat/chat_completion_message_tool_call_param.py +3 -0
  388. aimlapi/types/chat/chat_completion_message_tool_call_union_param.py +3 -0
  389. aimlapi/types/chat/chat_completion_modality.py +3 -0
  390. aimlapi/types/chat/chat_completion_named_tool_choice_custom_param.py +3 -0
  391. aimlapi/types/chat/chat_completion_named_tool_choice_param.py +3 -0
  392. aimlapi/types/chat/chat_completion_prediction_content_param.py +3 -0
  393. aimlapi/types/chat/chat_completion_reasoning_effort.py +3 -0
  394. aimlapi/types/chat/chat_completion_role.py +3 -0
  395. aimlapi/types/chat/chat_completion_store_message.py +3 -0
  396. aimlapi/types/chat/chat_completion_stream_options_param.py +3 -0
  397. aimlapi/types/chat/chat_completion_system_message_param.py +3 -0
  398. aimlapi/types/chat/chat_completion_token_logprob.py +3 -0
  399. aimlapi/types/chat/chat_completion_tool_choice_option_param.py +3 -0
  400. aimlapi/types/chat/chat_completion_tool_message_param.py +3 -0
  401. aimlapi/types/chat/chat_completion_tool_param.py +3 -0
  402. aimlapi/types/chat/chat_completion_tool_union_param.py +3 -0
  403. aimlapi/types/chat/chat_completion_user_message_param.py +3 -0
  404. aimlapi/types/chat/completion_create_params.py +3 -0
  405. aimlapi/types/chat/completion_list_params.py +3 -0
  406. aimlapi/types/chat/completion_update_params.py +3 -0
  407. aimlapi/types/chat/completions/__init__.py +3 -0
  408. aimlapi/types/chat/completions/message_list_params.py +3 -0
  409. aimlapi/types/chat/parsed_chat_completion.py +3 -0
  410. aimlapi/types/chat/parsed_function_tool_call.py +3 -0
  411. aimlapi/types/chat_model.py +3 -0
  412. aimlapi/types/completion.py +3 -0
  413. aimlapi/types/completion_choice.py +3 -0
  414. aimlapi/types/completion_create_params.py +3 -0
  415. aimlapi/types/completion_usage.py +3 -0
  416. aimlapi/types/container_create_params.py +3 -0
  417. aimlapi/types/container_create_response.py +3 -0
  418. aimlapi/types/container_list_params.py +3 -0
  419. aimlapi/types/container_list_response.py +3 -0
  420. aimlapi/types/container_retrieve_response.py +3 -0
  421. aimlapi/types/containers/__init__.py +3 -0
  422. aimlapi/types/containers/file_create_params.py +3 -0
  423. aimlapi/types/containers/file_create_response.py +3 -0
  424. aimlapi/types/containers/file_list_params.py +3 -0
  425. aimlapi/types/containers/file_list_response.py +3 -0
  426. aimlapi/types/containers/file_retrieve_response.py +3 -0
  427. aimlapi/types/containers/files/__init__.py +3 -0
  428. aimlapi/types/conversations/__init__.py +3 -0
  429. aimlapi/types/conversations/computer_screenshot_content.py +3 -0
  430. aimlapi/types/conversations/conversation.py +3 -0
  431. aimlapi/types/conversations/conversation_create_params.py +3 -0
  432. aimlapi/types/conversations/conversation_deleted_resource.py +3 -0
  433. aimlapi/types/conversations/conversation_item.py +3 -0
  434. aimlapi/types/conversations/conversation_item_list.py +3 -0
  435. aimlapi/types/conversations/conversation_update_params.py +3 -0
  436. aimlapi/types/conversations/input_file_content.py +3 -0
  437. aimlapi/types/conversations/input_file_content_param.py +3 -0
  438. aimlapi/types/conversations/input_image_content.py +3 -0
  439. aimlapi/types/conversations/input_image_content_param.py +3 -0
  440. aimlapi/types/conversations/input_text_content.py +3 -0
  441. aimlapi/types/conversations/input_text_content_param.py +3 -0
  442. aimlapi/types/conversations/item_create_params.py +3 -0
  443. aimlapi/types/conversations/item_list_params.py +3 -0
  444. aimlapi/types/conversations/item_retrieve_params.py +3 -0
  445. aimlapi/types/conversations/message.py +3 -0
  446. aimlapi/types/conversations/output_text_content.py +3 -0
  447. aimlapi/types/conversations/output_text_content_param.py +3 -0
  448. aimlapi/types/conversations/refusal_content.py +3 -0
  449. aimlapi/types/conversations/refusal_content_param.py +3 -0
  450. aimlapi/types/conversations/summary_text_content.py +3 -0
  451. aimlapi/types/conversations/text_content.py +3 -0
  452. aimlapi/types/create_embedding_response.py +3 -0
  453. aimlapi/types/embedding.py +3 -0
  454. aimlapi/types/embedding_create_params.py +3 -0
  455. aimlapi/types/embedding_model.py +3 -0
  456. aimlapi/types/eval_create_params.py +3 -0
  457. aimlapi/types/eval_create_response.py +3 -0
  458. aimlapi/types/eval_custom_data_source_config.py +3 -0
  459. aimlapi/types/eval_delete_response.py +3 -0
  460. aimlapi/types/eval_list_params.py +3 -0
  461. aimlapi/types/eval_list_response.py +3 -0
  462. aimlapi/types/eval_retrieve_response.py +3 -0
  463. aimlapi/types/eval_stored_completions_data_source_config.py +3 -0
  464. aimlapi/types/eval_update_params.py +3 -0
  465. aimlapi/types/eval_update_response.py +3 -0
  466. aimlapi/types/evals/__init__.py +3 -0
  467. aimlapi/types/evals/create_eval_completions_run_data_source.py +3 -0
  468. aimlapi/types/evals/create_eval_completions_run_data_source_param.py +3 -0
  469. aimlapi/types/evals/create_eval_jsonl_run_data_source.py +3 -0
  470. aimlapi/types/evals/create_eval_jsonl_run_data_source_param.py +3 -0
  471. aimlapi/types/evals/eval_api_error.py +3 -0
  472. aimlapi/types/evals/run_cancel_response.py +3 -0
  473. aimlapi/types/evals/run_create_params.py +3 -0
  474. aimlapi/types/evals/run_create_response.py +3 -0
  475. aimlapi/types/evals/run_delete_response.py +3 -0
  476. aimlapi/types/evals/run_list_params.py +3 -0
  477. aimlapi/types/evals/run_list_response.py +3 -0
  478. aimlapi/types/evals/run_retrieve_response.py +3 -0
  479. aimlapi/types/evals/runs/__init__.py +3 -0
  480. aimlapi/types/evals/runs/output_item_list_params.py +3 -0
  481. aimlapi/types/evals/runs/output_item_list_response.py +3 -0
  482. aimlapi/types/evals/runs/output_item_retrieve_response.py +3 -0
  483. aimlapi/types/file_chunking_strategy.py +3 -0
  484. aimlapi/types/file_chunking_strategy_param.py +3 -0
  485. aimlapi/types/file_content.py +3 -0
  486. aimlapi/types/file_create_params.py +3 -0
  487. aimlapi/types/file_deleted.py +3 -0
  488. aimlapi/types/file_list_params.py +3 -0
  489. aimlapi/types/file_object.py +3 -0
  490. aimlapi/types/file_purpose.py +3 -0
  491. aimlapi/types/fine_tuning/__init__.py +3 -0
  492. aimlapi/types/fine_tuning/alpha/__init__.py +3 -0
  493. aimlapi/types/fine_tuning/alpha/grader_run_params.py +3 -0
  494. aimlapi/types/fine_tuning/alpha/grader_run_response.py +3 -0
  495. aimlapi/types/fine_tuning/alpha/grader_validate_params.py +3 -0
  496. aimlapi/types/fine_tuning/alpha/grader_validate_response.py +3 -0
  497. aimlapi/types/fine_tuning/checkpoints/__init__.py +3 -0
  498. aimlapi/types/fine_tuning/checkpoints/permission_create_params.py +3 -0
  499. aimlapi/types/fine_tuning/checkpoints/permission_create_response.py +3 -0
  500. aimlapi/types/fine_tuning/checkpoints/permission_delete_response.py +3 -0
  501. aimlapi/types/fine_tuning/checkpoints/permission_retrieve_params.py +3 -0
  502. aimlapi/types/fine_tuning/checkpoints/permission_retrieve_response.py +3 -0
  503. aimlapi/types/fine_tuning/dpo_hyperparameters.py +3 -0
  504. aimlapi/types/fine_tuning/dpo_hyperparameters_param.py +3 -0
  505. aimlapi/types/fine_tuning/dpo_method.py +3 -0
  506. aimlapi/types/fine_tuning/dpo_method_param.py +3 -0
  507. aimlapi/types/fine_tuning/fine_tuning_job.py +3 -0
  508. aimlapi/types/fine_tuning/fine_tuning_job_event.py +3 -0
  509. aimlapi/types/fine_tuning/fine_tuning_job_integration.py +3 -0
  510. aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration.py +3 -0
  511. aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +3 -0
  512. aimlapi/types/fine_tuning/job_create_params.py +3 -0
  513. aimlapi/types/fine_tuning/job_list_events_params.py +3 -0
  514. aimlapi/types/fine_tuning/job_list_params.py +3 -0
  515. aimlapi/types/fine_tuning/jobs/__init__.py +3 -0
  516. aimlapi/types/fine_tuning/jobs/checkpoint_list_params.py +3 -0
  517. aimlapi/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +3 -0
  518. aimlapi/types/fine_tuning/reinforcement_hyperparameters.py +3 -0
  519. aimlapi/types/fine_tuning/reinforcement_hyperparameters_param.py +3 -0
  520. aimlapi/types/fine_tuning/reinforcement_method.py +3 -0
  521. aimlapi/types/fine_tuning/reinforcement_method_param.py +3 -0
  522. aimlapi/types/fine_tuning/supervised_hyperparameters.py +3 -0
  523. aimlapi/types/fine_tuning/supervised_hyperparameters_param.py +3 -0
  524. aimlapi/types/fine_tuning/supervised_method.py +3 -0
  525. aimlapi/types/fine_tuning/supervised_method_param.py +3 -0
  526. aimlapi/types/graders/__init__.py +3 -0
  527. aimlapi/types/graders/label_model_grader.py +3 -0
  528. aimlapi/types/graders/label_model_grader_param.py +3 -0
  529. aimlapi/types/graders/multi_grader.py +3 -0
  530. aimlapi/types/graders/multi_grader_param.py +3 -0
  531. aimlapi/types/graders/python_grader.py +3 -0
  532. aimlapi/types/graders/python_grader_param.py +3 -0
  533. aimlapi/types/graders/score_model_grader.py +3 -0
  534. aimlapi/types/graders/score_model_grader_param.py +3 -0
  535. aimlapi/types/graders/string_check_grader.py +3 -0
  536. aimlapi/types/graders/string_check_grader_param.py +3 -0
  537. aimlapi/types/graders/text_similarity_grader.py +3 -0
  538. aimlapi/types/graders/text_similarity_grader_param.py +3 -0
  539. aimlapi/types/image.py +3 -0
  540. aimlapi/types/image_create_variation_params.py +3 -0
  541. aimlapi/types/image_edit_completed_event.py +3 -0
  542. aimlapi/types/image_edit_params.py +3 -0
  543. aimlapi/types/image_edit_partial_image_event.py +3 -0
  544. aimlapi/types/image_edit_stream_event.py +3 -0
  545. aimlapi/types/image_gen_completed_event.py +3 -0
  546. aimlapi/types/image_gen_partial_image_event.py +3 -0
  547. aimlapi/types/image_gen_stream_event.py +3 -0
  548. aimlapi/types/image_generate_params.py +3 -0
  549. aimlapi/types/image_model.py +3 -0
  550. aimlapi/types/images_response.py +3 -0
  551. aimlapi/types/model.py +3 -0
  552. aimlapi/types/model_deleted.py +3 -0
  553. aimlapi/types/moderation.py +3 -0
  554. aimlapi/types/moderation_create_params.py +3 -0
  555. aimlapi/types/moderation_create_response.py +3 -0
  556. aimlapi/types/moderation_image_url_input_param.py +3 -0
  557. aimlapi/types/moderation_model.py +3 -0
  558. aimlapi/types/moderation_multi_modal_input_param.py +3 -0
  559. aimlapi/types/moderation_text_input_param.py +3 -0
  560. aimlapi/types/other_file_chunking_strategy_object.py +3 -0
  561. aimlapi/types/realtime/__init__.py +3 -0
  562. aimlapi/types/realtime/audio_transcription.py +3 -0
  563. aimlapi/types/realtime/audio_transcription_param.py +3 -0
  564. aimlapi/types/realtime/call_accept_params.py +3 -0
  565. aimlapi/types/realtime/call_create_params.py +3 -0
  566. aimlapi/types/realtime/call_refer_params.py +3 -0
  567. aimlapi/types/realtime/call_reject_params.py +3 -0
  568. aimlapi/types/realtime/client_secret_create_params.py +3 -0
  569. aimlapi/types/realtime/client_secret_create_response.py +3 -0
  570. aimlapi/types/realtime/conversation_created_event.py +3 -0
  571. aimlapi/types/realtime/conversation_item.py +3 -0
  572. aimlapi/types/realtime/conversation_item_added.py +3 -0
  573. aimlapi/types/realtime/conversation_item_create_event.py +3 -0
  574. aimlapi/types/realtime/conversation_item_create_event_param.py +3 -0
  575. aimlapi/types/realtime/conversation_item_created_event.py +3 -0
  576. aimlapi/types/realtime/conversation_item_delete_event.py +3 -0
  577. aimlapi/types/realtime/conversation_item_delete_event_param.py +3 -0
  578. aimlapi/types/realtime/conversation_item_deleted_event.py +3 -0
  579. aimlapi/types/realtime/conversation_item_done.py +3 -0
  580. aimlapi/types/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
  581. aimlapi/types/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
  582. aimlapi/types/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
  583. aimlapi/types/realtime/conversation_item_input_audio_transcription_segment.py +3 -0
  584. aimlapi/types/realtime/conversation_item_param.py +3 -0
  585. aimlapi/types/realtime/conversation_item_retrieve_event.py +3 -0
  586. aimlapi/types/realtime/conversation_item_retrieve_event_param.py +3 -0
  587. aimlapi/types/realtime/conversation_item_truncate_event.py +3 -0
  588. aimlapi/types/realtime/conversation_item_truncate_event_param.py +3 -0
  589. aimlapi/types/realtime/conversation_item_truncated_event.py +3 -0
  590. aimlapi/types/realtime/input_audio_buffer_append_event.py +3 -0
  591. aimlapi/types/realtime/input_audio_buffer_append_event_param.py +3 -0
  592. aimlapi/types/realtime/input_audio_buffer_clear_event.py +3 -0
  593. aimlapi/types/realtime/input_audio_buffer_clear_event_param.py +3 -0
  594. aimlapi/types/realtime/input_audio_buffer_cleared_event.py +3 -0
  595. aimlapi/types/realtime/input_audio_buffer_commit_event.py +3 -0
  596. aimlapi/types/realtime/input_audio_buffer_commit_event_param.py +3 -0
  597. aimlapi/types/realtime/input_audio_buffer_committed_event.py +3 -0
  598. aimlapi/types/realtime/input_audio_buffer_speech_started_event.py +3 -0
  599. aimlapi/types/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
  600. aimlapi/types/realtime/input_audio_buffer_timeout_triggered.py +3 -0
  601. aimlapi/types/realtime/log_prob_properties.py +3 -0
  602. aimlapi/types/realtime/mcp_list_tools_completed.py +3 -0
  603. aimlapi/types/realtime/mcp_list_tools_failed.py +3 -0
  604. aimlapi/types/realtime/mcp_list_tools_in_progress.py +3 -0
  605. aimlapi/types/realtime/noise_reduction_type.py +3 -0
  606. aimlapi/types/realtime/output_audio_buffer_clear_event.py +3 -0
  607. aimlapi/types/realtime/output_audio_buffer_clear_event_param.py +3 -0
  608. aimlapi/types/realtime/rate_limits_updated_event.py +3 -0
  609. aimlapi/types/realtime/realtime_audio_config.py +3 -0
  610. aimlapi/types/realtime/realtime_audio_config_input.py +3 -0
  611. aimlapi/types/realtime/realtime_audio_config_input_param.py +3 -0
  612. aimlapi/types/realtime/realtime_audio_config_output.py +3 -0
  613. aimlapi/types/realtime/realtime_audio_config_output_param.py +3 -0
  614. aimlapi/types/realtime/realtime_audio_config_param.py +3 -0
  615. aimlapi/types/realtime/realtime_audio_formats.py +3 -0
  616. aimlapi/types/realtime/realtime_audio_formats_param.py +3 -0
  617. aimlapi/types/realtime/realtime_audio_input_turn_detection.py +3 -0
  618. aimlapi/types/realtime/realtime_audio_input_turn_detection_param.py +3 -0
  619. aimlapi/types/realtime/realtime_client_event.py +3 -0
  620. aimlapi/types/realtime/realtime_client_event_param.py +3 -0
  621. aimlapi/types/realtime/realtime_connect_params.py +3 -0
  622. aimlapi/types/realtime/realtime_conversation_item_assistant_message.py +3 -0
  623. aimlapi/types/realtime/realtime_conversation_item_assistant_message_param.py +3 -0
  624. aimlapi/types/realtime/realtime_conversation_item_function_call.py +3 -0
  625. aimlapi/types/realtime/realtime_conversation_item_function_call_output.py +3 -0
  626. aimlapi/types/realtime/realtime_conversation_item_function_call_output_param.py +3 -0
  627. aimlapi/types/realtime/realtime_conversation_item_function_call_param.py +3 -0
  628. aimlapi/types/realtime/realtime_conversation_item_system_message.py +3 -0
  629. aimlapi/types/realtime/realtime_conversation_item_system_message_param.py +3 -0
  630. aimlapi/types/realtime/realtime_conversation_item_user_message.py +3 -0
  631. aimlapi/types/realtime/realtime_conversation_item_user_message_param.py +3 -0
  632. aimlapi/types/realtime/realtime_error.py +3 -0
  633. aimlapi/types/realtime/realtime_error_event.py +3 -0
  634. aimlapi/types/realtime/realtime_function_tool.py +3 -0
  635. aimlapi/types/realtime/realtime_function_tool_param.py +3 -0
  636. aimlapi/types/realtime/realtime_mcp_approval_request.py +3 -0
  637. aimlapi/types/realtime/realtime_mcp_approval_request_param.py +3 -0
  638. aimlapi/types/realtime/realtime_mcp_approval_response.py +3 -0
  639. aimlapi/types/realtime/realtime_mcp_approval_response_param.py +3 -0
  640. aimlapi/types/realtime/realtime_mcp_list_tools.py +3 -0
  641. aimlapi/types/realtime/realtime_mcp_list_tools_param.py +3 -0
  642. aimlapi/types/realtime/realtime_mcp_protocol_error.py +3 -0
  643. aimlapi/types/realtime/realtime_mcp_protocol_error_param.py +3 -0
  644. aimlapi/types/realtime/realtime_mcp_tool_call.py +3 -0
  645. aimlapi/types/realtime/realtime_mcp_tool_call_param.py +3 -0
  646. aimlapi/types/realtime/realtime_mcp_tool_execution_error.py +3 -0
  647. aimlapi/types/realtime/realtime_mcp_tool_execution_error_param.py +3 -0
  648. aimlapi/types/realtime/realtime_mcphttp_error.py +3 -0
  649. aimlapi/types/realtime/realtime_mcphttp_error_param.py +3 -0
  650. aimlapi/types/realtime/realtime_response.py +3 -0
  651. aimlapi/types/realtime/realtime_response_create_audio_output.py +3 -0
  652. aimlapi/types/realtime/realtime_response_create_audio_output_param.py +3 -0
  653. aimlapi/types/realtime/realtime_response_create_mcp_tool.py +3 -0
  654. aimlapi/types/realtime/realtime_response_create_mcp_tool_param.py +3 -0
  655. aimlapi/types/realtime/realtime_response_create_params.py +3 -0
  656. aimlapi/types/realtime/realtime_response_create_params_param.py +3 -0
  657. aimlapi/types/realtime/realtime_response_status.py +3 -0
  658. aimlapi/types/realtime/realtime_response_usage.py +3 -0
  659. aimlapi/types/realtime/realtime_response_usage_input_token_details.py +3 -0
  660. aimlapi/types/realtime/realtime_response_usage_output_token_details.py +3 -0
  661. aimlapi/types/realtime/realtime_server_event.py +3 -0
  662. aimlapi/types/realtime/realtime_session_client_secret.py +3 -0
  663. aimlapi/types/realtime/realtime_session_create_request.py +3 -0
  664. aimlapi/types/realtime/realtime_session_create_request_param.py +3 -0
  665. aimlapi/types/realtime/realtime_session_create_response.py +3 -0
  666. aimlapi/types/realtime/realtime_tool_choice_config.py +3 -0
  667. aimlapi/types/realtime/realtime_tool_choice_config_param.py +3 -0
  668. aimlapi/types/realtime/realtime_tools_config.py +3 -0
  669. aimlapi/types/realtime/realtime_tools_config_param.py +3 -0
  670. aimlapi/types/realtime/realtime_tools_config_union.py +3 -0
  671. aimlapi/types/realtime/realtime_tools_config_union_param.py +3 -0
  672. aimlapi/types/realtime/realtime_tracing_config.py +3 -0
  673. aimlapi/types/realtime/realtime_tracing_config_param.py +3 -0
  674. aimlapi/types/realtime/realtime_transcription_session_audio.py +3 -0
  675. aimlapi/types/realtime/realtime_transcription_session_audio_input.py +3 -0
  676. aimlapi/types/realtime/realtime_transcription_session_audio_input_param.py +3 -0
  677. aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +3 -0
  678. aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +3 -0
  679. aimlapi/types/realtime/realtime_transcription_session_audio_param.py +3 -0
  680. aimlapi/types/realtime/realtime_transcription_session_create_request.py +3 -0
  681. aimlapi/types/realtime/realtime_transcription_session_create_request_param.py +3 -0
  682. aimlapi/types/realtime/realtime_transcription_session_create_response.py +3 -0
  683. aimlapi/types/realtime/realtime_transcription_session_turn_detection.py +3 -0
  684. aimlapi/types/realtime/realtime_truncation.py +3 -0
  685. aimlapi/types/realtime/realtime_truncation_param.py +3 -0
  686. aimlapi/types/realtime/realtime_truncation_retention_ratio.py +3 -0
  687. aimlapi/types/realtime/realtime_truncation_retention_ratio_param.py +3 -0
  688. aimlapi/types/realtime/response_audio_delta_event.py +3 -0
  689. aimlapi/types/realtime/response_audio_done_event.py +3 -0
  690. aimlapi/types/realtime/response_audio_transcript_delta_event.py +3 -0
  691. aimlapi/types/realtime/response_audio_transcript_done_event.py +3 -0
  692. aimlapi/types/realtime/response_cancel_event.py +3 -0
  693. aimlapi/types/realtime/response_cancel_event_param.py +3 -0
  694. aimlapi/types/realtime/response_content_part_added_event.py +3 -0
  695. aimlapi/types/realtime/response_content_part_done_event.py +3 -0
  696. aimlapi/types/realtime/response_create_event.py +3 -0
  697. aimlapi/types/realtime/response_create_event_param.py +3 -0
  698. aimlapi/types/realtime/response_created_event.py +3 -0
  699. aimlapi/types/realtime/response_done_event.py +3 -0
  700. aimlapi/types/realtime/response_function_call_arguments_delta_event.py +3 -0
  701. aimlapi/types/realtime/response_function_call_arguments_done_event.py +3 -0
  702. aimlapi/types/realtime/response_mcp_call_arguments_delta.py +3 -0
  703. aimlapi/types/realtime/response_mcp_call_arguments_done.py +3 -0
  704. aimlapi/types/realtime/response_mcp_call_completed.py +3 -0
  705. aimlapi/types/realtime/response_mcp_call_failed.py +3 -0
  706. aimlapi/types/realtime/response_mcp_call_in_progress.py +3 -0
  707. aimlapi/types/realtime/response_output_item_added_event.py +3 -0
  708. aimlapi/types/realtime/response_output_item_done_event.py +3 -0
  709. aimlapi/types/realtime/response_text_delta_event.py +3 -0
  710. aimlapi/types/realtime/response_text_done_event.py +3 -0
  711. aimlapi/types/realtime/session_created_event.py +3 -0
  712. aimlapi/types/realtime/session_update_event.py +3 -0
  713. aimlapi/types/realtime/session_update_event_param.py +3 -0
  714. aimlapi/types/realtime/session_updated_event.py +3 -0
  715. aimlapi/types/responses/__init__.py +3 -0
  716. aimlapi/types/responses/computer_tool.py +3 -0
  717. aimlapi/types/responses/computer_tool_param.py +3 -0
  718. aimlapi/types/responses/custom_tool.py +3 -0
  719. aimlapi/types/responses/custom_tool_param.py +3 -0
  720. aimlapi/types/responses/easy_input_message.py +3 -0
  721. aimlapi/types/responses/easy_input_message_param.py +3 -0
  722. aimlapi/types/responses/file_search_tool.py +3 -0
  723. aimlapi/types/responses/file_search_tool_param.py +3 -0
  724. aimlapi/types/responses/function_tool.py +3 -0
  725. aimlapi/types/responses/function_tool_param.py +3 -0
  726. aimlapi/types/responses/input_item_list_params.py +3 -0
  727. aimlapi/types/responses/input_token_count_params.py +3 -0
  728. aimlapi/types/responses/input_token_count_response.py +3 -0
  729. aimlapi/types/responses/parsed_response.py +3 -0
  730. aimlapi/types/responses/response.py +3 -0
  731. aimlapi/types/responses/response_audio_delta_event.py +3 -0
  732. aimlapi/types/responses/response_audio_done_event.py +3 -0
  733. aimlapi/types/responses/response_audio_transcript_delta_event.py +3 -0
  734. aimlapi/types/responses/response_audio_transcript_done_event.py +3 -0
  735. aimlapi/types/responses/response_code_interpreter_call_code_delta_event.py +3 -0
  736. aimlapi/types/responses/response_code_interpreter_call_code_done_event.py +3 -0
  737. aimlapi/types/responses/response_code_interpreter_call_completed_event.py +3 -0
  738. aimlapi/types/responses/response_code_interpreter_call_in_progress_event.py +3 -0
  739. aimlapi/types/responses/response_code_interpreter_call_interpreting_event.py +3 -0
  740. aimlapi/types/responses/response_code_interpreter_tool_call.py +3 -0
  741. aimlapi/types/responses/response_code_interpreter_tool_call_param.py +3 -0
  742. aimlapi/types/responses/response_completed_event.py +3 -0
  743. aimlapi/types/responses/response_computer_tool_call.py +3 -0
  744. aimlapi/types/responses/response_computer_tool_call_output_item.py +3 -0
  745. aimlapi/types/responses/response_computer_tool_call_output_screenshot.py +3 -0
  746. aimlapi/types/responses/response_computer_tool_call_output_screenshot_param.py +3 -0
  747. aimlapi/types/responses/response_computer_tool_call_param.py +3 -0
  748. aimlapi/types/responses/response_content_part_added_event.py +3 -0
  749. aimlapi/types/responses/response_content_part_done_event.py +3 -0
  750. aimlapi/types/responses/response_conversation_param.py +3 -0
  751. aimlapi/types/responses/response_create_params.py +3 -0
  752. aimlapi/types/responses/response_created_event.py +3 -0
  753. aimlapi/types/responses/response_custom_tool_call.py +3 -0
  754. aimlapi/types/responses/response_custom_tool_call_input_delta_event.py +3 -0
  755. aimlapi/types/responses/response_custom_tool_call_input_done_event.py +3 -0
  756. aimlapi/types/responses/response_custom_tool_call_output.py +3 -0
  757. aimlapi/types/responses/response_custom_tool_call_output_param.py +3 -0
  758. aimlapi/types/responses/response_custom_tool_call_param.py +3 -0
  759. aimlapi/types/responses/response_error.py +3 -0
  760. aimlapi/types/responses/response_error_event.py +3 -0
  761. aimlapi/types/responses/response_failed_event.py +3 -0
  762. aimlapi/types/responses/response_file_search_call_completed_event.py +3 -0
  763. aimlapi/types/responses/response_file_search_call_in_progress_event.py +3 -0
  764. aimlapi/types/responses/response_file_search_call_searching_event.py +3 -0
  765. aimlapi/types/responses/response_file_search_tool_call.py +3 -0
  766. aimlapi/types/responses/response_file_search_tool_call_param.py +3 -0
  767. aimlapi/types/responses/response_format_text_config.py +3 -0
  768. aimlapi/types/responses/response_format_text_config_param.py +3 -0
  769. aimlapi/types/responses/response_format_text_json_schema_config.py +3 -0
  770. aimlapi/types/responses/response_format_text_json_schema_config_param.py +3 -0
  771. aimlapi/types/responses/response_function_call_arguments_delta_event.py +3 -0
  772. aimlapi/types/responses/response_function_call_arguments_done_event.py +3 -0
  773. aimlapi/types/responses/response_function_call_output_item.py +3 -0
  774. aimlapi/types/responses/response_function_call_output_item_list.py +3 -0
  775. aimlapi/types/responses/response_function_call_output_item_list_param.py +3 -0
  776. aimlapi/types/responses/response_function_call_output_item_param.py +3 -0
  777. aimlapi/types/responses/response_function_tool_call.py +3 -0
  778. aimlapi/types/responses/response_function_tool_call_item.py +3 -0
  779. aimlapi/types/responses/response_function_tool_call_output_item.py +3 -0
  780. aimlapi/types/responses/response_function_tool_call_param.py +3 -0
  781. aimlapi/types/responses/response_function_web_search.py +3 -0
  782. aimlapi/types/responses/response_function_web_search_param.py +3 -0
  783. aimlapi/types/responses/response_image_gen_call_completed_event.py +3 -0
  784. aimlapi/types/responses/response_image_gen_call_generating_event.py +3 -0
  785. aimlapi/types/responses/response_image_gen_call_in_progress_event.py +3 -0
  786. aimlapi/types/responses/response_image_gen_call_partial_image_event.py +3 -0
  787. aimlapi/types/responses/response_in_progress_event.py +3 -0
  788. aimlapi/types/responses/response_includable.py +3 -0
  789. aimlapi/types/responses/response_incomplete_event.py +3 -0
  790. aimlapi/types/responses/response_input_audio.py +3 -0
  791. aimlapi/types/responses/response_input_audio_param.py +3 -0
  792. aimlapi/types/responses/response_input_content.py +3 -0
  793. aimlapi/types/responses/response_input_content_param.py +3 -0
  794. aimlapi/types/responses/response_input_file.py +3 -0
  795. aimlapi/types/responses/response_input_file_content.py +3 -0
  796. aimlapi/types/responses/response_input_file_content_param.py +3 -0
  797. aimlapi/types/responses/response_input_file_param.py +3 -0
  798. aimlapi/types/responses/response_input_image.py +3 -0
  799. aimlapi/types/responses/response_input_image_content.py +3 -0
  800. aimlapi/types/responses/response_input_image_content_param.py +3 -0
  801. aimlapi/types/responses/response_input_image_param.py +3 -0
  802. aimlapi/types/responses/response_input_item.py +3 -0
  803. aimlapi/types/responses/response_input_item_param.py +3 -0
  804. aimlapi/types/responses/response_input_message_content_list.py +3 -0
  805. aimlapi/types/responses/response_input_message_content_list_param.py +3 -0
  806. aimlapi/types/responses/response_input_message_item.py +3 -0
  807. aimlapi/types/responses/response_input_param.py +3 -0
  808. aimlapi/types/responses/response_input_text.py +3 -0
  809. aimlapi/types/responses/response_input_text_content.py +3 -0
  810. aimlapi/types/responses/response_input_text_content_param.py +3 -0
  811. aimlapi/types/responses/response_input_text_param.py +3 -0
  812. aimlapi/types/responses/response_item.py +3 -0
  813. aimlapi/types/responses/response_item_list.py +3 -0
  814. aimlapi/types/responses/response_mcp_call_arguments_delta_event.py +3 -0
  815. aimlapi/types/responses/response_mcp_call_arguments_done_event.py +3 -0
  816. aimlapi/types/responses/response_mcp_call_completed_event.py +3 -0
  817. aimlapi/types/responses/response_mcp_call_failed_event.py +3 -0
  818. aimlapi/types/responses/response_mcp_call_in_progress_event.py +3 -0
  819. aimlapi/types/responses/response_mcp_list_tools_completed_event.py +3 -0
  820. aimlapi/types/responses/response_mcp_list_tools_failed_event.py +3 -0
  821. aimlapi/types/responses/response_mcp_list_tools_in_progress_event.py +3 -0
  822. aimlapi/types/responses/response_output_item.py +3 -0
  823. aimlapi/types/responses/response_output_item_added_event.py +3 -0
  824. aimlapi/types/responses/response_output_item_done_event.py +3 -0
  825. aimlapi/types/responses/response_output_message.py +3 -0
  826. aimlapi/types/responses/response_output_message_param.py +3 -0
  827. aimlapi/types/responses/response_output_refusal.py +3 -0
  828. aimlapi/types/responses/response_output_refusal_param.py +3 -0
  829. aimlapi/types/responses/response_output_text.py +3 -0
  830. aimlapi/types/responses/response_output_text_annotation_added_event.py +3 -0
  831. aimlapi/types/responses/response_output_text_param.py +3 -0
  832. aimlapi/types/responses/response_prompt.py +3 -0
  833. aimlapi/types/responses/response_prompt_param.py +3 -0
  834. aimlapi/types/responses/response_queued_event.py +3 -0
  835. aimlapi/types/responses/response_reasoning_item.py +3 -0
  836. aimlapi/types/responses/response_reasoning_item_param.py +3 -0
  837. aimlapi/types/responses/response_reasoning_summary_part_added_event.py +3 -0
  838. aimlapi/types/responses/response_reasoning_summary_part_done_event.py +3 -0
  839. aimlapi/types/responses/response_reasoning_summary_text_delta_event.py +3 -0
  840. aimlapi/types/responses/response_reasoning_summary_text_done_event.py +3 -0
  841. aimlapi/types/responses/response_reasoning_text_delta_event.py +3 -0
  842. aimlapi/types/responses/response_reasoning_text_done_event.py +3 -0
  843. aimlapi/types/responses/response_refusal_delta_event.py +3 -0
  844. aimlapi/types/responses/response_refusal_done_event.py +3 -0
  845. aimlapi/types/responses/response_retrieve_params.py +3 -0
  846. aimlapi/types/responses/response_status.py +3 -0
  847. aimlapi/types/responses/response_stream_event.py +3 -0
  848. aimlapi/types/responses/response_text_config.py +3 -0
  849. aimlapi/types/responses/response_text_config_param.py +3 -0
  850. aimlapi/types/responses/response_text_delta_event.py +3 -0
  851. aimlapi/types/responses/response_text_done_event.py +3 -0
  852. aimlapi/types/responses/response_usage.py +3 -0
  853. aimlapi/types/responses/response_web_search_call_completed_event.py +3 -0
  854. aimlapi/types/responses/response_web_search_call_in_progress_event.py +3 -0
  855. aimlapi/types/responses/response_web_search_call_searching_event.py +3 -0
  856. aimlapi/types/responses/tool.py +3 -0
  857. aimlapi/types/responses/tool_choice_allowed.py +3 -0
  858. aimlapi/types/responses/tool_choice_allowed_param.py +3 -0
  859. aimlapi/types/responses/tool_choice_custom.py +3 -0
  860. aimlapi/types/responses/tool_choice_custom_param.py +3 -0
  861. aimlapi/types/responses/tool_choice_function.py +3 -0
  862. aimlapi/types/responses/tool_choice_function_param.py +3 -0
  863. aimlapi/types/responses/tool_choice_mcp.py +3 -0
  864. aimlapi/types/responses/tool_choice_mcp_param.py +3 -0
  865. aimlapi/types/responses/tool_choice_options.py +3 -0
  866. aimlapi/types/responses/tool_choice_types.py +3 -0
  867. aimlapi/types/responses/tool_choice_types_param.py +3 -0
  868. aimlapi/types/responses/tool_param.py +3 -0
  869. aimlapi/types/responses/web_search_preview_tool.py +3 -0
  870. aimlapi/types/responses/web_search_preview_tool_param.py +3 -0
  871. aimlapi/types/responses/web_search_tool.py +3 -0
  872. aimlapi/types/responses/web_search_tool_param.py +3 -0
  873. aimlapi/types/shared/__init__.py +3 -0
  874. aimlapi/types/shared/all_models.py +3 -0
  875. aimlapi/types/shared/chat_model.py +3 -0
  876. aimlapi/types/shared/comparison_filter.py +3 -0
  877. aimlapi/types/shared/compound_filter.py +3 -0
  878. aimlapi/types/shared/custom_tool_input_format.py +3 -0
  879. aimlapi/types/shared/error_object.py +3 -0
  880. aimlapi/types/shared/function_definition.py +3 -0
  881. aimlapi/types/shared/function_parameters.py +3 -0
  882. aimlapi/types/shared/metadata.py +3 -0
  883. aimlapi/types/shared/reasoning.py +3 -0
  884. aimlapi/types/shared/reasoning_effort.py +3 -0
  885. aimlapi/types/shared/response_format_json_object.py +3 -0
  886. aimlapi/types/shared/response_format_json_schema.py +3 -0
  887. aimlapi/types/shared/response_format_text.py +3 -0
  888. aimlapi/types/shared/response_format_text_grammar.py +3 -0
  889. aimlapi/types/shared/response_format_text_python.py +3 -0
  890. aimlapi/types/shared/responses_model.py +3 -0
  891. aimlapi/types/shared_params/__init__.py +3 -0
  892. aimlapi/types/shared_params/chat_model.py +3 -0
  893. aimlapi/types/shared_params/comparison_filter.py +3 -0
  894. aimlapi/types/shared_params/compound_filter.py +3 -0
  895. aimlapi/types/shared_params/custom_tool_input_format.py +3 -0
  896. aimlapi/types/shared_params/function_definition.py +3 -0
  897. aimlapi/types/shared_params/function_parameters.py +3 -0
  898. aimlapi/types/shared_params/metadata.py +3 -0
  899. aimlapi/types/shared_params/reasoning.py +3 -0
  900. aimlapi/types/shared_params/reasoning_effort.py +3 -0
  901. aimlapi/types/shared_params/response_format_json_object.py +3 -0
  902. aimlapi/types/shared_params/response_format_json_schema.py +3 -0
  903. aimlapi/types/shared_params/response_format_text.py +3 -0
  904. aimlapi/types/shared_params/responses_model.py +3 -0
  905. aimlapi/types/static_file_chunking_strategy.py +3 -0
  906. aimlapi/types/static_file_chunking_strategy_object.py +3 -0
  907. aimlapi/types/static_file_chunking_strategy_object_param.py +3 -0
  908. aimlapi/types/static_file_chunking_strategy_param.py +3 -0
  909. aimlapi/types/upload.py +3 -0
  910. aimlapi/types/upload_complete_params.py +3 -0
  911. aimlapi/types/upload_create_params.py +3 -0
  912. aimlapi/types/uploads/__init__.py +3 -0
  913. aimlapi/types/uploads/part_create_params.py +3 -0
  914. aimlapi/types/uploads/upload_part.py +3 -0
  915. aimlapi/types/vector_store.py +3 -0
  916. aimlapi/types/vector_store_create_params.py +3 -0
  917. aimlapi/types/vector_store_deleted.py +3 -0
  918. aimlapi/types/vector_store_list_params.py +3 -0
  919. aimlapi/types/vector_store_search_params.py +3 -0
  920. aimlapi/types/vector_store_search_response.py +3 -0
  921. aimlapi/types/vector_store_update_params.py +3 -0
  922. aimlapi/types/vector_stores/__init__.py +3 -0
  923. aimlapi/types/vector_stores/file_batch_create_params.py +3 -0
  924. aimlapi/types/vector_stores/file_batch_list_files_params.py +3 -0
  925. aimlapi/types/vector_stores/file_content_response.py +3 -0
  926. aimlapi/types/vector_stores/file_create_params.py +3 -0
  927. aimlapi/types/vector_stores/file_list_params.py +3 -0
  928. aimlapi/types/vector_stores/file_update_params.py +3 -0
  929. aimlapi/types/vector_stores/vector_store_file.py +3 -0
  930. aimlapi/types/vector_stores/vector_store_file_batch.py +3 -0
  931. aimlapi/types/vector_stores/vector_store_file_deleted.py +3 -0
  932. aimlapi/types/video.py +3 -0
  933. aimlapi/types/video_create_error.py +3 -0
  934. aimlapi/types/video_create_params.py +3 -0
  935. aimlapi/types/video_delete_response.py +3 -0
  936. aimlapi/types/video_download_content_params.py +3 -0
  937. aimlapi/types/video_list_params.py +3 -0
  938. aimlapi/types/video_model.py +3 -0
  939. aimlapi/types/video_remix_params.py +3 -0
  940. aimlapi/types/video_seconds.py +3 -0
  941. aimlapi/types/video_size.py +3 -0
  942. aimlapi/types/webhooks/__init__.py +3 -0
  943. aimlapi/types/webhooks/batch_cancelled_webhook_event.py +3 -0
  944. aimlapi/types/webhooks/batch_completed_webhook_event.py +3 -0
  945. aimlapi/types/webhooks/batch_expired_webhook_event.py +3 -0
  946. aimlapi/types/webhooks/batch_failed_webhook_event.py +3 -0
  947. aimlapi/types/webhooks/eval_run_canceled_webhook_event.py +3 -0
  948. aimlapi/types/webhooks/eval_run_failed_webhook_event.py +3 -0
  949. aimlapi/types/webhooks/eval_run_succeeded_webhook_event.py +3 -0
  950. aimlapi/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +3 -0
  951. aimlapi/types/webhooks/fine_tuning_job_failed_webhook_event.py +3 -0
  952. aimlapi/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +3 -0
  953. aimlapi/types/webhooks/realtime_call_incoming_webhook_event.py +3 -0
  954. aimlapi/types/webhooks/response_cancelled_webhook_event.py +3 -0
  955. aimlapi/types/webhooks/response_completed_webhook_event.py +3 -0
  956. aimlapi/types/webhooks/response_failed_webhook_event.py +3 -0
  957. aimlapi/types/webhooks/response_incomplete_webhook_event.py +3 -0
  958. aimlapi/types/webhooks/unwrap_webhook_event.py +3 -0
  959. aimlapi/types/websocket_connection_options.py +3 -0
  960. aimlapi/version.py +3 -0
  961. aimlapi_sdk_python-2.8.1b0.dist-info/METADATA +886 -0
  962. aimlapi_sdk_python-2.8.1b0.dist-info/RECORD +1958 -0
  963. aimlapi_sdk_python-2.8.1b0.dist-info/WHEEL +4 -0
  964. aimlapi_sdk_python-2.8.1b0.dist-info/entry_points.txt +2 -0
  965. aimlapi_sdk_python-2.8.1b0.dist-info/licenses/LICENSE +201 -0
  966. openai/__init__.py +395 -0
  967. openai/__main__.py +3 -0
  968. openai/_base_client.py +2027 -0
  969. openai/_client.py +1272 -0
  970. openai/_compat.py +231 -0
  971. openai/_constants.py +14 -0
  972. openai/_exceptions.py +161 -0
  973. openai/_extras/__init__.py +3 -0
  974. openai/_extras/_common.py +21 -0
  975. openai/_extras/numpy_proxy.py +37 -0
  976. openai/_extras/pandas_proxy.py +28 -0
  977. openai/_extras/sounddevice_proxy.py +28 -0
  978. openai/_files.py +123 -0
  979. openai/_legacy_response.py +488 -0
  980. openai/_models.py +897 -0
  981. openai/_module_client.py +173 -0
  982. openai/_qs.py +150 -0
  983. openai/_resource.py +43 -0
  984. openai/_response.py +848 -0
  985. openai/_streaming.py +408 -0
  986. openai/_types.py +264 -0
  987. openai/_utils/__init__.py +67 -0
  988. openai/_utils/_compat.py +45 -0
  989. openai/_utils/_datetime_parse.py +136 -0
  990. openai/_utils/_logs.py +42 -0
  991. openai/_utils/_proxy.py +65 -0
  992. openai/_utils/_reflection.py +45 -0
  993. openai/_utils/_resources_proxy.py +24 -0
  994. openai/_utils/_streams.py +12 -0
  995. openai/_utils/_sync.py +58 -0
  996. openai/_utils/_transform.py +457 -0
  997. openai/_utils/_typing.py +156 -0
  998. openai/_utils/_utils.py +437 -0
  999. openai/_version.py +4 -0
  1000. openai/cli/__init__.py +1 -0
  1001. openai/cli/_api/__init__.py +1 -0
  1002. openai/cli/_api/_main.py +17 -0
  1003. openai/cli/_api/audio.py +108 -0
  1004. openai/cli/_api/chat/__init__.py +13 -0
  1005. openai/cli/_api/chat/completions.py +160 -0
  1006. openai/cli/_api/completions.py +173 -0
  1007. openai/cli/_api/files.py +80 -0
  1008. openai/cli/_api/fine_tuning/__init__.py +13 -0
  1009. openai/cli/_api/fine_tuning/jobs.py +170 -0
  1010. openai/cli/_api/image.py +139 -0
  1011. openai/cli/_api/models.py +45 -0
  1012. openai/cli/_cli.py +233 -0
  1013. openai/cli/_errors.py +21 -0
  1014. openai/cli/_models.py +17 -0
  1015. openai/cli/_progress.py +59 -0
  1016. openai/cli/_tools/__init__.py +1 -0
  1017. openai/cli/_tools/_main.py +17 -0
  1018. openai/cli/_tools/fine_tunes.py +63 -0
  1019. openai/cli/_tools/migrate.py +164 -0
  1020. openai/cli/_utils.py +45 -0
  1021. openai/helpers/__init__.py +4 -0
  1022. openai/helpers/local_audio_player.py +165 -0
  1023. openai/helpers/microphone.py +100 -0
  1024. openai/lib/.keep +4 -0
  1025. openai/lib/__init__.py +2 -0
  1026. openai/lib/_old_api.py +72 -0
  1027. openai/lib/_parsing/__init__.py +12 -0
  1028. openai/lib/_parsing/_completions.py +305 -0
  1029. openai/lib/_parsing/_responses.py +180 -0
  1030. openai/lib/_pydantic.py +155 -0
  1031. openai/lib/_realtime.py +92 -0
  1032. openai/lib/_tools.py +66 -0
  1033. openai/lib/_validators.py +809 -0
  1034. openai/lib/azure.py +647 -0
  1035. openai/lib/streaming/__init__.py +8 -0
  1036. openai/lib/streaming/_assistants.py +1038 -0
  1037. openai/lib/streaming/_deltas.py +64 -0
  1038. openai/lib/streaming/chat/__init__.py +27 -0
  1039. openai/lib/streaming/chat/_completions.py +770 -0
  1040. openai/lib/streaming/chat/_events.py +123 -0
  1041. openai/lib/streaming/chat/_types.py +20 -0
  1042. openai/lib/streaming/responses/__init__.py +13 -0
  1043. openai/lib/streaming/responses/_events.py +148 -0
  1044. openai/lib/streaming/responses/_responses.py +372 -0
  1045. openai/lib/streaming/responses/_types.py +10 -0
  1046. openai/pagination.py +190 -0
  1047. openai/py.typed +0 -0
  1048. openai/resources/__init__.py +229 -0
  1049. openai/resources/audio/__init__.py +61 -0
  1050. openai/resources/audio/audio.py +166 -0
  1051. openai/resources/audio/speech.py +255 -0
  1052. openai/resources/audio/transcriptions.py +980 -0
  1053. openai/resources/audio/translations.py +367 -0
  1054. openai/resources/batches.py +530 -0
  1055. openai/resources/beta/__init__.py +61 -0
  1056. openai/resources/beta/assistants.py +1049 -0
  1057. openai/resources/beta/beta.py +187 -0
  1058. openai/resources/beta/chatkit/__init__.py +47 -0
  1059. openai/resources/beta/chatkit/chatkit.py +134 -0
  1060. openai/resources/beta/chatkit/sessions.py +301 -0
  1061. openai/resources/beta/chatkit/threads.py +521 -0
  1062. openai/resources/beta/realtime/__init__.py +47 -0
  1063. openai/resources/beta/realtime/realtime.py +1094 -0
  1064. openai/resources/beta/realtime/sessions.py +424 -0
  1065. openai/resources/beta/realtime/transcription_sessions.py +282 -0
  1066. openai/resources/beta/threads/__init__.py +47 -0
  1067. openai/resources/beta/threads/messages.py +718 -0
  1068. openai/resources/beta/threads/runs/__init__.py +33 -0
  1069. openai/resources/beta/threads/runs/runs.py +3122 -0
  1070. openai/resources/beta/threads/runs/steps.py +399 -0
  1071. openai/resources/beta/threads/threads.py +1935 -0
  1072. openai/resources/chat/__init__.py +33 -0
  1073. openai/resources/chat/chat.py +102 -0
  1074. openai/resources/chat/completions/__init__.py +33 -0
  1075. openai/resources/chat/completions/completions.py +3143 -0
  1076. openai/resources/chat/completions/messages.py +212 -0
  1077. openai/resources/completions.py +1160 -0
  1078. openai/resources/containers/__init__.py +33 -0
  1079. openai/resources/containers/containers.py +510 -0
  1080. openai/resources/containers/files/__init__.py +33 -0
  1081. openai/resources/containers/files/content.py +173 -0
  1082. openai/resources/containers/files/files.py +545 -0
  1083. openai/resources/conversations/__init__.py +33 -0
  1084. openai/resources/conversations/conversations.py +486 -0
  1085. openai/resources/conversations/items.py +557 -0
  1086. openai/resources/embeddings.py +298 -0
  1087. openai/resources/evals/__init__.py +33 -0
  1088. openai/resources/evals/evals.py +662 -0
  1089. openai/resources/evals/runs/__init__.py +33 -0
  1090. openai/resources/evals/runs/output_items.py +315 -0
  1091. openai/resources/evals/runs/runs.py +634 -0
  1092. openai/resources/files.py +770 -0
  1093. openai/resources/fine_tuning/__init__.py +61 -0
  1094. openai/resources/fine_tuning/alpha/__init__.py +33 -0
  1095. openai/resources/fine_tuning/alpha/alpha.py +102 -0
  1096. openai/resources/fine_tuning/alpha/graders.py +282 -0
  1097. openai/resources/fine_tuning/checkpoints/__init__.py +33 -0
  1098. openai/resources/fine_tuning/checkpoints/checkpoints.py +102 -0
  1099. openai/resources/fine_tuning/checkpoints/permissions.py +418 -0
  1100. openai/resources/fine_tuning/fine_tuning.py +166 -0
  1101. openai/resources/fine_tuning/jobs/__init__.py +33 -0
  1102. openai/resources/fine_tuning/jobs/checkpoints.py +199 -0
  1103. openai/resources/fine_tuning/jobs/jobs.py +918 -0
  1104. openai/resources/images.py +1858 -0
  1105. openai/resources/models.py +306 -0
  1106. openai/resources/moderations.py +197 -0
  1107. openai/resources/realtime/__init__.py +47 -0
  1108. openai/resources/realtime/calls.py +764 -0
  1109. openai/resources/realtime/client_secrets.py +189 -0
  1110. openai/resources/realtime/realtime.py +1079 -0
  1111. openai/resources/responses/__init__.py +47 -0
  1112. openai/resources/responses/input_items.py +226 -0
  1113. openai/resources/responses/input_tokens.py +309 -0
  1114. openai/resources/responses/responses.py +3130 -0
  1115. openai/resources/uploads/__init__.py +33 -0
  1116. openai/resources/uploads/parts.py +205 -0
  1117. openai/resources/uploads/uploads.py +719 -0
  1118. openai/resources/vector_stores/__init__.py +47 -0
  1119. openai/resources/vector_stores/file_batches.py +813 -0
  1120. openai/resources/vector_stores/files.py +939 -0
  1121. openai/resources/vector_stores/vector_stores.py +875 -0
  1122. openai/resources/videos.py +847 -0
  1123. openai/resources/webhooks.py +210 -0
  1124. openai/types/__init__.py +115 -0
  1125. openai/types/audio/__init__.py +23 -0
  1126. openai/types/audio/speech_create_params.py +57 -0
  1127. openai/types/audio/speech_model.py +7 -0
  1128. openai/types/audio/transcription.py +71 -0
  1129. openai/types/audio/transcription_create_params.py +172 -0
  1130. openai/types/audio/transcription_create_response.py +12 -0
  1131. openai/types/audio/transcription_diarized.py +63 -0
  1132. openai/types/audio/transcription_diarized_segment.py +32 -0
  1133. openai/types/audio/transcription_include.py +7 -0
  1134. openai/types/audio/transcription_segment.py +49 -0
  1135. openai/types/audio/transcription_stream_event.py +16 -0
  1136. openai/types/audio/transcription_text_delta_event.py +41 -0
  1137. openai/types/audio/transcription_text_done_event.py +63 -0
  1138. openai/types/audio/transcription_text_segment_event.py +27 -0
  1139. openai/types/audio/transcription_verbose.py +38 -0
  1140. openai/types/audio/transcription_word.py +16 -0
  1141. openai/types/audio/translation.py +9 -0
  1142. openai/types/audio/translation_create_params.py +49 -0
  1143. openai/types/audio/translation_create_response.py +11 -0
  1144. openai/types/audio/translation_verbose.py +22 -0
  1145. openai/types/audio_model.py +7 -0
  1146. openai/types/audio_response_format.py +7 -0
  1147. openai/types/auto_file_chunking_strategy_param.py +12 -0
  1148. openai/types/batch.py +104 -0
  1149. openai/types/batch_create_params.py +72 -0
  1150. openai/types/batch_error.py +21 -0
  1151. openai/types/batch_list_params.py +24 -0
  1152. openai/types/batch_request_counts.py +16 -0
  1153. openai/types/batch_usage.py +35 -0
  1154. openai/types/beta/__init__.py +34 -0
  1155. openai/types/beta/assistant.py +134 -0
  1156. openai/types/beta/assistant_create_params.py +220 -0
  1157. openai/types/beta/assistant_deleted.py +15 -0
  1158. openai/types/beta/assistant_list_params.py +39 -0
  1159. openai/types/beta/assistant_response_format_option.py +14 -0
  1160. openai/types/beta/assistant_response_format_option_param.py +16 -0
  1161. openai/types/beta/assistant_stream_event.py +294 -0
  1162. openai/types/beta/assistant_tool.py +15 -0
  1163. openai/types/beta/assistant_tool_choice.py +16 -0
  1164. openai/types/beta/assistant_tool_choice_function.py +10 -0
  1165. openai/types/beta/assistant_tool_choice_function_param.py +12 -0
  1166. openai/types/beta/assistant_tool_choice_option.py +10 -0
  1167. openai/types/beta/assistant_tool_choice_option_param.py +12 -0
  1168. openai/types/beta/assistant_tool_choice_param.py +16 -0
  1169. openai/types/beta/assistant_tool_param.py +14 -0
  1170. openai/types/beta/assistant_update_params.py +191 -0
  1171. openai/types/beta/chat/__init__.py +3 -0
  1172. openai/types/beta/chatkit/__init__.py +32 -0
  1173. openai/types/beta/chatkit/chat_session.py +43 -0
  1174. openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +10 -0
  1175. openai/types/beta/chatkit/chat_session_chatkit_configuration.py +19 -0
  1176. openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +59 -0
  1177. openai/types/beta/chatkit/chat_session_expires_after_param.py +15 -0
  1178. openai/types/beta/chatkit/chat_session_file_upload.py +18 -0
  1179. openai/types/beta/chatkit/chat_session_history.py +18 -0
  1180. openai/types/beta/chatkit/chat_session_rate_limits.py +10 -0
  1181. openai/types/beta/chatkit/chat_session_rate_limits_param.py +12 -0
  1182. openai/types/beta/chatkit/chat_session_status.py +7 -0
  1183. openai/types/beta/chatkit/chat_session_workflow_param.py +34 -0
  1184. openai/types/beta/chatkit/chatkit_attachment.py +25 -0
  1185. openai/types/beta/chatkit/chatkit_response_output_text.py +62 -0
  1186. openai/types/beta/chatkit/chatkit_thread.py +56 -0
  1187. openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +29 -0
  1188. openai/types/beta/chatkit/chatkit_thread_item_list.py +144 -0
  1189. openai/types/beta/chatkit/chatkit_thread_user_message_item.py +77 -0
  1190. openai/types/beta/chatkit/chatkit_widget_item.py +27 -0
  1191. openai/types/beta/chatkit/session_create_params.py +35 -0
  1192. openai/types/beta/chatkit/thread_delete_response.py +18 -0
  1193. openai/types/beta/chatkit/thread_list_items_params.py +27 -0
  1194. openai/types/beta/chatkit/thread_list_params.py +33 -0
  1195. openai/types/beta/chatkit_workflow.py +32 -0
  1196. openai/types/beta/code_interpreter_tool.py +12 -0
  1197. openai/types/beta/code_interpreter_tool_param.py +12 -0
  1198. openai/types/beta/file_search_tool.py +55 -0
  1199. openai/types/beta/file_search_tool_param.py +54 -0
  1200. openai/types/beta/function_tool.py +15 -0
  1201. openai/types/beta/function_tool_param.py +16 -0
  1202. openai/types/beta/realtime/__init__.py +96 -0
  1203. openai/types/beta/realtime/conversation_created_event.py +27 -0
  1204. openai/types/beta/realtime/conversation_item.py +61 -0
  1205. openai/types/beta/realtime/conversation_item_content.py +32 -0
  1206. openai/types/beta/realtime/conversation_item_content_param.py +31 -0
  1207. openai/types/beta/realtime/conversation_item_create_event.py +29 -0
  1208. openai/types/beta/realtime/conversation_item_create_event_param.py +29 -0
  1209. openai/types/beta/realtime/conversation_item_created_event.py +27 -0
  1210. openai/types/beta/realtime/conversation_item_delete_event.py +19 -0
  1211. openai/types/beta/realtime/conversation_item_delete_event_param.py +18 -0
  1212. openai/types/beta/realtime/conversation_item_deleted_event.py +18 -0
  1213. openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +87 -0
  1214. openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +39 -0
  1215. openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
  1216. openai/types/beta/realtime/conversation_item_param.py +62 -0
  1217. openai/types/beta/realtime/conversation_item_retrieve_event.py +19 -0
  1218. openai/types/beta/realtime/conversation_item_retrieve_event_param.py +18 -0
  1219. openai/types/beta/realtime/conversation_item_truncate_event.py +32 -0
  1220. openai/types/beta/realtime/conversation_item_truncate_event_param.py +31 -0
  1221. openai/types/beta/realtime/conversation_item_truncated_event.py +24 -0
  1222. openai/types/beta/realtime/conversation_item_with_reference.py +87 -0
  1223. openai/types/beta/realtime/conversation_item_with_reference_param.py +87 -0
  1224. openai/types/beta/realtime/error_event.py +36 -0
  1225. openai/types/beta/realtime/input_audio_buffer_append_event.py +23 -0
  1226. openai/types/beta/realtime/input_audio_buffer_append_event_param.py +22 -0
  1227. openai/types/beta/realtime/input_audio_buffer_clear_event.py +16 -0
  1228. openai/types/beta/realtime/input_audio_buffer_clear_event_param.py +15 -0
  1229. openai/types/beta/realtime/input_audio_buffer_cleared_event.py +15 -0
  1230. openai/types/beta/realtime/input_audio_buffer_commit_event.py +16 -0
  1231. openai/types/beta/realtime/input_audio_buffer_commit_event_param.py +15 -0
  1232. openai/types/beta/realtime/input_audio_buffer_committed_event.py +25 -0
  1233. openai/types/beta/realtime/input_audio_buffer_speech_started_event.py +26 -0
  1234. openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
  1235. openai/types/beta/realtime/rate_limits_updated_event.py +33 -0
  1236. openai/types/beta/realtime/realtime_client_event.py +47 -0
  1237. openai/types/beta/realtime/realtime_client_event_param.py +44 -0
  1238. openai/types/beta/realtime/realtime_connect_params.py +11 -0
  1239. openai/types/beta/realtime/realtime_response.py +87 -0
  1240. openai/types/beta/realtime/realtime_response_status.py +39 -0
  1241. openai/types/beta/realtime/realtime_response_usage.py +52 -0
  1242. openai/types/beta/realtime/realtime_server_event.py +133 -0
  1243. openai/types/beta/realtime/response_audio_delta_event.py +30 -0
  1244. openai/types/beta/realtime/response_audio_done_event.py +27 -0
  1245. openai/types/beta/realtime/response_audio_transcript_delta_event.py +30 -0
  1246. openai/types/beta/realtime/response_audio_transcript_done_event.py +30 -0
  1247. openai/types/beta/realtime/response_cancel_event.py +22 -0
  1248. openai/types/beta/realtime/response_cancel_event_param.py +21 -0
  1249. openai/types/beta/realtime/response_content_part_added_event.py +45 -0
  1250. openai/types/beta/realtime/response_content_part_done_event.py +45 -0
  1251. openai/types/beta/realtime/response_create_event.py +121 -0
  1252. openai/types/beta/realtime/response_create_event_param.py +122 -0
  1253. openai/types/beta/realtime/response_created_event.py +19 -0
  1254. openai/types/beta/realtime/response_done_event.py +19 -0
  1255. openai/types/beta/realtime/response_function_call_arguments_delta_event.py +30 -0
  1256. openai/types/beta/realtime/response_function_call_arguments_done_event.py +30 -0
  1257. openai/types/beta/realtime/response_output_item_added_event.py +25 -0
  1258. openai/types/beta/realtime/response_output_item_done_event.py +25 -0
  1259. openai/types/beta/realtime/response_text_delta_event.py +30 -0
  1260. openai/types/beta/realtime/response_text_done_event.py +30 -0
  1261. openai/types/beta/realtime/session.py +279 -0
  1262. openai/types/beta/realtime/session_create_params.py +298 -0
  1263. openai/types/beta/realtime/session_create_response.py +196 -0
  1264. openai/types/beta/realtime/session_created_event.py +19 -0
  1265. openai/types/beta/realtime/session_update_event.py +312 -0
  1266. openai/types/beta/realtime/session_update_event_param.py +310 -0
  1267. openai/types/beta/realtime/session_updated_event.py +19 -0
  1268. openai/types/beta/realtime/transcription_session.py +100 -0
  1269. openai/types/beta/realtime/transcription_session_create_params.py +173 -0
  1270. openai/types/beta/realtime/transcription_session_update.py +185 -0
  1271. openai/types/beta/realtime/transcription_session_update_param.py +185 -0
  1272. openai/types/beta/realtime/transcription_session_updated_event.py +24 -0
  1273. openai/types/beta/thread.py +63 -0
  1274. openai/types/beta/thread_create_and_run_params.py +397 -0
  1275. openai/types/beta/thread_create_params.py +186 -0
  1276. openai/types/beta/thread_deleted.py +15 -0
  1277. openai/types/beta/thread_update_params.py +56 -0
  1278. openai/types/beta/threads/__init__.py +46 -0
  1279. openai/types/beta/threads/annotation.py +12 -0
  1280. openai/types/beta/threads/annotation_delta.py +14 -0
  1281. openai/types/beta/threads/file_citation_annotation.py +26 -0
  1282. openai/types/beta/threads/file_citation_delta_annotation.py +33 -0
  1283. openai/types/beta/threads/file_path_annotation.py +26 -0
  1284. openai/types/beta/threads/file_path_delta_annotation.py +30 -0
  1285. openai/types/beta/threads/image_file.py +23 -0
  1286. openai/types/beta/threads/image_file_content_block.py +15 -0
  1287. openai/types/beta/threads/image_file_content_block_param.py +16 -0
  1288. openai/types/beta/threads/image_file_delta.py +23 -0
  1289. openai/types/beta/threads/image_file_delta_block.py +19 -0
  1290. openai/types/beta/threads/image_file_param.py +22 -0
  1291. openai/types/beta/threads/image_url.py +23 -0
  1292. openai/types/beta/threads/image_url_content_block.py +15 -0
  1293. openai/types/beta/threads/image_url_content_block_param.py +16 -0
  1294. openai/types/beta/threads/image_url_delta.py +22 -0
  1295. openai/types/beta/threads/image_url_delta_block.py +19 -0
  1296. openai/types/beta/threads/image_url_param.py +22 -0
  1297. openai/types/beta/threads/message.py +103 -0
  1298. openai/types/beta/threads/message_content.py +18 -0
  1299. openai/types/beta/threads/message_content_delta.py +17 -0
  1300. openai/types/beta/threads/message_content_part_param.py +14 -0
  1301. openai/types/beta/threads/message_create_params.py +55 -0
  1302. openai/types/beta/threads/message_deleted.py +15 -0
  1303. openai/types/beta/threads/message_delta.py +17 -0
  1304. openai/types/beta/threads/message_delta_event.py +19 -0
  1305. openai/types/beta/threads/message_list_params.py +42 -0
  1306. openai/types/beta/threads/message_update_params.py +24 -0
  1307. openai/types/beta/threads/refusal_content_block.py +14 -0
  1308. openai/types/beta/threads/refusal_delta_block.py +18 -0
  1309. openai/types/beta/threads/required_action_function_tool_call.py +34 -0
  1310. openai/types/beta/threads/run.py +245 -0
  1311. openai/types/beta/threads/run_create_params.py +268 -0
  1312. openai/types/beta/threads/run_list_params.py +39 -0
  1313. openai/types/beta/threads/run_status.py +17 -0
  1314. openai/types/beta/threads/run_submit_tool_outputs_params.py +52 -0
  1315. openai/types/beta/threads/run_update_params.py +24 -0
  1316. openai/types/beta/threads/runs/__init__.py +24 -0
  1317. openai/types/beta/threads/runs/code_interpreter_logs.py +19 -0
  1318. openai/types/beta/threads/runs/code_interpreter_output_image.py +26 -0
  1319. openai/types/beta/threads/runs/code_interpreter_tool_call.py +70 -0
  1320. openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +44 -0
  1321. openai/types/beta/threads/runs/file_search_tool_call.py +78 -0
  1322. openai/types/beta/threads/runs/file_search_tool_call_delta.py +25 -0
  1323. openai/types/beta/threads/runs/function_tool_call.py +38 -0
  1324. openai/types/beta/threads/runs/function_tool_call_delta.py +41 -0
  1325. openai/types/beta/threads/runs/message_creation_step_details.py +19 -0
  1326. openai/types/beta/threads/runs/run_step.py +115 -0
  1327. openai/types/beta/threads/runs/run_step_delta.py +20 -0
  1328. openai/types/beta/threads/runs/run_step_delta_event.py +19 -0
  1329. openai/types/beta/threads/runs/run_step_delta_message_delta.py +20 -0
  1330. openai/types/beta/threads/runs/run_step_include.py +7 -0
  1331. openai/types/beta/threads/runs/step_list_params.py +56 -0
  1332. openai/types/beta/threads/runs/step_retrieve_params.py +28 -0
  1333. openai/types/beta/threads/runs/tool_call.py +15 -0
  1334. openai/types/beta/threads/runs/tool_call_delta.py +16 -0
  1335. openai/types/beta/threads/runs/tool_call_delta_object.py +21 -0
  1336. openai/types/beta/threads/runs/tool_calls_step_details.py +21 -0
  1337. openai/types/beta/threads/text.py +15 -0
  1338. openai/types/beta/threads/text_content_block.py +15 -0
  1339. openai/types/beta/threads/text_content_block_param.py +15 -0
  1340. openai/types/beta/threads/text_delta.py +15 -0
  1341. openai/types/beta/threads/text_delta_block.py +19 -0
  1342. openai/types/chat/__init__.py +102 -0
  1343. openai/types/chat/chat_completion.py +89 -0
  1344. openai/types/chat/chat_completion_allowed_tool_choice_param.py +17 -0
  1345. openai/types/chat/chat_completion_allowed_tools_param.py +32 -0
  1346. openai/types/chat/chat_completion_assistant_message_param.py +70 -0
  1347. openai/types/chat/chat_completion_audio.py +25 -0
  1348. openai/types/chat/chat_completion_audio_param.py +25 -0
  1349. openai/types/chat/chat_completion_chunk.py +166 -0
  1350. openai/types/chat/chat_completion_content_part_image.py +27 -0
  1351. openai/types/chat/chat_completion_content_part_image_param.py +26 -0
  1352. openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
  1353. openai/types/chat/chat_completion_content_part_param.py +41 -0
  1354. openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
  1355. openai/types/chat/chat_completion_content_part_text.py +15 -0
  1356. openai/types/chat/chat_completion_content_part_text_param.py +15 -0
  1357. openai/types/chat/chat_completion_custom_tool_param.py +58 -0
  1358. openai/types/chat/chat_completion_deleted.py +18 -0
  1359. openai/types/chat/chat_completion_developer_message_param.py +25 -0
  1360. openai/types/chat/chat_completion_function_call_option_param.py +12 -0
  1361. openai/types/chat/chat_completion_function_message_param.py +19 -0
  1362. openai/types/chat/chat_completion_function_tool.py +15 -0
  1363. openai/types/chat/chat_completion_function_tool_param.py +16 -0
  1364. openai/types/chat/chat_completion_message.py +79 -0
  1365. openai/types/chat/chat_completion_message_custom_tool_call.py +26 -0
  1366. openai/types/chat/chat_completion_message_custom_tool_call_param.py +26 -0
  1367. openai/types/chat/chat_completion_message_function_tool_call.py +31 -0
  1368. openai/types/chat/chat_completion_message_function_tool_call_param.py +31 -0
  1369. openai/types/chat/chat_completion_message_param.py +24 -0
  1370. openai/types/chat/chat_completion_message_tool_call.py +17 -0
  1371. openai/types/chat/chat_completion_message_tool_call_param.py +14 -0
  1372. openai/types/chat/chat_completion_message_tool_call_union_param.py +15 -0
  1373. openai/types/chat/chat_completion_modality.py +7 -0
  1374. openai/types/chat/chat_completion_named_tool_choice_custom_param.py +19 -0
  1375. openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
  1376. openai/types/chat/chat_completion_prediction_content_param.py +25 -0
  1377. openai/types/chat/chat_completion_reasoning_effort.py +7 -0
  1378. openai/types/chat/chat_completion_role.py +7 -0
  1379. openai/types/chat/chat_completion_store_message.py +23 -0
  1380. openai/types/chat/chat_completion_stream_options_param.py +31 -0
  1381. openai/types/chat/chat_completion_system_message_param.py +25 -0
  1382. openai/types/chat/chat_completion_token_logprob.py +57 -0
  1383. openai/types/chat/chat_completion_tool_choice_option_param.py +19 -0
  1384. openai/types/chat/chat_completion_tool_message_param.py +21 -0
  1385. openai/types/chat/chat_completion_tool_param.py +14 -0
  1386. openai/types/chat/chat_completion_tool_union_param.py +13 -0
  1387. openai/types/chat/chat_completion_user_message_param.py +25 -0
  1388. openai/types/chat/completion_create_params.py +450 -0
  1389. openai/types/chat/completion_list_params.py +37 -0
  1390. openai/types/chat/completion_update_params.py +22 -0
  1391. openai/types/chat/completions/__init__.py +5 -0
  1392. openai/types/chat/completions/message_list_params.py +21 -0
  1393. openai/types/chat/parsed_chat_completion.py +40 -0
  1394. openai/types/chat/parsed_function_tool_call.py +29 -0
  1395. openai/types/chat_model.py +7 -0
  1396. openai/types/completion.py +37 -0
  1397. openai/types/completion_choice.py +35 -0
  1398. openai/types/completion_create_params.py +189 -0
  1399. openai/types/completion_usage.py +54 -0
  1400. openai/types/container_create_params.py +30 -0
  1401. openai/types/container_create_response.py +40 -0
  1402. openai/types/container_list_params.py +30 -0
  1403. openai/types/container_list_response.py +40 -0
  1404. openai/types/container_retrieve_response.py +40 -0
  1405. openai/types/containers/__init__.py +9 -0
  1406. openai/types/containers/file_create_params.py +17 -0
  1407. openai/types/containers/file_create_response.py +30 -0
  1408. openai/types/containers/file_list_params.py +30 -0
  1409. openai/types/containers/file_list_response.py +30 -0
  1410. openai/types/containers/file_retrieve_response.py +30 -0
  1411. openai/types/containers/files/__init__.py +3 -0
  1412. openai/types/conversations/__init__.py +27 -0
  1413. openai/types/conversations/computer_screenshot_content.py +22 -0
  1414. openai/types/conversations/conversation.py +30 -0
  1415. openai/types/conversations/conversation_create_params.py +29 -0
  1416. openai/types/conversations/conversation_deleted_resource.py +15 -0
  1417. openai/types/conversations/conversation_item.py +230 -0
  1418. openai/types/conversations/conversation_item_list.py +26 -0
  1419. openai/types/conversations/conversation_update_params.py +22 -0
  1420. openai/types/conversations/input_file_content.py +7 -0
  1421. openai/types/conversations/input_file_content_param.py +7 -0
  1422. openai/types/conversations/input_image_content.py +7 -0
  1423. openai/types/conversations/input_image_content_param.py +7 -0
  1424. openai/types/conversations/input_text_content.py +7 -0
  1425. openai/types/conversations/input_text_content_param.py +7 -0
  1426. openai/types/conversations/item_create_params.py +24 -0
  1427. openai/types/conversations/item_list_params.py +50 -0
  1428. openai/types/conversations/item_retrieve_params.py +22 -0
  1429. openai/types/conversations/message.py +66 -0
  1430. openai/types/conversations/output_text_content.py +7 -0
  1431. openai/types/conversations/output_text_content_param.py +7 -0
  1432. openai/types/conversations/refusal_content.py +7 -0
  1433. openai/types/conversations/refusal_content_param.py +7 -0
  1434. openai/types/conversations/summary_text_content.py +15 -0
  1435. openai/types/conversations/text_content.py +13 -0
  1436. openai/types/create_embedding_response.py +31 -0
  1437. openai/types/embedding.py +23 -0
  1438. openai/types/embedding_create_params.py +55 -0
  1439. openai/types/embedding_model.py +7 -0
  1440. openai/types/eval_create_params.py +202 -0
  1441. openai/types/eval_create_response.py +111 -0
  1442. openai/types/eval_custom_data_source_config.py +21 -0
  1443. openai/types/eval_delete_response.py +13 -0
  1444. openai/types/eval_list_params.py +27 -0
  1445. openai/types/eval_list_response.py +111 -0
  1446. openai/types/eval_retrieve_response.py +111 -0
  1447. openai/types/eval_stored_completions_data_source_config.py +32 -0
  1448. openai/types/eval_update_params.py +25 -0
  1449. openai/types/eval_update_response.py +111 -0
  1450. openai/types/evals/__init__.py +22 -0
  1451. openai/types/evals/create_eval_completions_run_data_source.py +236 -0
  1452. openai/types/evals/create_eval_completions_run_data_source_param.py +232 -0
  1453. openai/types/evals/create_eval_jsonl_run_data_source.py +42 -0
  1454. openai/types/evals/create_eval_jsonl_run_data_source_param.py +47 -0
  1455. openai/types/evals/eval_api_error.py +13 -0
  1456. openai/types/evals/run_cancel_response.py +417 -0
  1457. openai/types/evals/run_create_params.py +340 -0
  1458. openai/types/evals/run_create_response.py +417 -0
  1459. openai/types/evals/run_delete_response.py +15 -0
  1460. openai/types/evals/run_list_params.py +27 -0
  1461. openai/types/evals/run_list_response.py +417 -0
  1462. openai/types/evals/run_retrieve_response.py +417 -0
  1463. openai/types/evals/runs/__init__.py +7 -0
  1464. openai/types/evals/runs/output_item_list_params.py +30 -0
  1465. openai/types/evals/runs/output_item_list_response.py +134 -0
  1466. openai/types/evals/runs/output_item_retrieve_response.py +134 -0
  1467. openai/types/file_chunking_strategy.py +14 -0
  1468. openai/types/file_chunking_strategy_param.py +13 -0
  1469. openai/types/file_content.py +7 -0
  1470. openai/types/file_create_params.py +45 -0
  1471. openai/types/file_deleted.py +15 -0
  1472. openai/types/file_list_params.py +33 -0
  1473. openai/types/file_object.py +58 -0
  1474. openai/types/file_purpose.py +7 -0
  1475. openai/types/fine_tuning/__init__.py +26 -0
  1476. openai/types/fine_tuning/alpha/__init__.py +8 -0
  1477. openai/types/fine_tuning/alpha/grader_run_params.py +40 -0
  1478. openai/types/fine_tuning/alpha/grader_run_response.py +67 -0
  1479. openai/types/fine_tuning/alpha/grader_validate_params.py +24 -0
  1480. openai/types/fine_tuning/alpha/grader_validate_response.py +20 -0
  1481. openai/types/fine_tuning/checkpoints/__init__.py +9 -0
  1482. openai/types/fine_tuning/checkpoints/permission_create_params.py +14 -0
  1483. openai/types/fine_tuning/checkpoints/permission_create_response.py +21 -0
  1484. openai/types/fine_tuning/checkpoints/permission_delete_response.py +18 -0
  1485. openai/types/fine_tuning/checkpoints/permission_retrieve_params.py +21 -0
  1486. openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +34 -0
  1487. openai/types/fine_tuning/dpo_hyperparameters.py +36 -0
  1488. openai/types/fine_tuning/dpo_hyperparameters_param.py +36 -0
  1489. openai/types/fine_tuning/dpo_method.py +13 -0
  1490. openai/types/fine_tuning/dpo_method_param.py +14 -0
  1491. openai/types/fine_tuning/fine_tuning_job.py +161 -0
  1492. openai/types/fine_tuning/fine_tuning_job_event.py +32 -0
  1493. openai/types/fine_tuning/fine_tuning_job_integration.py +5 -0
  1494. openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +33 -0
  1495. openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +21 -0
  1496. openai/types/fine_tuning/job_create_params.py +176 -0
  1497. openai/types/fine_tuning/job_list_events_params.py +15 -0
  1498. openai/types/fine_tuning/job_list_params.py +23 -0
  1499. openai/types/fine_tuning/jobs/__init__.py +6 -0
  1500. openai/types/fine_tuning/jobs/checkpoint_list_params.py +15 -0
  1501. openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
  1502. openai/types/fine_tuning/reinforcement_hyperparameters.py +43 -0
  1503. openai/types/fine_tuning/reinforcement_hyperparameters_param.py +43 -0
  1504. openai/types/fine_tuning/reinforcement_method.py +24 -0
  1505. openai/types/fine_tuning/reinforcement_method_param.py +27 -0
  1506. openai/types/fine_tuning/supervised_hyperparameters.py +29 -0
  1507. openai/types/fine_tuning/supervised_hyperparameters_param.py +29 -0
  1508. openai/types/fine_tuning/supervised_method.py +13 -0
  1509. openai/types/fine_tuning/supervised_method_param.py +14 -0
  1510. openai/types/graders/__init__.py +16 -0
  1511. openai/types/graders/label_model_grader.py +70 -0
  1512. openai/types/graders/label_model_grader_param.py +77 -0
  1513. openai/types/graders/multi_grader.py +32 -0
  1514. openai/types/graders/multi_grader_param.py +35 -0
  1515. openai/types/graders/python_grader.py +22 -0
  1516. openai/types/graders/python_grader_param.py +21 -0
  1517. openai/types/graders/score_model_grader.py +109 -0
  1518. openai/types/graders/score_model_grader_param.py +115 -0
  1519. openai/types/graders/string_check_grader.py +24 -0
  1520. openai/types/graders/string_check_grader_param.py +24 -0
  1521. openai/types/graders/text_similarity_grader.py +40 -0
  1522. openai/types/graders/text_similarity_grader_param.py +42 -0
  1523. openai/types/image.py +26 -0
  1524. openai/types/image_create_variation_params.py +48 -0
  1525. openai/types/image_edit_completed_event.py +55 -0
  1526. openai/types/image_edit_params.py +145 -0
  1527. openai/types/image_edit_partial_image_event.py +33 -0
  1528. openai/types/image_edit_stream_event.py +14 -0
  1529. openai/types/image_gen_completed_event.py +55 -0
  1530. openai/types/image_gen_partial_image_event.py +33 -0
  1531. openai/types/image_gen_stream_event.py +14 -0
  1532. openai/types/image_generate_params.py +143 -0
  1533. openai/types/image_model.py +7 -0
  1534. openai/types/images_response.py +60 -0
  1535. openai/types/model.py +21 -0
  1536. openai/types/model_deleted.py +13 -0
  1537. openai/types/moderation.py +186 -0
  1538. openai/types/moderation_create_params.py +30 -0
  1539. openai/types/moderation_create_response.py +19 -0
  1540. openai/types/moderation_image_url_input_param.py +20 -0
  1541. openai/types/moderation_model.py +9 -0
  1542. openai/types/moderation_multi_modal_input_param.py +13 -0
  1543. openai/types/moderation_text_input_param.py +15 -0
  1544. openai/types/other_file_chunking_strategy_object.py +12 -0
  1545. openai/types/realtime/__init__.py +237 -0
  1546. openai/types/realtime/audio_transcription.py +37 -0
  1547. openai/types/realtime/audio_transcription_param.py +34 -0
  1548. openai/types/realtime/call_accept_params.py +122 -0
  1549. openai/types/realtime/call_create_params.py +17 -0
  1550. openai/types/realtime/call_refer_params.py +15 -0
  1551. openai/types/realtime/call_reject_params.py +15 -0
  1552. openai/types/realtime/client_secret_create_params.py +46 -0
  1553. openai/types/realtime/client_secret_create_response.py +26 -0
  1554. openai/types/realtime/conversation_created_event.py +27 -0
  1555. openai/types/realtime/conversation_item.py +32 -0
  1556. openai/types/realtime/conversation_item_added.py +26 -0
  1557. openai/types/realtime/conversation_item_create_event.py +29 -0
  1558. openai/types/realtime/conversation_item_create_event_param.py +29 -0
  1559. openai/types/realtime/conversation_item_created_event.py +27 -0
  1560. openai/types/realtime/conversation_item_delete_event.py +19 -0
  1561. openai/types/realtime/conversation_item_delete_event_param.py +18 -0
  1562. openai/types/realtime/conversation_item_deleted_event.py +18 -0
  1563. openai/types/realtime/conversation_item_done.py +26 -0
  1564. openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +79 -0
  1565. openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +36 -0
  1566. openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
  1567. openai/types/realtime/conversation_item_input_audio_transcription_segment.py +36 -0
  1568. openai/types/realtime/conversation_item_param.py +30 -0
  1569. openai/types/realtime/conversation_item_retrieve_event.py +19 -0
  1570. openai/types/realtime/conversation_item_retrieve_event_param.py +18 -0
  1571. openai/types/realtime/conversation_item_truncate_event.py +32 -0
  1572. openai/types/realtime/conversation_item_truncate_event_param.py +31 -0
  1573. openai/types/realtime/conversation_item_truncated_event.py +24 -0
  1574. openai/types/realtime/input_audio_buffer_append_event.py +23 -0
  1575. openai/types/realtime/input_audio_buffer_append_event_param.py +22 -0
  1576. openai/types/realtime/input_audio_buffer_clear_event.py +16 -0
  1577. openai/types/realtime/input_audio_buffer_clear_event_param.py +15 -0
  1578. openai/types/realtime/input_audio_buffer_cleared_event.py +15 -0
  1579. openai/types/realtime/input_audio_buffer_commit_event.py +16 -0
  1580. openai/types/realtime/input_audio_buffer_commit_event_param.py +15 -0
  1581. openai/types/realtime/input_audio_buffer_committed_event.py +25 -0
  1582. openai/types/realtime/input_audio_buffer_speech_started_event.py +26 -0
  1583. openai/types/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
  1584. openai/types/realtime/input_audio_buffer_timeout_triggered.py +30 -0
  1585. openai/types/realtime/log_prob_properties.py +18 -0
  1586. openai/types/realtime/mcp_list_tools_completed.py +18 -0
  1587. openai/types/realtime/mcp_list_tools_failed.py +18 -0
  1588. openai/types/realtime/mcp_list_tools_in_progress.py +18 -0
  1589. openai/types/realtime/noise_reduction_type.py +7 -0
  1590. openai/types/realtime/output_audio_buffer_clear_event.py +16 -0
  1591. openai/types/realtime/output_audio_buffer_clear_event_param.py +15 -0
  1592. openai/types/realtime/rate_limits_updated_event.py +33 -0
  1593. openai/types/realtime/realtime_audio_config.py +15 -0
  1594. openai/types/realtime/realtime_audio_config_input.py +63 -0
  1595. openai/types/realtime/realtime_audio_config_input_param.py +65 -0
  1596. openai/types/realtime/realtime_audio_config_output.py +36 -0
  1597. openai/types/realtime/realtime_audio_config_output_param.py +35 -0
  1598. openai/types/realtime/realtime_audio_config_param.py +16 -0
  1599. openai/types/realtime/realtime_audio_formats.py +30 -0
  1600. openai/types/realtime/realtime_audio_formats_param.py +29 -0
  1601. openai/types/realtime/realtime_audio_input_turn_detection.py +98 -0
  1602. openai/types/realtime/realtime_audio_input_turn_detection_param.py +95 -0
  1603. openai/types/realtime/realtime_client_event.py +36 -0
  1604. openai/types/realtime/realtime_client_event_param.py +34 -0
  1605. openai/types/realtime/realtime_connect_params.py +13 -0
  1606. openai/types/realtime/realtime_conversation_item_assistant_message.py +58 -0
  1607. openai/types/realtime/realtime_conversation_item_assistant_message_param.py +58 -0
  1608. openai/types/realtime/realtime_conversation_item_function_call.py +41 -0
  1609. openai/types/realtime/realtime_conversation_item_function_call_output.py +37 -0
  1610. openai/types/realtime/realtime_conversation_item_function_call_output_param.py +36 -0
  1611. openai/types/realtime/realtime_conversation_item_function_call_param.py +40 -0
  1612. openai/types/realtime/realtime_conversation_item_system_message.py +42 -0
  1613. openai/types/realtime/realtime_conversation_item_system_message_param.py +42 -0
  1614. openai/types/realtime/realtime_conversation_item_user_message.py +69 -0
  1615. openai/types/realtime/realtime_conversation_item_user_message_param.py +69 -0
  1616. openai/types/realtime/realtime_error.py +24 -0
  1617. openai/types/realtime/realtime_error_event.py +19 -0
  1618. openai/types/realtime/realtime_function_tool.py +25 -0
  1619. openai/types/realtime/realtime_function_tool_param.py +24 -0
  1620. openai/types/realtime/realtime_mcp_approval_request.py +24 -0
  1621. openai/types/realtime/realtime_mcp_approval_request_param.py +24 -0
  1622. openai/types/realtime/realtime_mcp_approval_response.py +25 -0
  1623. openai/types/realtime/realtime_mcp_approval_response_param.py +25 -0
  1624. openai/types/realtime/realtime_mcp_list_tools.py +36 -0
  1625. openai/types/realtime/realtime_mcp_list_tools_param.py +36 -0
  1626. openai/types/realtime/realtime_mcp_protocol_error.py +15 -0
  1627. openai/types/realtime/realtime_mcp_protocol_error_param.py +15 -0
  1628. openai/types/realtime/realtime_mcp_tool_call.py +43 -0
  1629. openai/types/realtime/realtime_mcp_tool_call_param.py +40 -0
  1630. openai/types/realtime/realtime_mcp_tool_execution_error.py +13 -0
  1631. openai/types/realtime/realtime_mcp_tool_execution_error_param.py +13 -0
  1632. openai/types/realtime/realtime_mcphttp_error.py +15 -0
  1633. openai/types/realtime/realtime_mcphttp_error_param.py +15 -0
  1634. openai/types/realtime/realtime_response.py +98 -0
  1635. openai/types/realtime/realtime_response_create_audio_output.py +29 -0
  1636. openai/types/realtime/realtime_response_create_audio_output_param.py +28 -0
  1637. openai/types/realtime/realtime_response_create_mcp_tool.py +135 -0
  1638. openai/types/realtime/realtime_response_create_mcp_tool_param.py +135 -0
  1639. openai/types/realtime/realtime_response_create_params.py +98 -0
  1640. openai/types/realtime/realtime_response_create_params_param.py +99 -0
  1641. openai/types/realtime/realtime_response_status.py +39 -0
  1642. openai/types/realtime/realtime_response_usage.py +41 -0
  1643. openai/types/realtime/realtime_response_usage_input_token_details.py +35 -0
  1644. openai/types/realtime/realtime_response_usage_output_token_details.py +15 -0
  1645. openai/types/realtime/realtime_server_event.py +155 -0
  1646. openai/types/realtime/realtime_session_client_secret.py +20 -0
  1647. openai/types/realtime/realtime_session_create_request.py +122 -0
  1648. openai/types/realtime/realtime_session_create_request_param.py +122 -0
  1649. openai/types/realtime/realtime_session_create_response.py +475 -0
  1650. openai/types/realtime/realtime_tool_choice_config.py +12 -0
  1651. openai/types/realtime/realtime_tool_choice_config_param.py +14 -0
  1652. openai/types/realtime/realtime_tools_config.py +10 -0
  1653. openai/types/realtime/realtime_tools_config_param.py +143 -0
  1654. openai/types/realtime/realtime_tools_config_union.py +141 -0
  1655. openai/types/realtime/realtime_tools_config_union_param.py +140 -0
  1656. openai/types/realtime/realtime_tracing_config.py +31 -0
  1657. openai/types/realtime/realtime_tracing_config_param.py +31 -0
  1658. openai/types/realtime/realtime_transcription_session_audio.py +12 -0
  1659. openai/types/realtime/realtime_transcription_session_audio_input.py +65 -0
  1660. openai/types/realtime/realtime_transcription_session_audio_input_param.py +67 -0
  1661. openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +98 -0
  1662. openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +95 -0
  1663. openai/types/realtime/realtime_transcription_session_audio_param.py +13 -0
  1664. openai/types/realtime/realtime_transcription_session_create_request.py +27 -0
  1665. openai/types/realtime/realtime_transcription_session_create_request_param.py +28 -0
  1666. openai/types/realtime/realtime_transcription_session_create_response.py +68 -0
  1667. openai/types/realtime/realtime_transcription_session_turn_detection.py +32 -0
  1668. openai/types/realtime/realtime_truncation.py +10 -0
  1669. openai/types/realtime/realtime_truncation_param.py +12 -0
  1670. openai/types/realtime/realtime_truncation_retention_ratio.py +38 -0
  1671. openai/types/realtime/realtime_truncation_retention_ratio_param.py +37 -0
  1672. openai/types/realtime/response_audio_delta_event.py +30 -0
  1673. openai/types/realtime/response_audio_done_event.py +27 -0
  1674. openai/types/realtime/response_audio_transcript_delta_event.py +30 -0
  1675. openai/types/realtime/response_audio_transcript_done_event.py +30 -0
  1676. openai/types/realtime/response_cancel_event.py +22 -0
  1677. openai/types/realtime/response_cancel_event_param.py +21 -0
  1678. openai/types/realtime/response_content_part_added_event.py +45 -0
  1679. openai/types/realtime/response_content_part_done_event.py +45 -0
  1680. openai/types/realtime/response_create_event.py +20 -0
  1681. openai/types/realtime/response_create_event_param.py +20 -0
  1682. openai/types/realtime/response_created_event.py +19 -0
  1683. openai/types/realtime/response_done_event.py +19 -0
  1684. openai/types/realtime/response_function_call_arguments_delta_event.py +30 -0
  1685. openai/types/realtime/response_function_call_arguments_done_event.py +30 -0
  1686. openai/types/realtime/response_mcp_call_arguments_delta.py +31 -0
  1687. openai/types/realtime/response_mcp_call_arguments_done.py +27 -0
  1688. openai/types/realtime/response_mcp_call_completed.py +21 -0
  1689. openai/types/realtime/response_mcp_call_failed.py +21 -0
  1690. openai/types/realtime/response_mcp_call_in_progress.py +21 -0
  1691. openai/types/realtime/response_output_item_added_event.py +25 -0
  1692. openai/types/realtime/response_output_item_done_event.py +25 -0
  1693. openai/types/realtime/response_text_delta_event.py +30 -0
  1694. openai/types/realtime/response_text_done_event.py +30 -0
  1695. openai/types/realtime/session_created_event.py +23 -0
  1696. openai/types/realtime/session_update_event.py +31 -0
  1697. openai/types/realtime/session_update_event_param.py +32 -0
  1698. openai/types/realtime/session_updated_event.py +23 -0
  1699. openai/types/responses/__init__.py +270 -0
  1700. openai/types/responses/apply_patch_tool.py +12 -0
  1701. openai/types/responses/apply_patch_tool_param.py +12 -0
  1702. openai/types/responses/computer_tool.py +21 -0
  1703. openai/types/responses/computer_tool_param.py +21 -0
  1704. openai/types/responses/custom_tool.py +23 -0
  1705. openai/types/responses/custom_tool_param.py +23 -0
  1706. openai/types/responses/easy_input_message.py +26 -0
  1707. openai/types/responses/easy_input_message_param.py +27 -0
  1708. openai/types/responses/file_search_tool.py +58 -0
  1709. openai/types/responses/file_search_tool_param.py +60 -0
  1710. openai/types/responses/function_shell_tool.py +12 -0
  1711. openai/types/responses/function_shell_tool_param.py +12 -0
  1712. openai/types/responses/function_tool.py +28 -0
  1713. openai/types/responses/function_tool_param.py +28 -0
  1714. openai/types/responses/input_item_list_params.py +34 -0
  1715. openai/types/responses/input_token_count_params.py +142 -0
  1716. openai/types/responses/input_token_count_response.py +13 -0
  1717. openai/types/responses/parsed_response.py +105 -0
  1718. openai/types/responses/response.py +307 -0
  1719. openai/types/responses/response_apply_patch_tool_call.py +76 -0
  1720. openai/types/responses/response_apply_patch_tool_call_output.py +31 -0
  1721. openai/types/responses/response_audio_delta_event.py +18 -0
  1722. openai/types/responses/response_audio_done_event.py +15 -0
  1723. openai/types/responses/response_audio_transcript_delta_event.py +18 -0
  1724. openai/types/responses/response_audio_transcript_done_event.py +15 -0
  1725. openai/types/responses/response_code_interpreter_call_code_delta_event.py +27 -0
  1726. openai/types/responses/response_code_interpreter_call_code_done_event.py +24 -0
  1727. openai/types/responses/response_code_interpreter_call_completed_event.py +24 -0
  1728. openai/types/responses/response_code_interpreter_call_in_progress_event.py +24 -0
  1729. openai/types/responses/response_code_interpreter_call_interpreting_event.py +24 -0
  1730. openai/types/responses/response_code_interpreter_tool_call.py +55 -0
  1731. openai/types/responses/response_code_interpreter_tool_call_param.py +54 -0
  1732. openai/types/responses/response_completed_event.py +19 -0
  1733. openai/types/responses/response_computer_tool_call.py +209 -0
  1734. openai/types/responses/response_computer_tool_call_output_item.py +47 -0
  1735. openai/types/responses/response_computer_tool_call_output_screenshot.py +22 -0
  1736. openai/types/responses/response_computer_tool_call_output_screenshot_param.py +21 -0
  1737. openai/types/responses/response_computer_tool_call_param.py +207 -0
  1738. openai/types/responses/response_content_part_added_event.py +44 -0
  1739. openai/types/responses/response_content_part_done_event.py +44 -0
  1740. openai/types/responses/response_conversation_param.py +12 -0
  1741. openai/types/responses/response_create_params.py +334 -0
  1742. openai/types/responses/response_created_event.py +19 -0
  1743. openai/types/responses/response_custom_tool_call.py +25 -0
  1744. openai/types/responses/response_custom_tool_call_input_delta_event.py +24 -0
  1745. openai/types/responses/response_custom_tool_call_input_done_event.py +24 -0
  1746. openai/types/responses/response_custom_tool_call_output.py +33 -0
  1747. openai/types/responses/response_custom_tool_call_output_param.py +31 -0
  1748. openai/types/responses/response_custom_tool_call_param.py +24 -0
  1749. openai/types/responses/response_error.py +34 -0
  1750. openai/types/responses/response_error_event.py +25 -0
  1751. openai/types/responses/response_failed_event.py +19 -0
  1752. openai/types/responses/response_file_search_call_completed_event.py +21 -0
  1753. openai/types/responses/response_file_search_call_in_progress_event.py +21 -0
  1754. openai/types/responses/response_file_search_call_searching_event.py +21 -0
  1755. openai/types/responses/response_file_search_tool_call.py +51 -0
  1756. openai/types/responses/response_file_search_tool_call_param.py +53 -0
  1757. openai/types/responses/response_format_text_config.py +16 -0
  1758. openai/types/responses/response_format_text_config_param.py +16 -0
  1759. openai/types/responses/response_format_text_json_schema_config.py +43 -0
  1760. openai/types/responses/response_format_text_json_schema_config_param.py +41 -0
  1761. openai/types/responses/response_function_call_arguments_delta_event.py +26 -0
  1762. openai/types/responses/response_function_call_arguments_done_event.py +26 -0
  1763. openai/types/responses/response_function_call_output_item.py +16 -0
  1764. openai/types/responses/response_function_call_output_item_list.py +10 -0
  1765. openai/types/responses/response_function_call_output_item_list_param.py +18 -0
  1766. openai/types/responses/response_function_call_output_item_param.py +16 -0
  1767. openai/types/responses/response_function_shell_call_output_content.py +36 -0
  1768. openai/types/responses/response_function_shell_call_output_content_param.py +35 -0
  1769. openai/types/responses/response_function_shell_tool_call.py +44 -0
  1770. openai/types/responses/response_function_shell_tool_call_output.py +70 -0
  1771. openai/types/responses/response_function_tool_call.py +32 -0
  1772. openai/types/responses/response_function_tool_call_item.py +10 -0
  1773. openai/types/responses/response_function_tool_call_output_item.py +40 -0
  1774. openai/types/responses/response_function_tool_call_param.py +31 -0
  1775. openai/types/responses/response_function_web_search.py +67 -0
  1776. openai/types/responses/response_function_web_search_param.py +73 -0
  1777. openai/types/responses/response_image_gen_call_completed_event.py +21 -0
  1778. openai/types/responses/response_image_gen_call_generating_event.py +21 -0
  1779. openai/types/responses/response_image_gen_call_in_progress_event.py +21 -0
  1780. openai/types/responses/response_image_gen_call_partial_image_event.py +30 -0
  1781. openai/types/responses/response_in_progress_event.py +19 -0
  1782. openai/types/responses/response_includable.py +16 -0
  1783. openai/types/responses/response_incomplete_event.py +19 -0
  1784. openai/types/responses/response_input_audio.py +22 -0
  1785. openai/types/responses/response_input_audio_param.py +22 -0
  1786. openai/types/responses/response_input_content.py +15 -0
  1787. openai/types/responses/response_input_content_param.py +14 -0
  1788. openai/types/responses/response_input_file.py +25 -0
  1789. openai/types/responses/response_input_file_content.py +25 -0
  1790. openai/types/responses/response_input_file_content_param.py +25 -0
  1791. openai/types/responses/response_input_file_param.py +25 -0
  1792. openai/types/responses/response_input_image.py +28 -0
  1793. openai/types/responses/response_input_image_content.py +28 -0
  1794. openai/types/responses/response_input_image_content_param.py +28 -0
  1795. openai/types/responses/response_input_image_param.py +28 -0
  1796. openai/types/responses/response_input_item.py +482 -0
  1797. openai/types/responses/response_input_item_param.py +479 -0
  1798. openai/types/responses/response_input_message_content_list.py +10 -0
  1799. openai/types/responses/response_input_message_content_list_param.py +16 -0
  1800. openai/types/responses/response_input_message_item.py +33 -0
  1801. openai/types/responses/response_input_param.py +482 -0
  1802. openai/types/responses/response_input_text.py +15 -0
  1803. openai/types/responses/response_input_text_content.py +15 -0
  1804. openai/types/responses/response_input_text_content_param.py +15 -0
  1805. openai/types/responses/response_input_text_param.py +15 -0
  1806. openai/types/responses/response_item.py +226 -0
  1807. openai/types/responses/response_item_list.py +26 -0
  1808. openai/types/responses/response_mcp_call_arguments_delta_event.py +27 -0
  1809. openai/types/responses/response_mcp_call_arguments_done_event.py +24 -0
  1810. openai/types/responses/response_mcp_call_completed_event.py +21 -0
  1811. openai/types/responses/response_mcp_call_failed_event.py +21 -0
  1812. openai/types/responses/response_mcp_call_in_progress_event.py +21 -0
  1813. openai/types/responses/response_mcp_list_tools_completed_event.py +21 -0
  1814. openai/types/responses/response_mcp_list_tools_failed_event.py +21 -0
  1815. openai/types/responses/response_mcp_list_tools_in_progress_event.py +21 -0
  1816. openai/types/responses/response_output_item.py +189 -0
  1817. openai/types/responses/response_output_item_added_event.py +22 -0
  1818. openai/types/responses/response_output_item_done_event.py +22 -0
  1819. openai/types/responses/response_output_message.py +34 -0
  1820. openai/types/responses/response_output_message_param.py +34 -0
  1821. openai/types/responses/response_output_refusal.py +15 -0
  1822. openai/types/responses/response_output_refusal_param.py +15 -0
  1823. openai/types/responses/response_output_text.py +117 -0
  1824. openai/types/responses/response_output_text_annotation_added_event.py +30 -0
  1825. openai/types/responses/response_output_text_param.py +115 -0
  1826. openai/types/responses/response_prompt.py +28 -0
  1827. openai/types/responses/response_prompt_param.py +29 -0
  1828. openai/types/responses/response_queued_event.py +19 -0
  1829. openai/types/responses/response_reasoning_item.py +51 -0
  1830. openai/types/responses/response_reasoning_item_param.py +51 -0
  1831. openai/types/responses/response_reasoning_summary_part_added_event.py +35 -0
  1832. openai/types/responses/response_reasoning_summary_part_done_event.py +35 -0
  1833. openai/types/responses/response_reasoning_summary_text_delta_event.py +27 -0
  1834. openai/types/responses/response_reasoning_summary_text_done_event.py +27 -0
  1835. openai/types/responses/response_reasoning_text_delta_event.py +27 -0
  1836. openai/types/responses/response_reasoning_text_done_event.py +27 -0
  1837. openai/types/responses/response_refusal_delta_event.py +27 -0
  1838. openai/types/responses/response_refusal_done_event.py +27 -0
  1839. openai/types/responses/response_retrieve_params.py +59 -0
  1840. openai/types/responses/response_status.py +7 -0
  1841. openai/types/responses/response_stream_event.py +120 -0
  1842. openai/types/responses/response_text_config.py +35 -0
  1843. openai/types/responses/response_text_config_param.py +36 -0
  1844. openai/types/responses/response_text_delta_event.py +50 -0
  1845. openai/types/responses/response_text_done_event.py +50 -0
  1846. openai/types/responses/response_usage.py +35 -0
  1847. openai/types/responses/response_web_search_call_completed_event.py +21 -0
  1848. openai/types/responses/response_web_search_call_in_progress_event.py +21 -0
  1849. openai/types/responses/response_web_search_call_searching_event.py +21 -0
  1850. openai/types/responses/tool.py +271 -0
  1851. openai/types/responses/tool_choice_allowed.py +36 -0
  1852. openai/types/responses/tool_choice_allowed_param.py +36 -0
  1853. openai/types/responses/tool_choice_apply_patch.py +12 -0
  1854. openai/types/responses/tool_choice_apply_patch_param.py +12 -0
  1855. openai/types/responses/tool_choice_custom.py +15 -0
  1856. openai/types/responses/tool_choice_custom_param.py +15 -0
  1857. openai/types/responses/tool_choice_function.py +15 -0
  1858. openai/types/responses/tool_choice_function_param.py +15 -0
  1859. openai/types/responses/tool_choice_mcp.py +19 -0
  1860. openai/types/responses/tool_choice_mcp_param.py +19 -0
  1861. openai/types/responses/tool_choice_options.py +7 -0
  1862. openai/types/responses/tool_choice_shell.py +12 -0
  1863. openai/types/responses/tool_choice_shell_param.py +12 -0
  1864. openai/types/responses/tool_choice_types.py +31 -0
  1865. openai/types/responses/tool_choice_types_param.py +33 -0
  1866. openai/types/responses/tool_param.py +271 -0
  1867. openai/types/responses/web_search_preview_tool.py +49 -0
  1868. openai/types/responses/web_search_preview_tool_param.py +49 -0
  1869. openai/types/responses/web_search_tool.py +63 -0
  1870. openai/types/responses/web_search_tool_param.py +65 -0
  1871. openai/types/shared/__init__.py +19 -0
  1872. openai/types/shared/all_models.py +28 -0
  1873. openai/types/shared/chat_model.py +75 -0
  1874. openai/types/shared/comparison_filter.py +34 -0
  1875. openai/types/shared/compound_filter.py +22 -0
  1876. openai/types/shared/custom_tool_input_format.py +28 -0
  1877. openai/types/shared/error_object.py +17 -0
  1878. openai/types/shared/function_definition.py +43 -0
  1879. openai/types/shared/function_parameters.py +8 -0
  1880. openai/types/shared/metadata.py +8 -0
  1881. openai/types/shared/reasoning.py +44 -0
  1882. openai/types/shared/reasoning_effort.py +8 -0
  1883. openai/types/shared/response_format_json_object.py +12 -0
  1884. openai/types/shared/response_format_json_schema.py +48 -0
  1885. openai/types/shared/response_format_text.py +12 -0
  1886. openai/types/shared/response_format_text_grammar.py +15 -0
  1887. openai/types/shared/response_format_text_python.py +12 -0
  1888. openai/types/shared/responses_model.py +28 -0
  1889. openai/types/shared_params/__init__.py +15 -0
  1890. openai/types/shared_params/chat_model.py +77 -0
  1891. openai/types/shared_params/comparison_filter.py +36 -0
  1892. openai/types/shared_params/compound_filter.py +23 -0
  1893. openai/types/shared_params/custom_tool_input_format.py +27 -0
  1894. openai/types/shared_params/function_definition.py +45 -0
  1895. openai/types/shared_params/function_parameters.py +10 -0
  1896. openai/types/shared_params/metadata.py +10 -0
  1897. openai/types/shared_params/reasoning.py +45 -0
  1898. openai/types/shared_params/reasoning_effort.py +10 -0
  1899. openai/types/shared_params/response_format_json_object.py +12 -0
  1900. openai/types/shared_params/response_format_json_schema.py +46 -0
  1901. openai/types/shared_params/response_format_text.py +12 -0
  1902. openai/types/shared_params/responses_model.py +30 -0
  1903. openai/types/static_file_chunking_strategy.py +20 -0
  1904. openai/types/static_file_chunking_strategy_object.py +15 -0
  1905. openai/types/static_file_chunking_strategy_object_param.py +16 -0
  1906. openai/types/static_file_chunking_strategy_param.py +22 -0
  1907. openai/types/upload.py +42 -0
  1908. openai/types/upload_complete_params.py +20 -0
  1909. openai/types/upload_create_params.py +52 -0
  1910. openai/types/uploads/__init__.py +6 -0
  1911. openai/types/uploads/part_create_params.py +14 -0
  1912. openai/types/uploads/upload_part.py +21 -0
  1913. openai/types/vector_store.py +82 -0
  1914. openai/types/vector_store_create_params.py +61 -0
  1915. openai/types/vector_store_deleted.py +15 -0
  1916. openai/types/vector_store_list_params.py +39 -0
  1917. openai/types/vector_store_search_params.py +42 -0
  1918. openai/types/vector_store_search_response.py +39 -0
  1919. openai/types/vector_store_update_params.py +39 -0
  1920. openai/types/vector_stores/__init__.py +13 -0
  1921. openai/types/vector_stores/file_batch_create_params.py +70 -0
  1922. openai/types/vector_stores/file_batch_list_files_params.py +47 -0
  1923. openai/types/vector_stores/file_content_response.py +15 -0
  1924. openai/types/vector_stores/file_create_params.py +35 -0
  1925. openai/types/vector_stores/file_list_params.py +45 -0
  1926. openai/types/vector_stores/file_update_params.py +21 -0
  1927. openai/types/vector_stores/vector_store_file.py +67 -0
  1928. openai/types/vector_stores/vector_store_file_batch.py +54 -0
  1929. openai/types/vector_stores/vector_store_file_deleted.py +15 -0
  1930. openai/types/video.py +53 -0
  1931. openai/types/video_create_error.py +11 -0
  1932. openai/types/video_create_params.py +29 -0
  1933. openai/types/video_delete_response.py +18 -0
  1934. openai/types/video_download_content_params.py +12 -0
  1935. openai/types/video_list_params.py +21 -0
  1936. openai/types/video_model.py +7 -0
  1937. openai/types/video_remix_params.py +12 -0
  1938. openai/types/video_seconds.py +7 -0
  1939. openai/types/video_size.py +7 -0
  1940. openai/types/webhooks/__init__.py +24 -0
  1941. openai/types/webhooks/batch_cancelled_webhook_event.py +30 -0
  1942. openai/types/webhooks/batch_completed_webhook_event.py +30 -0
  1943. openai/types/webhooks/batch_expired_webhook_event.py +30 -0
  1944. openai/types/webhooks/batch_failed_webhook_event.py +30 -0
  1945. openai/types/webhooks/eval_run_canceled_webhook_event.py +30 -0
  1946. openai/types/webhooks/eval_run_failed_webhook_event.py +30 -0
  1947. openai/types/webhooks/eval_run_succeeded_webhook_event.py +30 -0
  1948. openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +30 -0
  1949. openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +30 -0
  1950. openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +30 -0
  1951. openai/types/webhooks/realtime_call_incoming_webhook_event.py +41 -0
  1952. openai/types/webhooks/response_cancelled_webhook_event.py +30 -0
  1953. openai/types/webhooks/response_completed_webhook_event.py +30 -0
  1954. openai/types/webhooks/response_failed_webhook_event.py +30 -0
  1955. openai/types/webhooks/response_incomplete_webhook_event.py +30 -0
  1956. openai/types/webhooks/unwrap_webhook_event.py +44 -0
  1957. openai/types/websocket_connection_options.py +36 -0
  1958. openai/version.py +3 -0
@@ -0,0 +1,3143 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ import inspect
6
+ from typing import Dict, List, Type, Union, Iterable, Optional, cast
7
+ from functools import partial
8
+ from typing_extensions import Literal, overload
9
+
10
+ import httpx
11
+ import pydantic
12
+
13
+ from .... import _legacy_response
14
+ from .messages import (
15
+ Messages,
16
+ AsyncMessages,
17
+ MessagesWithRawResponse,
18
+ AsyncMessagesWithRawResponse,
19
+ MessagesWithStreamingResponse,
20
+ AsyncMessagesWithStreamingResponse,
21
+ )
22
+ from ...._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
23
+ from ...._utils import required_args, maybe_transform, async_maybe_transform
24
+ from ...._compat import cached_property
25
+ from ...._resource import SyncAPIResource, AsyncAPIResource
26
+ from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
27
+ from ...._streaming import Stream, AsyncStream
28
+ from ....pagination import SyncCursorPage, AsyncCursorPage
29
+ from ....types.chat import (
30
+ ChatCompletionAudioParam,
31
+ completion_list_params,
32
+ completion_create_params,
33
+ completion_update_params,
34
+ )
35
+ from ...._base_client import AsyncPaginator, make_request_options
36
+ from ....lib._parsing import (
37
+ ResponseFormatT,
38
+ validate_input_tools as _validate_input_tools,
39
+ parse_chat_completion as _parse_chat_completion,
40
+ type_to_response_format_param as _type_to_response_format,
41
+ )
42
+ from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
43
+ from ....types.shared.chat_model import ChatModel
44
+ from ....types.chat.chat_completion import ChatCompletion
45
+ from ....types.shared_params.metadata import Metadata
46
+ from ....types.shared.reasoning_effort import ReasoningEffort
47
+ from ....types.chat.chat_completion_chunk import ChatCompletionChunk
48
+ from ....types.chat.parsed_chat_completion import ParsedChatCompletion
49
+ from ....types.chat.chat_completion_deleted import ChatCompletionDeleted
50
+ from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
51
+ from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
52
+ from ....types.chat.chat_completion_tool_union_param import ChatCompletionToolUnionParam
53
+ from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
54
+ from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
55
+ from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
56
+
57
+ __all__ = ["Completions", "AsyncCompletions"]
58
+
59
+
60
+ class Completions(SyncAPIResource):
61
+ @cached_property
62
+ def messages(self) -> Messages:
63
+ return Messages(self._client)
64
+
65
+ @cached_property
66
+ def with_raw_response(self) -> CompletionsWithRawResponse:
67
+ """
68
+ This property can be used as a prefix for any HTTP method call to return
69
+ the raw response object instead of the parsed content.
70
+
71
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
72
+ """
73
+ return CompletionsWithRawResponse(self)
74
+
75
+ @cached_property
76
+ def with_streaming_response(self) -> CompletionsWithStreamingResponse:
77
+ """
78
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
79
+
80
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
81
+ """
82
+ return CompletionsWithStreamingResponse(self)
83
+
84
    def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        response_format: type[ResponseFormatT] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import OpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = OpenAI()
        completion = client.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        # Validate the provided tool schemas up front (raises on malformed
        # strict-mode tools) before any network request is made.
        chat_completion_tools = _validate_input_tools(tools)

        # Tag the request so the backend can identify calls made through this
        # helper; caller-supplied headers take precedence over the tag.
        extra_headers = {
            "X-Stainless-Helper-Method": "chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            # Post-processor run on the raw API response: parses message
            # content / tool-call arguments into `response_format` instances.
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=chat_completion_tools,
            )

        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "prompt_cache_key": prompt_cache_key,
                    "prompt_cache_retention": prompt_cache_retention,
                    "reasoning_effort": reasoning_effort,
                    # The user-supplied Python type is converted to the API's
                    # JSON-schema `response_format` wire shape here.
                    "response_format": _type_to_response_format(response_format),
                    "safety_identifier": safety_identifier,
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    # `parse()` never streams: parsing requires the complete response.
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "verbosity": verbosity,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )
238
+
239
    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion:
        """Create a model response for the given chat conversation (non-streaming overload).

        This overload applies when `stream` is omitted or `False`; the full
        response is returned as a single `ChatCompletion`.

        **Starting a new project?** We recommend trying
        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
        advantage of the latest OpenAI platform features. Compare
        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).

        Parameter support can differ depending on the model used, particularly for
        newer reasoning models —
        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).

        Args:
          messages: The conversation so far. Supported message modalities (text,
              images, audio) depend on the [model](https://platform.openai.com/docs/models).

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. See the
              [model guide](https://platform.openai.com/docs/models).

          audio: Parameters for audio output; required when audio output is requested
              with `modalities: ["audio"]`.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize
              tokens by their existing frequency, discouraging verbatim repetition.

          function_call: Deprecated in favor of `tool_choice`. Controls which (if any)
              function is called: `none`, `auto`, or `{"name": "my_function"}` to force
              a specific function. `none` is the default when no functions are present,
              `auto` otherwise.

          functions: Deprecated in favor of `tools`. A list of functions the model may
              generate JSON inputs for.

          logit_bias: Maps token IDs to bias values from -100 to 100, added to the
              logits before sampling; extremes effectively ban or force tokens.

          logprobs: Whether to return log probabilities of the output tokens.

          max_completion_tokens: Upper bound on generated tokens, including visible
              output and reasoning tokens.

          max_tokens: Maximum number of generated tokens. Deprecated in favor of
              `max_completion_tokens`; not compatible with o-series models.

          metadata: Up to 16 key-value pairs attached to the object (keys up to 64
              characters, values up to 512) for structured storage and querying.

          modalities: Output types to generate. Most models only support `["text"]`
              (the default); `gpt-4o-audio-preview` also accepts `["text", "audio"]`.

          n: How many completion choices to generate per input message. You are
              charged for all generated tokens; keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable parallel function calling during
              tool use.

          prediction: Static predicted output content, such as the content of a text
              file being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize
              tokens that already appeared, encouraging new topics.

          prompt_cache_key: Used to optimize prompt-cache hit rates for similar
              requests; replaces the `user` field for caching.

          prompt_cache_retention: Set to `24h` to keep cached prefixes active for up
              to 24 hours.

          reasoning_effort: Constrains reasoning effort for reasoning models
              (`none`, `minimal`, `low`, `medium`, `high`). `gpt-5.1` defaults to
              `none`; earlier models default to `medium` and do not support `none`;
              `gpt-5-pro` only supports `high`.

          response_format: Output format specification.
              `{"type": "json_schema", ...}` enables Structured Outputs;
              `{"type": "json_object"}` enables the older JSON mode.

          safety_identifier: Stable identifier (e.g. a hashed username/email) used to
              help detect users violating OpenAI usage policies.

          seed: Beta. Best-effort deterministic sampling for repeated requests with
              the same `seed` and parameters; monitor `system_fingerprint` for
              backend changes.

          service_tier: Processing type used for serving the request: `'auto'`
              (default behavior), `'default'`, `'flex'`, or `'priority'`. The
              response reports the tier actually used.

          stop: Up to 4 sequences where the API stops generating. Not supported with
              the latest reasoning models `o3` and `o4-mini`.

          store: Whether to store the output for model distillation or evals.
              Supports text and image inputs; image inputs over 8MB are dropped.

          stream: If true, response data is streamed via
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              This overload covers the non-streaming case (`False` or omitted).

          stream_options: Options for streaming; only set with `stream: true`.

          temperature: Sampling temperature between 0 and 2; higher is more random.
              Alter this or `top_p`, but not both.

          tool_choice: Controls which (if any) tool is called: `none`, `auto`,
              `required`, or a specific tool via
              `{"type": "function", "function": {"name": "my_function"}}`.

          tools: Custom or function tools the model may call.

          top_logprobs: Integer between 0 and 20; number of most-likely tokens to
              return per position. Requires `logprobs: true`.

          top_p: Nucleus-sampling probability mass (e.g. 0.1 keeps the top 10%).
              Alter this or `temperature`, but not both.

          user: Being replaced by `safety_identifier` and `prompt_cache_key`; a
              stable end-user identifier used for caching and abuse detection.

          verbosity: Constrains response verbosity: `low`, `medium`, or `high`.

          web_search_options: Configuration for the
              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
539
+
540
+ @overload
541
+ def create(
542
+ self,
543
+ *,
544
+ messages: Iterable[ChatCompletionMessageParam],
545
+ model: Union[str, ChatModel],
546
+ stream: Literal[True],
547
+ audio: Optional[ChatCompletionAudioParam] | Omit = omit,
548
+ frequency_penalty: Optional[float] | Omit = omit,
549
+ function_call: completion_create_params.FunctionCall | Omit = omit,
550
+ functions: Iterable[completion_create_params.Function] | Omit = omit,
551
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
552
+ logprobs: Optional[bool] | Omit = omit,
553
+ max_completion_tokens: Optional[int] | Omit = omit,
554
+ max_tokens: Optional[int] | Omit = omit,
555
+ metadata: Optional[Metadata] | Omit = omit,
556
+ modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
557
+ n: Optional[int] | Omit = omit,
558
+ parallel_tool_calls: bool | Omit = omit,
559
+ prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
560
+ presence_penalty: Optional[float] | Omit = omit,
561
+ prompt_cache_key: str | Omit = omit,
562
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
563
+ reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
564
+ response_format: completion_create_params.ResponseFormat | Omit = omit,
565
+ safety_identifier: str | Omit = omit,
566
+ seed: Optional[int] | Omit = omit,
567
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
568
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
569
+ store: Optional[bool] | Omit = omit,
570
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
571
+ temperature: Optional[float] | Omit = omit,
572
+ tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
573
+ tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
574
+ top_logprobs: Optional[int] | Omit = omit,
575
+ top_p: Optional[float] | Omit = omit,
576
+ user: str | Omit = omit,
577
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
578
+ web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
579
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
580
+ # The extra values given here take precedence over values defined on the client or passed to this method.
581
+ extra_headers: Headers | None = None,
582
+ extra_query: Query | None = None,
583
+ extra_body: Body | None = None,
584
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
585
+ ) -> Stream[ChatCompletionChunk]:
586
+ """
587
+ **Starting a new project?** We recommend trying
588
+ [Responses](https://platform.openai.com/docs/api-reference/responses) to take
589
+ advantage of the latest OpenAI platform features. Compare
590
+ [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
591
+
592
+ ---
593
+
594
+ Creates a model response for the given chat conversation. Learn more in the
595
+ [text generation](https://platform.openai.com/docs/guides/text-generation),
596
+ [vision](https://platform.openai.com/docs/guides/vision), and
597
+ [audio](https://platform.openai.com/docs/guides/audio) guides.
598
+
599
+ Parameter support can differ depending on the model used to generate the
600
+ response, particularly for newer reasoning models. Parameters that are only
601
+ supported for reasoning models are noted below. For the current state of
602
+ unsupported parameters in reasoning models,
603
+ [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
604
+
605
+ Args:
606
+ messages: A list of messages comprising the conversation so far. Depending on the
607
+ [model](https://platform.openai.com/docs/models) you use, different message
608
+ types (modalities) are supported, like
609
+ [text](https://platform.openai.com/docs/guides/text-generation),
610
+ [images](https://platform.openai.com/docs/guides/vision), and
611
+ [audio](https://platform.openai.com/docs/guides/audio).
612
+
613
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
614
+ wide range of models with different capabilities, performance characteristics,
615
+ and price points. Refer to the
616
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
617
+ available models.
618
+
619
+ stream: If set to true, the model response data will be streamed to the client as it is
620
+ generated using
621
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
622
+ See the
623
+ [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
624
+ for more information, along with the
625
+ [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
626
+ guide for more information on how to handle the streaming events.
627
+
628
+ audio: Parameters for audio output. Required when audio output is requested with
629
+ `modalities: ["audio"]`.
630
+ [Learn more](https://platform.openai.com/docs/guides/audio).
631
+
632
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
633
+ existing frequency in the text so far, decreasing the model's likelihood to
634
+ repeat the same line verbatim.
635
+
636
+ function_call: Deprecated in favor of `tool_choice`.
637
+
638
+ Controls which (if any) function is called by the model.
639
+
640
+ `none` means the model will not call a function and instead generates a message.
641
+
642
+ `auto` means the model can pick between generating a message or calling a
643
+ function.
644
+
645
+ Specifying a particular function via `{"name": "my_function"}` forces the model
646
+ to call that function.
647
+
648
+ `none` is the default when no functions are present. `auto` is the default if
649
+ functions are present.
650
+
651
+ functions: Deprecated in favor of `tools`.
652
+
653
+ A list of functions the model may generate JSON inputs for.
654
+
655
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
656
+
657
+ Accepts a JSON object that maps tokens (specified by their token ID in the
658
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
659
+ bias is added to the logits generated by the model prior to sampling. The exact
660
+ effect will vary per model, but values between -1 and 1 should decrease or
661
+ increase likelihood of selection; values like -100 or 100 should result in a ban
662
+ or exclusive selection of the relevant token.
663
+
664
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
665
+ returns the log probabilities of each output token returned in the `content` of
666
+ `message`.
667
+
668
+ max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
669
+ including visible output tokens and
670
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
671
+
672
+ max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
673
+ completion. This value can be used to control
674
+ [costs](https://openai.com/api/pricing/) for text generated via API.
675
+
676
+ This value is now deprecated in favor of `max_completion_tokens`, and is not
677
+ compatible with
678
+ [o-series models](https://platform.openai.com/docs/guides/reasoning).
679
+
680
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
681
+ for storing additional information about the object in a structured format, and
682
+ querying for objects via API or the dashboard.
683
+
684
+ Keys are strings with a maximum length of 64 characters. Values are strings with
685
+ a maximum length of 512 characters.
686
+
687
+ modalities: Output types that you would like the model to generate. Most models are capable
688
+ of generating text, which is the default:
689
+
690
+ `["text"]`
691
+
692
+ The `gpt-4o-audio-preview` model can also be used to
693
+ [generate audio](https://platform.openai.com/docs/guides/audio). To request that
694
+ this model generate both text and audio responses, you can use:
695
+
696
+ `["text", "audio"]`
697
+
698
+ n: How many chat completion choices to generate for each input message. Note that
699
+ you will be charged based on the number of generated tokens across all of the
700
+ choices. Keep `n` as `1` to minimize costs.
701
+
702
+ parallel_tool_calls: Whether to enable
703
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
704
+ during tool use.
705
+
706
+ prediction: Static predicted output content, such as the content of a text file that is
707
+ being regenerated.
708
+
709
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
710
+ whether they appear in the text so far, increasing the model's likelihood to
711
+ talk about new topics.
712
+
713
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
714
+ hit rates. Replaces the `user` field.
715
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
716
+
717
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
718
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
719
+ of 24 hours.
720
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
721
+
722
+ reasoning_effort: Constrains effort on reasoning for
723
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
724
+ supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
725
+ reasoning effort can result in faster responses and fewer tokens used on
726
+ reasoning in a response.
727
+
728
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
729
+ reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
730
+ calls are supported for all reasoning values in gpt-5.1.
731
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
732
+ support `none`.
733
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
734
+
735
+ response_format: An object specifying the format that the model must output.
736
+
737
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
738
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
739
+ in the
740
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
741
+
742
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
743
+ ensures the message the model generates is valid JSON. Using `json_schema` is
744
+ preferred for models that support it.
745
+
746
+ safety_identifier: A stable identifier used to help detect users of your application that may be
747
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
748
+ identifies each user. We recommend hashing their username or email address, in
749
+ order to avoid sending us any identifying information.
750
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
751
+
752
+ seed: This feature is in Beta. If specified, our system will make a best effort to
753
+ sample deterministically, such that repeated requests with the same `seed` and
754
+ parameters should return the same result. Determinism is not guaranteed, and you
755
+ should refer to the `system_fingerprint` response parameter to monitor changes
756
+ in the backend.
757
+
758
+ service_tier: Specifies the processing type used for serving the request.
759
+
760
+ - If set to 'auto', then the request will be processed with the service tier
761
+ configured in the Project settings. Unless otherwise configured, the Project
762
+ will use 'default'.
763
+ - If set to 'default', then the request will be processed with the standard
764
+ pricing and performance for the selected model.
765
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
766
+ '[priority](https://openai.com/api-priority-processing/)', then the request
767
+ will be processed with the corresponding service tier.
768
+ - When not set, the default behavior is 'auto'.
769
+
770
+ When the `service_tier` parameter is set, the response body will include the
771
+ `service_tier` value based on the processing mode actually used to serve the
772
+ request. This response value may be different from the value set in the
773
+ parameter.
774
+
775
+ stop: Not supported with latest reasoning models `o3` and `o4-mini`.
776
+
777
+ Up to 4 sequences where the API will stop generating further tokens. The
778
+ returned text will not contain the stop sequence.
779
+
780
+ store: Whether or not to store the output of this chat completion request for use in
781
+ our [model distillation](https://platform.openai.com/docs/guides/distillation)
782
+ or [evals](https://platform.openai.com/docs/guides/evals) products.
783
+
784
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
785
+
786
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
787
+
788
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
789
+ make the output more random, while lower values like 0.2 will make it more
790
+ focused and deterministic. We generally recommend altering this or `top_p` but
791
+ not both.
792
+
793
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
794
+ not call any tool and instead generates a message. `auto` means the model can
795
+ pick between generating a message or calling one or more tools. `required` means
796
+ the model must call one or more tools. Specifying a particular tool via
797
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
798
+ call that tool.
799
+
800
+ `none` is the default when no tools are present. `auto` is the default if tools
801
+ are present.
802
+
803
+ tools: A list of tools the model may call. You can provide either
804
+ [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
805
+ or [function tools](https://platform.openai.com/docs/guides/function-calling).
806
+
807
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
808
+ return at each token position, each with an associated log probability.
809
+ `logprobs` must be set to `true` if this parameter is used.
810
+
811
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
812
+ model considers the results of the tokens with top_p probability mass. So 0.1
813
+ means only the tokens comprising the top 10% probability mass are considered.
814
+
815
+ We generally recommend altering this or `temperature` but not both.
816
+
817
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
818
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
819
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
820
+ similar requests and to help OpenAI detect and prevent abuse.
821
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
822
+
823
+ verbosity: Constrains the verbosity of the model's response. Lower values will result in
824
+ more concise responses, while higher values will result in more verbose
825
+ responses. Currently supported values are `low`, `medium`, and `high`.
826
+
827
+ web_search_options: This tool searches the web for relevant results to use in a response. Learn more
828
+ about the
829
+ [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
830
+
831
+ extra_headers: Send extra headers
832
+
833
+ extra_query: Add additional query parameters to the request
834
+
835
+ extra_body: Add additional JSON properties to the request
836
+
837
+ timeout: Override the client-level default timeout for this request, in seconds
838
+ """
839
+ ...
840
+
841
    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: bool,
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        """Typing overload: ``create`` with ``stream`` passed as a plain ``bool``.

        Creates a model response for the given chat conversation. When ``stream``
        is true the return value is a ``Stream[ChatCompletionChunk]`` of
        server-sent events; otherwise a single ``ChatCompletion`` is returned.
        Parameter support can differ depending on the model used, particularly
        for newer reasoning models — see the platform's reasoning guide.

        Args:
          messages: Conversation so far; supported message modalities (text,
              images, audio) depend on the chosen model.
          model: Model ID used to generate the response, like `gpt-4o` or `o3`.
          stream: If true, response data is streamed to the client as it is
              generated, using server-sent events.
          audio: Parameters for audio output; required when audio output is
              requested with `modalities: ["audio"]`.
          frequency_penalty: Number between -2.0 and 2.0; positive values
              penalize tokens by their existing frequency, discouraging verbatim
              repetition.
          function_call: Deprecated in favor of `tool_choice`. Controls which
              (if any) function is called (`none`, `auto`, or
              `{"name": "my_function"}`).
          functions: Deprecated in favor of `tools`. Functions the model may
              generate JSON inputs for.
          logit_bias: Maps token IDs to a bias from -100 to 100 added to the
              logits prior to sampling; extreme values effectively ban or force
              the token.
          logprobs: Whether to return log probabilities of the output tokens in
              the `content` of `message`.
          max_completion_tokens: Upper bound for generated tokens, including
              visible output tokens and reasoning tokens.
          max_tokens: Deprecated in favor of `max_completion_tokens` and not
              compatible with o-series models; maximum tokens in the completion.
          metadata: Up to 16 key-value pairs attached to the object (keys up to
              64 chars, values up to 512 chars).
          modalities: Output types the model should generate, e.g. `["text"]`
              (the default) or `["text", "audio"]`.
          n: How many chat completion choices to generate per input message;
              keep `n` as `1` to minimize costs.
          parallel_tool_calls: Whether to enable parallel function calling
              during tool use.
          prediction: Static predicted output content, such as the content of a
              text file being regenerated.
          presence_penalty: Number between -2.0 and 2.0; positive values
              penalize tokens already present, encouraging new topics.
          prompt_cache_key: Used to optimize cache hit rates for similar
              requests; replaces the `user` field.
          prompt_cache_retention: Prompt-cache retention policy; `24h` keeps
              cached prefixes active for up to 24 hours.
          reasoning_effort: Constrains reasoning effort (`none`, `minimal`,
              `low`, `medium`, `high`); supported values vary by model.
          response_format: Output format specification;
              `{"type": "json_schema", ...}` enables Structured Outputs,
              `{"type": "json_object"}` the older JSON mode.
          safety_identifier: Stable per-user identifier (hash usernames/emails)
              used to help detect policy violations.
          seed: Beta; best effort toward deterministic sampling for repeated
              requests with the same `seed` and parameters.
          service_tier: Processing type for the request: 'auto', 'default',
              'flex', 'scale', or 'priority'.
          stop: Up to 4 sequences where the API stops generating (not supported
              by the latest reasoning models `o3` and `o4-mini`).
          store: Whether to store the output for model distillation or evals
              products (image inputs over 8MB are dropped).
          stream_options: Options for the streaming response; only set when
              `stream: true`.
          temperature: Sampling temperature between 0 and 2; alter this or
              `top_p`, but not both.
          tool_choice: Controls which (if any) tool is called: `none`, `auto`,
              `required`, or a specific function spec.
          tools: A list of custom or function tools the model may call.
          top_logprobs: Integer 0-20; number of most likely tokens returned per
              position (requires `logprobs: true`).
          top_p: Nucleus-sampling probability mass; alter this or
              `temperature`, but not both.
          user: Being replaced by `safety_identifier` and `prompt_cache_key`;
              stable end-user identifier.
          verbosity: Constrains response verbosity: `low`, `medium`, or `high`.
          web_search_options: Options for the web search tool used to gather
              relevant results for the response.
          extra_headers: Send extra headers
          extra_query: Add additional query parameters to the request
          extra_body: Add additional JSON properties to the request
          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
1141
+
1142
    @required_args(["messages", "model"], ["messages", "model", "stream"])
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        """Concrete implementation backing the ``create`` overloads.

        Validates ``response_format``, serializes all keyword arguments into the
        request body, and issues ``POST /chat/completions``. Returns a
        ``ChatCompletion`` for non-streaming calls, or a
        ``Stream[ChatCompletionChunk]`` when ``stream`` is truthy. The
        ``@required_args`` decorator enforces the two valid call shapes from the
        overloads: (``messages``, ``model``) or (``messages``, ``model``,
        ``stream``). See the overload docstrings for per-parameter details.
        """
        # Rejects unsupported/malformed response_format values before any network I/O.
        validate_response_format(response_format)
        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "prompt_cache_key": prompt_cache_key,
                    "prompt_cache_retention": prompt_cache_retention,
                    "reasoning_effort": reasoning_effort,
                    "response_format": response_format,
                    "safety_identifier": safety_identifier,
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": stream,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "verbosity": verbosity,
                    "web_search_options": web_search_options,
                },
                # Pick the params schema matching the call shape so the body is
                # validated/serialized against the right TypedDict variant.
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            # Normalize a falsy `stream` (presumably the `omit` sentinel or None)
            # to False for the transport layer — consistent with the `if stream`
            # schema selection above; TODO confirm `omit` is falsy.
            stream=stream or False,
            stream_cls=Stream[ChatCompletionChunk],
        )
1240
+
1241
+ def retrieve(
1242
+ self,
1243
+ completion_id: str,
1244
+ *,
1245
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1246
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1247
+ extra_headers: Headers | None = None,
1248
+ extra_query: Query | None = None,
1249
+ extra_body: Body | None = None,
1250
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1251
+ ) -> ChatCompletion:
1252
+ """Get a stored chat completion.
1253
+
1254
+ Only Chat Completions that have been created with
1255
+ the `store` parameter set to `true` will be returned.
1256
+
1257
+ Args:
1258
+ extra_headers: Send extra headers
1259
+
1260
+ extra_query: Add additional query parameters to the request
1261
+
1262
+ extra_body: Add additional JSON properties to the request
1263
+
1264
+ timeout: Override the client-level default timeout for this request, in seconds
1265
+ """
1266
+ if not completion_id:
1267
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
1268
+ return self._get(
1269
+ f"/chat/completions/{completion_id}",
1270
+ options=make_request_options(
1271
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1272
+ ),
1273
+ cast_to=ChatCompletion,
1274
+ )
1275
+
1276
+ def update(
1277
+ self,
1278
+ completion_id: str,
1279
+ *,
1280
+ metadata: Optional[Metadata],
1281
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1282
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1283
+ extra_headers: Headers | None = None,
1284
+ extra_query: Query | None = None,
1285
+ extra_body: Body | None = None,
1286
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1287
+ ) -> ChatCompletion:
1288
+ """Modify a stored chat completion.
1289
+
1290
+ Only Chat Completions that have been created
1291
+ with the `store` parameter set to `true` can be modified. Currently, the only
1292
+ supported modification is to update the `metadata` field.
1293
+
1294
+ Args:
1295
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
1296
+ for storing additional information about the object in a structured format, and
1297
+ querying for objects via API or the dashboard.
1298
+
1299
+ Keys are strings with a maximum length of 64 characters. Values are strings with
1300
+ a maximum length of 512 characters.
1301
+
1302
+ extra_headers: Send extra headers
1303
+
1304
+ extra_query: Add additional query parameters to the request
1305
+
1306
+ extra_body: Add additional JSON properties to the request
1307
+
1308
+ timeout: Override the client-level default timeout for this request, in seconds
1309
+ """
1310
+ if not completion_id:
1311
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
1312
+ return self._post(
1313
+ f"/chat/completions/{completion_id}",
1314
+ body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
1315
+ options=make_request_options(
1316
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1317
+ ),
1318
+ cast_to=ChatCompletion,
1319
+ )
1320
+
1321
+ def list(
1322
+ self,
1323
+ *,
1324
+ after: str | Omit = omit,
1325
+ limit: int | Omit = omit,
1326
+ metadata: Optional[Metadata] | Omit = omit,
1327
+ model: str | Omit = omit,
1328
+ order: Literal["asc", "desc"] | Omit = omit,
1329
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1330
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1331
+ extra_headers: Headers | None = None,
1332
+ extra_query: Query | None = None,
1333
+ extra_body: Body | None = None,
1334
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1335
+ ) -> SyncCursorPage[ChatCompletion]:
1336
+ """List stored Chat Completions.
1337
+
1338
+ Only Chat Completions that have been stored with
1339
+ the `store` parameter set to `true` will be returned.
1340
+
1341
+ Args:
1342
+ after: Identifier for the last chat completion from the previous pagination request.
1343
+
1344
+ limit: Number of Chat Completions to retrieve.
1345
+
1346
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
1347
+ for storing additional information about the object in a structured format, and
1348
+ querying for objects via API or the dashboard.
1349
+
1350
+ Keys are strings with a maximum length of 64 characters. Values are strings with
1351
+ a maximum length of 512 characters.
1352
+
1353
+ model: The model used to generate the Chat Completions.
1354
+
1355
+ order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
1356
+ `desc` for descending order. Defaults to `asc`.
1357
+
1358
+ extra_headers: Send extra headers
1359
+
1360
+ extra_query: Add additional query parameters to the request
1361
+
1362
+ extra_body: Add additional JSON properties to the request
1363
+
1364
+ timeout: Override the client-level default timeout for this request, in seconds
1365
+ """
1366
+ return self._get_api_list(
1367
+ "/chat/completions",
1368
+ page=SyncCursorPage[ChatCompletion],
1369
+ options=make_request_options(
1370
+ extra_headers=extra_headers,
1371
+ extra_query=extra_query,
1372
+ extra_body=extra_body,
1373
+ timeout=timeout,
1374
+ query=maybe_transform(
1375
+ {
1376
+ "after": after,
1377
+ "limit": limit,
1378
+ "metadata": metadata,
1379
+ "model": model,
1380
+ "order": order,
1381
+ },
1382
+ completion_list_params.CompletionListParams,
1383
+ ),
1384
+ ),
1385
+ model=ChatCompletion,
1386
+ )
1387
+
1388
+ def delete(
1389
+ self,
1390
+ completion_id: str,
1391
+ *,
1392
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1393
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1394
+ extra_headers: Headers | None = None,
1395
+ extra_query: Query | None = None,
1396
+ extra_body: Body | None = None,
1397
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1398
+ ) -> ChatCompletionDeleted:
1399
+ """Delete a stored chat completion.
1400
+
1401
+ Only Chat Completions that have been created
1402
+ with the `store` parameter set to `true` can be deleted.
1403
+
1404
+ Args:
1405
+ extra_headers: Send extra headers
1406
+
1407
+ extra_query: Add additional query parameters to the request
1408
+
1409
+ extra_body: Add additional JSON properties to the request
1410
+
1411
+ timeout: Override the client-level default timeout for this request, in seconds
1412
+ """
1413
+ if not completion_id:
1414
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
1415
+ return self._delete(
1416
+ f"/chat/completions/{completion_id}",
1417
+ options=make_request_options(
1418
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1419
+ ),
1420
+ cast_to=ChatCompletionDeleted,
1421
+ )
1422
+
1423
    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        # Tag the request so the backend can attribute it to this helper method;
        # caller-provided headers still win on key collisions.
        extra_headers = {
            "X-Stainless-Helper-Method": "chat.completions.stream",
            **(extra_headers or {}),
        }

        # Defer the actual HTTP call with `partial` so the stream manager can
        # open the connection only when its context manager is entered.
        # Note `stream=True` is forced and `response_format` is normalized
        # (e.g. a pydantic model class becomes a JSON-schema response format).
        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self.create,
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            prompt_cache_key=prompt_cache_key,
            prompt_cache_retention=prompt_cache_retention,
            reasoning_effort=reasoning_effort,
            safety_identifier=safety_identifier,
            seed=seed,
            service_tier=service_tier,
            store=store,
            stop=stop,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            verbosity=verbosity,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        # The original (possibly pydantic-model) `response_format` and the tools
        # are handed to the manager so parsed events can be produced client-side.
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )
1541
+
1542
+
1543
+ class AsyncCompletions(AsyncAPIResource):
1544
+ @cached_property
1545
+ def messages(self) -> AsyncMessages:
1546
+ return AsyncMessages(self._client)
1547
+
1548
+ @cached_property
1549
+ def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
1550
+ """
1551
+ This property can be used as a prefix for any HTTP method call to return
1552
+ the raw response object instead of the parsed content.
1553
+
1554
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
1555
+ """
1556
+ return AsyncCompletionsWithRawResponse(self)
1557
+
1558
+ @cached_property
1559
+ def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
1560
+ """
1561
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
1562
+
1563
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
1564
+ """
1565
+ return AsyncCompletionsWithStreamingResponse(self)
1566
+
1567
    async def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        response_format: type[ResponseFormatT] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import AsyncOpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = AsyncOpenAI()
        completion = await client.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        # Reject tool definitions the client-side parser cannot handle up front.
        _validate_input_tools(tools)

        # Tag the request so the backend can attribute it to this helper method;
        # caller-provided headers still win on key collisions.
        extra_headers = {
            "X-Stainless-Helper-Method": "chat.completions.parse",
            **(extra_headers or {}),
        }

        # Post-parser run on the raw `ChatCompletion` to produce the typed
        # `ParsedChatCompletion` (content parsed into `response_format`, tool
        # call arguments parsed against `tools`).
        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "prompt_cache_key": prompt_cache_key,
                    "prompt_cache_retention": prompt_cache_retention,
                    "reasoning_effort": reasoning_effort,
                    # A pydantic model class is converted to a JSON-schema
                    # response format before it is sent over the wire.
                    "response_format": _type_to_response_format(response_format),
                    "safety_identifier": safety_identifier,
                    "seed": seed,
                    "service_tier": service_tier,
                    "store": store,
                    "stop": stop,
                    # `.parse()` never streams; use `.stream()` for that.
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "verbosity": verbosity,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )
1721
+
1722
+ @overload
1723
+ async def create(
1724
+ self,
1725
+ *,
1726
+ messages: Iterable[ChatCompletionMessageParam],
1727
+ model: Union[str, ChatModel],
1728
+ audio: Optional[ChatCompletionAudioParam] | Omit = omit,
1729
+ frequency_penalty: Optional[float] | Omit = omit,
1730
+ function_call: completion_create_params.FunctionCall | Omit = omit,
1731
+ functions: Iterable[completion_create_params.Function] | Omit = omit,
1732
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
1733
+ logprobs: Optional[bool] | Omit = omit,
1734
+ max_completion_tokens: Optional[int] | Omit = omit,
1735
+ max_tokens: Optional[int] | Omit = omit,
1736
+ metadata: Optional[Metadata] | Omit = omit,
1737
+ modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
1738
+ n: Optional[int] | Omit = omit,
1739
+ parallel_tool_calls: bool | Omit = omit,
1740
+ prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
1741
+ presence_penalty: Optional[float] | Omit = omit,
1742
+ prompt_cache_key: str | Omit = omit,
1743
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
1744
+ reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
1745
+ response_format: completion_create_params.ResponseFormat | Omit = omit,
1746
+ safety_identifier: str | Omit = omit,
1747
+ seed: Optional[int] | Omit = omit,
1748
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
1749
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
1750
+ store: Optional[bool] | Omit = omit,
1751
+ stream: Optional[Literal[False]] | Omit = omit,
1752
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
1753
+ temperature: Optional[float] | Omit = omit,
1754
+ tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
1755
+ tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
1756
+ top_logprobs: Optional[int] | Omit = omit,
1757
+ top_p: Optional[float] | Omit = omit,
1758
+ user: str | Omit = omit,
1759
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
1760
+ web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
1761
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1762
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1763
+ extra_headers: Headers | None = None,
1764
+ extra_query: Query | None = None,
1765
+ extra_body: Body | None = None,
1766
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1767
+ ) -> ChatCompletion:
1768
+ """
1769
+ **Starting a new project?** We recommend trying
1770
+ [Responses](https://platform.openai.com/docs/api-reference/responses) to take
1771
+ advantage of the latest OpenAI platform features. Compare
1772
+ [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
1773
+
1774
+ ---
1775
+
1776
+ Creates a model response for the given chat conversation. Learn more in the
1777
+ [text generation](https://platform.openai.com/docs/guides/text-generation),
1778
+ [vision](https://platform.openai.com/docs/guides/vision), and
1779
+ [audio](https://platform.openai.com/docs/guides/audio) guides.
1780
+
1781
+ Parameter support can differ depending on the model used to generate the
1782
+ response, particularly for newer reasoning models. Parameters that are only
1783
+ supported for reasoning models are noted below. For the current state of
1784
+ unsupported parameters in reasoning models,
1785
+ [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
1786
+
1787
+ Args:
1788
+ messages: A list of messages comprising the conversation so far. Depending on the
1789
+ [model](https://platform.openai.com/docs/models) you use, different message
1790
+ types (modalities) are supported, like
1791
+ [text](https://platform.openai.com/docs/guides/text-generation),
1792
+ [images](https://platform.openai.com/docs/guides/vision), and
1793
+ [audio](https://platform.openai.com/docs/guides/audio).
1794
+
1795
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
1796
+ wide range of models with different capabilities, performance characteristics,
1797
+ and price points. Refer to the
1798
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
1799
+ available models.
1800
+
1801
+ audio: Parameters for audio output. Required when audio output is requested with
1802
+ `modalities: ["audio"]`.
1803
+ [Learn more](https://platform.openai.com/docs/guides/audio).
1804
+
1805
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
1806
+ existing frequency in the text so far, decreasing the model's likelihood to
1807
+ repeat the same line verbatim.
1808
+
1809
+ function_call: Deprecated in favor of `tool_choice`.
1810
+
1811
+ Controls which (if any) function is called by the model.
1812
+
1813
+ `none` means the model will not call a function and instead generates a message.
1814
+
1815
+ `auto` means the model can pick between generating a message or calling a
1816
+ function.
1817
+
1818
+ Specifying a particular function via `{"name": "my_function"}` forces the model
1819
+ to call that function.
1820
+
1821
+ `none` is the default when no functions are present. `auto` is the default if
1822
+ functions are present.
1823
+
1824
+ functions: Deprecated in favor of `tools`.
1825
+
1826
+ A list of functions the model may generate JSON inputs for.
1827
+
1828
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
1829
+
1830
+ Accepts a JSON object that maps tokens (specified by their token ID in the
1831
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
1832
+ bias is added to the logits generated by the model prior to sampling. The exact
1833
+ effect will vary per model, but values between -1 and 1 should decrease or
1834
+ increase likelihood of selection; values like -100 or 100 should result in a ban
1835
+ or exclusive selection of the relevant token.
1836
+
1837
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
1838
+ returns the log probabilities of each output token returned in the `content` of
1839
+ `message`.
1840
+
1841
+ max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
1842
+ including visible output tokens and
1843
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
1844
+
1845
+ max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
1846
+ completion. This value can be used to control
1847
+ [costs](https://openai.com/api/pricing/) for text generated via API.
1848
+
1849
+ This value is now deprecated in favor of `max_completion_tokens`, and is not
1850
+ compatible with
1851
+ [o-series models](https://platform.openai.com/docs/guides/reasoning).
1852
+
1853
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
1854
+ for storing additional information about the object in a structured format, and
1855
+ querying for objects via API or the dashboard.
1856
+
1857
+ Keys are strings with a maximum length of 64 characters. Values are strings with
1858
+ a maximum length of 512 characters.
1859
+
1860
+ modalities: Output types that you would like the model to generate. Most models are capable
1861
+ of generating text, which is the default:
1862
+
1863
+ `["text"]`
1864
+
1865
+ The `gpt-4o-audio-preview` model can also be used to
1866
+ [generate audio](https://platform.openai.com/docs/guides/audio). To request that
1867
+ this model generate both text and audio responses, you can use:
1868
+
1869
+ `["text", "audio"]`
1870
+
1871
+ n: How many chat completion choices to generate for each input message. Note that
1872
+ you will be charged based on the number of generated tokens across all of the
1873
+ choices. Keep `n` as `1` to minimize costs.
1874
+
1875
+ parallel_tool_calls: Whether to enable
1876
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
1877
+ during tool use.
1878
+
1879
+ prediction: Static predicted output content, such as the content of a text file that is
1880
+ being regenerated.
1881
+
1882
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
1883
+ whether they appear in the text so far, increasing the model's likelihood to
1884
+ talk about new topics.
1885
+
1886
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
1887
+ hit rates. Replaces the `user` field.
1888
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
1889
+
1890
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
1891
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
1892
+ of 24 hours.
1893
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
1894
+
1895
+ reasoning_effort: Constrains effort on reasoning for
1896
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
1897
+ supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
1898
+ reasoning effort can result in faster responses and fewer tokens used on
1899
+ reasoning in a response.
1900
+
1901
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
1902
+ reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
1903
+ calls are supported for all reasoning values in gpt-5.1.
1904
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
1905
+ support `none`.
1906
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
1907
+
1908
+ response_format: An object specifying the format that the model must output.
1909
+
1910
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
1911
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
1912
+ in the
1913
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
1914
+
1915
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
1916
+ ensures the message the model generates is valid JSON. Using `json_schema` is
1917
+ preferred for models that support it.
1918
+
1919
+ safety_identifier: A stable identifier used to help detect users of your application that may be
1920
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
1921
+ identifies each user. We recommend hashing their username or email address, in
1922
+ order to avoid sending us any identifying information.
1923
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
1924
+
1925
+ seed: This feature is in Beta. If specified, our system will make a best effort to
1926
+ sample deterministically, such that repeated requests with the same `seed` and
1927
+ parameters should return the same result. Determinism is not guaranteed, and you
1928
+ should refer to the `system_fingerprint` response parameter to monitor changes
1929
+ in the backend.
1930
+
1931
+ service_tier: Specifies the processing type used for serving the request.
1932
+
1933
+ - If set to 'auto', then the request will be processed with the service tier
1934
+ configured in the Project settings. Unless otherwise configured, the Project
1935
+ will use 'default'.
1936
+ - If set to 'default', then the request will be processed with the standard
1937
+ pricing and performance for the selected model.
1938
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1939
+ '[priority](https://openai.com/api-priority-processing/)', then the request
1940
+ will be processed with the corresponding service tier.
1941
+ - When not set, the default behavior is 'auto'.
1942
+
1943
+ When the `service_tier` parameter is set, the response body will include the
1944
+ `service_tier` value based on the processing mode actually used to serve the
1945
+ request. This response value may be different from the value set in the
1946
+ parameter.
1947
+
1948
+ stop: Not supported with latest reasoning models `o3` and `o4-mini`.
1949
+
1950
+ Up to 4 sequences where the API will stop generating further tokens. The
1951
+ returned text will not contain the stop sequence.
1952
+
1953
+ store: Whether or not to store the output of this chat completion request for use in
1954
+ our [model distillation](https://platform.openai.com/docs/guides/distillation)
1955
+ or [evals](https://platform.openai.com/docs/guides/evals) products.
1956
+
1957
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
1958
+
1959
+ stream: If set to true, the model response data will be streamed to the client as it is
1960
+ generated using
1961
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
1962
+ See the
1963
+ [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
1964
+ for more information, along with the
1965
+ [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
1966
+ guide for more information on how to handle the streaming events.
1967
+
1968
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
1969
+
1970
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1971
+ make the output more random, while lower values like 0.2 will make it more
1972
+ focused and deterministic. We generally recommend altering this or `top_p` but
1973
+ not both.
1974
+
1975
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
1976
+ not call any tool and instead generates a message. `auto` means the model can
1977
+ pick between generating a message or calling one or more tools. `required` means
1978
+ the model must call one or more tools. Specifying a particular tool via
1979
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
1980
+ call that tool.
1981
+
1982
+ `none` is the default when no tools are present. `auto` is the default if tools
1983
+ are present.
1984
+
1985
+ tools: A list of tools the model may call. You can provide either
1986
+ [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
1987
+ or [function tools](https://platform.openai.com/docs/guides/function-calling).
1988
+
1989
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
1990
+ return at each token position, each with an associated log probability.
1991
+ `logprobs` must be set to `true` if this parameter is used.
1992
+
1993
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
1994
+ model considers the results of the tokens with top_p probability mass. So 0.1
1995
+ means only the tokens comprising the top 10% probability mass are considered.
1996
+
1997
+ We generally recommend altering this or `temperature` but not both.
1998
+
1999
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
2000
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
2001
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
2002
+ similar requests and to help OpenAI detect and prevent abuse.
2003
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2004
+
2005
+ verbosity: Constrains the verbosity of the model's response. Lower values will result in
2006
+ more concise responses, while higher values will result in more verbose
2007
+ responses. Currently supported values are `low`, `medium`, and `high`.
2008
+
2009
+ web_search_options: This tool searches the web for relevant results to use in a response. Learn more
2010
+ about the
2011
+ [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
2012
+
2013
+ extra_headers: Send extra headers
2014
+
2015
+ extra_query: Add additional query parameters to the request
2016
+
2017
+ extra_body: Add additional JSON properties to the request
2018
+
2019
+ timeout: Override the client-level default timeout for this request, in seconds
2020
+ """
2021
+ ...
2022
+
2023
    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: Literal[True],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ChatCompletionChunk]:
        """
        **Starting a new project?** We recommend trying
        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
        advantage of the latest OpenAI platform features. Compare
        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).

        ---

        Creates a model response for the given chat conversation. Learn more in the
        [text generation](https://platform.openai.com/docs/guides/text-generation),
        [vision](https://platform.openai.com/docs/guides/vision), and
        [audio](https://platform.openai.com/docs/guides/audio) guides.

        Parameter support can differ depending on the model used to generate the
        response, particularly for newer reasoning models. Parameters that are only
        supported for reasoning models are noted below. For the current state of
        unsupported parameters in reasoning models,
        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).

        Args:
          messages: A list of messages comprising the conversation so far. Depending on the
              [model](https://platform.openai.com/docs/models) you use, different message
              types (modalities) are supported, like
              [text](https://platform.openai.com/docs/guides/text-generation),
              [images](https://platform.openai.com/docs/guides/vision), and
              [audio](https://platform.openai.com/docs/guides/audio).

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
              for more information, along with the
              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
              guide for more information on how to handle the streaming events.

          audio: Parameters for audio output. Required when audio output is requested with
              `modalities: ["audio"]`.
              [Learn more](https://platform.openai.com/docs/guides/audio).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model.

              `none` means the model will not call a function and instead generates a message.

              `auto` means the model can pick between generating a message or calling a
              function.

              Specifying a particular function via `{"name": "my_function"}` forces the model
              to call that function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion. This value can be used to control
              [costs](https://openai.com/api/pricing/) for text generated via API.

              This value is now deprecated in favor of `max_completion_tokens`, and is not
              compatible with
              [o-series models](https://platform.openai.com/docs/guides/reasoning).

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          modalities: Output types that you would like the model to generate. Most models are capable
              of generating text, which is the default:

              `["text"]`

              The `gpt-4o-audio-preview` model can also be used to
              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
              this model generate both text and audio responses, you can use:

              `["text", "audio"]`

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          prediction: Static predicted output content, such as the content of a text file that is
              being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning_effort: Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
              reasoning effort can result in faster responses and fewer tokens used on
              reasoning in a response.

              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
                calls are supported for all reasoning values in gpt-5.1.
              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                support `none`.
              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

          response_format: An object specifying the format that the model must output.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
              ensures the message the model generates is valid JSON. Using `json_schema` is
              preferred for models that support it.

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          store: Whether or not to store the output of this chat completion request for use in
              our [model distillation](https://platform.openai.com/docs/guides/distillation)
              or [evals](https://platform.openai.com/docs/guides/evals) products.

              Supports text and image inputs. Note: image inputs over 8MB will be dropped.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. You can provide either
              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
              or [function tools](https://platform.openai.com/docs/guides/function-calling).

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          verbosity: Constrains the verbosity of the model's response. Lower values will result in
              more concise responses, while higher values will result in more verbose
              responses. Currently supported values are `low`, `medium`, and `high`.

          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
              about the
              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Overload stub for static type checkers only: with stream=True the
        # implementation (defined after the overloads) returns an AsyncStream
        # of ChatCompletionChunk instead of a single ChatCompletion.
        ...
2323
+
2324
+ @overload
2325
+ async def create(
2326
+ self,
2327
+ *,
2328
+ messages: Iterable[ChatCompletionMessageParam],
2329
+ model: Union[str, ChatModel],
2330
+ stream: bool,
2331
+ audio: Optional[ChatCompletionAudioParam] | Omit = omit,
2332
+ frequency_penalty: Optional[float] | Omit = omit,
2333
+ function_call: completion_create_params.FunctionCall | Omit = omit,
2334
+ functions: Iterable[completion_create_params.Function] | Omit = omit,
2335
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
2336
+ logprobs: Optional[bool] | Omit = omit,
2337
+ max_completion_tokens: Optional[int] | Omit = omit,
2338
+ max_tokens: Optional[int] | Omit = omit,
2339
+ metadata: Optional[Metadata] | Omit = omit,
2340
+ modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
2341
+ n: Optional[int] | Omit = omit,
2342
+ parallel_tool_calls: bool | Omit = omit,
2343
+ prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
2344
+ presence_penalty: Optional[float] | Omit = omit,
2345
+ prompt_cache_key: str | Omit = omit,
2346
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2347
+ reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
2348
+ response_format: completion_create_params.ResponseFormat | Omit = omit,
2349
+ safety_identifier: str | Omit = omit,
2350
+ seed: Optional[int] | Omit = omit,
2351
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2352
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
2353
+ store: Optional[bool] | Omit = omit,
2354
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
2355
+ temperature: Optional[float] | Omit = omit,
2356
+ tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
2357
+ tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
2358
+ top_logprobs: Optional[int] | Omit = omit,
2359
+ top_p: Optional[float] | Omit = omit,
2360
+ user: str | Omit = omit,
2361
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
2362
+ web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
2363
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2364
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2365
+ extra_headers: Headers | None = None,
2366
+ extra_query: Query | None = None,
2367
+ extra_body: Body | None = None,
2368
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2369
+ ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
2370
+ """
2371
+ **Starting a new project?** We recommend trying
2372
+ [Responses](https://platform.openai.com/docs/api-reference/responses) to take
2373
+ advantage of the latest OpenAI platform features. Compare
2374
+ [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
2375
+
2376
+ ---
2377
+
2378
+ Creates a model response for the given chat conversation. Learn more in the
2379
+ [text generation](https://platform.openai.com/docs/guides/text-generation),
2380
+ [vision](https://platform.openai.com/docs/guides/vision), and
2381
+ [audio](https://platform.openai.com/docs/guides/audio) guides.
2382
+
2383
+ Parameter support can differ depending on the model used to generate the
2384
+ response, particularly for newer reasoning models. Parameters that are only
2385
+ supported for reasoning models are noted below. For the current state of
2386
+ unsupported parameters in reasoning models,
2387
+ [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
2388
+
2389
+ Args:
2390
+ messages: A list of messages comprising the conversation so far. Depending on the
2391
+ [model](https://platform.openai.com/docs/models) you use, different message
2392
+ types (modalities) are supported, like
2393
+ [text](https://platform.openai.com/docs/guides/text-generation),
2394
+ [images](https://platform.openai.com/docs/guides/vision), and
2395
+ [audio](https://platform.openai.com/docs/guides/audio).
2396
+
2397
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
2398
+ wide range of models with different capabilities, performance characteristics,
2399
+ and price points. Refer to the
2400
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
2401
+ available models.
2402
+
2403
+ stream: If set to true, the model response data will be streamed to the client as it is
2404
+ generated using
2405
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
2406
+ See the
2407
+ [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
2408
+ for more information, along with the
2409
+ [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
2410
+ guide for more information on how to handle the streaming events.
2411
+
2412
+ audio: Parameters for audio output. Required when audio output is requested with
2413
+ `modalities: ["audio"]`.
2414
+ [Learn more](https://platform.openai.com/docs/guides/audio).
2415
+
2416
+ frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
2417
+ existing frequency in the text so far, decreasing the model's likelihood to
2418
+ repeat the same line verbatim.
2419
+
2420
+ function_call: Deprecated in favor of `tool_choice`.
2421
+
2422
+ Controls which (if any) function is called by the model.
2423
+
2424
+ `none` means the model will not call a function and instead generates a message.
2425
+
2426
+ `auto` means the model can pick between generating a message or calling a
2427
+ function.
2428
+
2429
+ Specifying a particular function via `{"name": "my_function"}` forces the model
2430
+ to call that function.
2431
+
2432
+ `none` is the default when no functions are present. `auto` is the default if
2433
+ functions are present.
2434
+
2435
+ functions: Deprecated in favor of `tools`.
2436
+
2437
+ A list of functions the model may generate JSON inputs for.
2438
+
2439
+ logit_bias: Modify the likelihood of specified tokens appearing in the completion.
2440
+
2441
+ Accepts a JSON object that maps tokens (specified by their token ID in the
2442
+ tokenizer) to an associated bias value from -100 to 100. Mathematically, the
2443
+ bias is added to the logits generated by the model prior to sampling. The exact
2444
+ effect will vary per model, but values between -1 and 1 should decrease or
2445
+ increase likelihood of selection; values like -100 or 100 should result in a ban
2446
+ or exclusive selection of the relevant token.
2447
+
2448
+ logprobs: Whether to return log probabilities of the output tokens or not. If true,
2449
+ returns the log probabilities of each output token returned in the `content` of
2450
+ `message`.
2451
+
2452
+ max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
2453
+ including visible output tokens and
2454
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
2455
+
2456
+ max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
2457
+ completion. This value can be used to control
2458
+ [costs](https://openai.com/api/pricing/) for text generated via API.
2459
+
2460
+ This value is now deprecated in favor of `max_completion_tokens`, and is not
2461
+ compatible with
2462
+ [o-series models](https://platform.openai.com/docs/guides/reasoning).
2463
+
2464
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
2465
+ for storing additional information about the object in a structured format, and
2466
+ querying for objects via API or the dashboard.
2467
+
2468
+ Keys are strings with a maximum length of 64 characters. Values are strings with
2469
+ a maximum length of 512 characters.
2470
+
2471
+ modalities: Output types that you would like the model to generate. Most models are capable
2472
+ of generating text, which is the default:
2473
+
2474
+ `["text"]`
2475
+
2476
+ The `gpt-4o-audio-preview` model can also be used to
2477
+ [generate audio](https://platform.openai.com/docs/guides/audio). To request that
2478
+ this model generate both text and audio responses, you can use:
2479
+
2480
+ `["text", "audio"]`
2481
+
2482
+ n: How many chat completion choices to generate for each input message. Note that
2483
+ you will be charged based on the number of generated tokens across all of the
2484
+ choices. Keep `n` as `1` to minimize costs.
2485
+
2486
+ parallel_tool_calls: Whether to enable
2487
+ [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
2488
+ during tool use.
2489
+
2490
+ prediction: Static predicted output content, such as the content of a text file that is
2491
+ being regenerated.
2492
+
2493
+ presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
2494
+ whether they appear in the text so far, increasing the model's likelihood to
2495
+ talk about new topics.
2496
+
2497
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
2498
+ hit rates. Replaces the `user` field.
2499
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
2500
+
2501
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
2502
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
2503
+ of 24 hours.
2504
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
2505
+
2506
+ reasoning_effort: Constrains effort on reasoning for
2507
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
2508
+ supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
2509
+ reasoning effort can result in faster responses and fewer tokens used on
2510
+ reasoning in a response.
2511
+
2512
+ - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
2513
+ reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
2514
+ calls are supported for all reasoning values in gpt-5.1.
2515
+ - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
2516
+ support `none`.
2517
+ - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
2518
+
2519
+ response_format: An object specifying the format that the model must output.
2520
+
2521
+ Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
2522
+ Outputs which ensures the model will match your supplied JSON schema. Learn more
2523
+ in the
2524
+ [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
2525
+
2526
+ Setting to `{ "type": "json_object" }` enables the older JSON mode, which
2527
+ ensures the message the model generates is valid JSON. Using `json_schema` is
2528
+ preferred for models that support it.
2529
+
2530
+ safety_identifier: A stable identifier used to help detect users of your application that may be
2531
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
2532
+ identifies each user. We recommend hashing their username or email address, in
2533
+ order to avoid sending us any identifying information.
2534
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2535
+
2536
+ seed: This feature is in Beta. If specified, our system will make a best effort to
2537
+ sample deterministically, such that repeated requests with the same `seed` and
2538
+ parameters should return the same result. Determinism is not guaranteed, and you
2539
+ should refer to the `system_fingerprint` response parameter to monitor changes
2540
+ in the backend.
2541
+
2542
+ service_tier: Specifies the processing type used for serving the request.
2543
+
2544
+ - If set to 'auto', then the request will be processed with the service tier
2545
+ configured in the Project settings. Unless otherwise configured, the Project
2546
+ will use 'default'.
2547
+ - If set to 'default', then the request will be processed with the standard
2548
+ pricing and performance for the selected model.
2549
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
2550
+ '[priority](https://openai.com/api-priority-processing/)', then the request
2551
+ will be processed with the corresponding service tier.
2552
+ - When not set, the default behavior is 'auto'.
2553
+
2554
+ When the `service_tier` parameter is set, the response body will include the
2555
+ `service_tier` value based on the processing mode actually used to serve the
2556
+ request. This response value may be different from the value set in the
2557
+ parameter.
2558
+
2559
+ stop: Not supported with latest reasoning models `o3` and `o4-mini`.
2560
+
2561
+ Up to 4 sequences where the API will stop generating further tokens. The
2562
+ returned text will not contain the stop sequence.
2563
+
2564
+ store: Whether or not to store the output of this chat completion request for use in
2565
+ our [model distillation](https://platform.openai.com/docs/guides/distillation)
2566
+ or [evals](https://platform.openai.com/docs/guides/evals) products.
2567
+
2568
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped.
2569
+
2570
+ stream_options: Options for streaming response. Only set this when you set `stream: true`.
2571
+
2572
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
2573
+ make the output more random, while lower values like 0.2 will make it more
2574
+ focused and deterministic. We generally recommend altering this or `top_p` but
2575
+ not both.
2576
+
2577
+ tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
2578
+ not call any tool and instead generates a message. `auto` means the model can
2579
+ pick between generating a message or calling one or more tools. `required` means
2580
+ the model must call one or more tools. Specifying a particular tool via
2581
+ `{"type": "function", "function": {"name": "my_function"}}` forces the model to
2582
+ call that tool.
2583
+
2584
+ `none` is the default when no tools are present. `auto` is the default if tools
2585
+ are present.
2586
+
2587
+ tools: A list of tools the model may call. You can provide either
2588
+ [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
2589
+ or [function tools](https://platform.openai.com/docs/guides/function-calling).
2590
+
2591
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
2592
+ return at each token position, each with an associated log probability.
2593
+ `logprobs` must be set to `true` if this parameter is used.
2594
+
2595
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
2596
+ model considers the results of the tokens with top_p probability mass. So 0.1
2597
+ means only the tokens comprising the top 10% probability mass are considered.
2598
+
2599
+ We generally recommend altering this or `temperature` but not both.
2600
+
2601
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
2602
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
2603
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
2604
+ similar requests and to help OpenAI detect and prevent abuse.
2605
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2606
+
2607
+ verbosity: Constrains the verbosity of the model's response. Lower values will result in
2608
+ more concise responses, while higher values will result in more verbose
2609
+ responses. Currently supported values are `low`, `medium`, and `high`.
2610
+
2611
+ web_search_options: This tool searches the web for relevant results to use in a response. Learn more
2612
+ about the
2613
+ [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
2614
+
2615
+ extra_headers: Send extra headers
2616
+
2617
+ extra_query: Add additional query parameters to the request
2618
+
2619
+ extra_body: Add additional JSON properties to the request
2620
+
2621
+ timeout: Override the client-level default timeout for this request, in seconds
2622
+ """
2623
+ ...
2624
+
2625
+ @required_args(["messages", "model"], ["messages", "model", "stream"])
2626
+ async def create(
2627
+ self,
2628
+ *,
2629
+ messages: Iterable[ChatCompletionMessageParam],
2630
+ model: Union[str, ChatModel],
2631
+ audio: Optional[ChatCompletionAudioParam] | Omit = omit,
2632
+ frequency_penalty: Optional[float] | Omit = omit,
2633
+ function_call: completion_create_params.FunctionCall | Omit = omit,
2634
+ functions: Iterable[completion_create_params.Function] | Omit = omit,
2635
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
2636
+ logprobs: Optional[bool] | Omit = omit,
2637
+ max_completion_tokens: Optional[int] | Omit = omit,
2638
+ max_tokens: Optional[int] | Omit = omit,
2639
+ metadata: Optional[Metadata] | Omit = omit,
2640
+ modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
2641
+ n: Optional[int] | Omit = omit,
2642
+ parallel_tool_calls: bool | Omit = omit,
2643
+ prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
2644
+ presence_penalty: Optional[float] | Omit = omit,
2645
+ prompt_cache_key: str | Omit = omit,
2646
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2647
+ reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
2648
+ response_format: completion_create_params.ResponseFormat | Omit = omit,
2649
+ safety_identifier: str | Omit = omit,
2650
+ seed: Optional[int] | Omit = omit,
2651
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2652
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
2653
+ store: Optional[bool] | Omit = omit,
2654
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
2655
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
2656
+ temperature: Optional[float] | Omit = omit,
2657
+ tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
2658
+ tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
2659
+ top_logprobs: Optional[int] | Omit = omit,
2660
+ top_p: Optional[float] | Omit = omit,
2661
+ user: str | Omit = omit,
2662
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
2663
+ web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
2664
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2665
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2666
+ extra_headers: Headers | None = None,
2667
+ extra_query: Query | None = None,
2668
+ extra_body: Body | None = None,
2669
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2670
+ ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
2671
+ validate_response_format(response_format)
2672
+ return await self._post(
2673
+ "/chat/completions",
2674
+ body=await async_maybe_transform(
2675
+ {
2676
+ "messages": messages,
2677
+ "model": model,
2678
+ "audio": audio,
2679
+ "frequency_penalty": frequency_penalty,
2680
+ "function_call": function_call,
2681
+ "functions": functions,
2682
+ "logit_bias": logit_bias,
2683
+ "logprobs": logprobs,
2684
+ "max_completion_tokens": max_completion_tokens,
2685
+ "max_tokens": max_tokens,
2686
+ "metadata": metadata,
2687
+ "modalities": modalities,
2688
+ "n": n,
2689
+ "parallel_tool_calls": parallel_tool_calls,
2690
+ "prediction": prediction,
2691
+ "presence_penalty": presence_penalty,
2692
+ "prompt_cache_key": prompt_cache_key,
2693
+ "prompt_cache_retention": prompt_cache_retention,
2694
+ "reasoning_effort": reasoning_effort,
2695
+ "response_format": response_format,
2696
+ "safety_identifier": safety_identifier,
2697
+ "seed": seed,
2698
+ "service_tier": service_tier,
2699
+ "stop": stop,
2700
+ "store": store,
2701
+ "stream": stream,
2702
+ "stream_options": stream_options,
2703
+ "temperature": temperature,
2704
+ "tool_choice": tool_choice,
2705
+ "tools": tools,
2706
+ "top_logprobs": top_logprobs,
2707
+ "top_p": top_p,
2708
+ "user": user,
2709
+ "verbosity": verbosity,
2710
+ "web_search_options": web_search_options,
2711
+ },
2712
+ completion_create_params.CompletionCreateParamsStreaming
2713
+ if stream
2714
+ else completion_create_params.CompletionCreateParamsNonStreaming,
2715
+ ),
2716
+ options=make_request_options(
2717
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2718
+ ),
2719
+ cast_to=ChatCompletion,
2720
+ stream=stream or False,
2721
+ stream_cls=AsyncStream[ChatCompletionChunk],
2722
+ )
2723
+
2724
+ async def retrieve(
2725
+ self,
2726
+ completion_id: str,
2727
+ *,
2728
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2729
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2730
+ extra_headers: Headers | None = None,
2731
+ extra_query: Query | None = None,
2732
+ extra_body: Body | None = None,
2733
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2734
+ ) -> ChatCompletion:
2735
+ """Get a stored chat completion.
2736
+
2737
+ Only Chat Completions that have been created with
2738
+ the `store` parameter set to `true` will be returned.
2739
+
2740
+ Args:
2741
+ extra_headers: Send extra headers
2742
+
2743
+ extra_query: Add additional query parameters to the request
2744
+
2745
+ extra_body: Add additional JSON properties to the request
2746
+
2747
+ timeout: Override the client-level default timeout for this request, in seconds
2748
+ """
2749
+ if not completion_id:
2750
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
2751
+ return await self._get(
2752
+ f"/chat/completions/{completion_id}",
2753
+ options=make_request_options(
2754
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2755
+ ),
2756
+ cast_to=ChatCompletion,
2757
+ )
2758
+
2759
+ async def update(
2760
+ self,
2761
+ completion_id: str,
2762
+ *,
2763
+ metadata: Optional[Metadata],
2764
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2765
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2766
+ extra_headers: Headers | None = None,
2767
+ extra_query: Query | None = None,
2768
+ extra_body: Body | None = None,
2769
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2770
+ ) -> ChatCompletion:
2771
+ """Modify a stored chat completion.
2772
+
2773
+ Only Chat Completions that have been created
2774
+ with the `store` parameter set to `true` can be modified. Currently, the only
2775
+ supported modification is to update the `metadata` field.
2776
+
2777
+ Args:
2778
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
2779
+ for storing additional information about the object in a structured format, and
2780
+ querying for objects via API or the dashboard.
2781
+
2782
+ Keys are strings with a maximum length of 64 characters. Values are strings with
2783
+ a maximum length of 512 characters.
2784
+
2785
+ extra_headers: Send extra headers
2786
+
2787
+ extra_query: Add additional query parameters to the request
2788
+
2789
+ extra_body: Add additional JSON properties to the request
2790
+
2791
+ timeout: Override the client-level default timeout for this request, in seconds
2792
+ """
2793
+ if not completion_id:
2794
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
2795
+ return await self._post(
2796
+ f"/chat/completions/{completion_id}",
2797
+ body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
2798
+ options=make_request_options(
2799
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2800
+ ),
2801
+ cast_to=ChatCompletion,
2802
+ )
2803
+
2804
+ def list(
2805
+ self,
2806
+ *,
2807
+ after: str | Omit = omit,
2808
+ limit: int | Omit = omit,
2809
+ metadata: Optional[Metadata] | Omit = omit,
2810
+ model: str | Omit = omit,
2811
+ order: Literal["asc", "desc"] | Omit = omit,
2812
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2813
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2814
+ extra_headers: Headers | None = None,
2815
+ extra_query: Query | None = None,
2816
+ extra_body: Body | None = None,
2817
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2818
+ ) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]:
2819
+ """List stored Chat Completions.
2820
+
2821
+ Only Chat Completions that have been stored with
2822
+ the `store` parameter set to `true` will be returned.
2823
+
2824
+ Args:
2825
+ after: Identifier for the last chat completion from the previous pagination request.
2826
+
2827
+ limit: Number of Chat Completions to retrieve.
2828
+
2829
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
2830
+ for storing additional information about the object in a structured format, and
2831
+ querying for objects via API or the dashboard.
2832
+
2833
+ Keys are strings with a maximum length of 64 characters. Values are strings with
2834
+ a maximum length of 512 characters.
2835
+
2836
+ model: The model used to generate the Chat Completions.
2837
+
2838
+ order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
2839
+ `desc` for descending order. Defaults to `asc`.
2840
+
2841
+ extra_headers: Send extra headers
2842
+
2843
+ extra_query: Add additional query parameters to the request
2844
+
2845
+ extra_body: Add additional JSON properties to the request
2846
+
2847
+ timeout: Override the client-level default timeout for this request, in seconds
2848
+ """
2849
+ return self._get_api_list(
2850
+ "/chat/completions",
2851
+ page=AsyncCursorPage[ChatCompletion],
2852
+ options=make_request_options(
2853
+ extra_headers=extra_headers,
2854
+ extra_query=extra_query,
2855
+ extra_body=extra_body,
2856
+ timeout=timeout,
2857
+ query=maybe_transform(
2858
+ {
2859
+ "after": after,
2860
+ "limit": limit,
2861
+ "metadata": metadata,
2862
+ "model": model,
2863
+ "order": order,
2864
+ },
2865
+ completion_list_params.CompletionListParams,
2866
+ ),
2867
+ ),
2868
+ model=ChatCompletion,
2869
+ )
2870
+
2871
+ async def delete(
2872
+ self,
2873
+ completion_id: str,
2874
+ *,
2875
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2876
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2877
+ extra_headers: Headers | None = None,
2878
+ extra_query: Query | None = None,
2879
+ extra_body: Body | None = None,
2880
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2881
+ ) -> ChatCompletionDeleted:
2882
+ """Delete a stored chat completion.
2883
+
2884
+ Only Chat Completions that have been created
2885
+ with the `store` parameter set to `true` can be deleted.
2886
+
2887
+ Args:
2888
+ extra_headers: Send extra headers
2889
+
2890
+ extra_query: Add additional query parameters to the request
2891
+
2892
+ extra_body: Add additional JSON properties to the request
2893
+
2894
+ timeout: Override the client-level default timeout for this request, in seconds
2895
+ """
2896
+ if not completion_id:
2897
+ raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
2898
+ return await self._delete(
2899
+ f"/chat/completions/{completion_id}",
2900
+ options=make_request_options(
2901
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2902
+ ),
2903
+ cast_to=ChatCompletionDeleted,
2904
+ )
2905
+
2906
+ def stream(
2907
+ self,
2908
+ *,
2909
+ messages: Iterable[ChatCompletionMessageParam],
2910
+ model: Union[str, ChatModel],
2911
+ audio: Optional[ChatCompletionAudioParam] | Omit = omit,
2912
+ response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
2913
+ frequency_penalty: Optional[float] | Omit = omit,
2914
+ function_call: completion_create_params.FunctionCall | Omit = omit,
2915
+ functions: Iterable[completion_create_params.Function] | Omit = omit,
2916
+ logit_bias: Optional[Dict[str, int]] | Omit = omit,
2917
+ logprobs: Optional[bool] | Omit = omit,
2918
+ max_completion_tokens: Optional[int] | Omit = omit,
2919
+ max_tokens: Optional[int] | Omit = omit,
2920
+ metadata: Optional[Metadata] | Omit = omit,
2921
+ modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
2922
+ n: Optional[int] | Omit = omit,
2923
+ parallel_tool_calls: bool | Omit = omit,
2924
+ prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
2925
+ presence_penalty: Optional[float] | Omit = omit,
2926
+ prompt_cache_key: str | Omit = omit,
2927
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2928
+ reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
2929
+ safety_identifier: str | Omit = omit,
2930
+ seed: Optional[int] | Omit = omit,
2931
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2932
+ stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
2933
+ store: Optional[bool] | Omit = omit,
2934
+ stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
2935
+ temperature: Optional[float] | Omit = omit,
2936
+ tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
2937
+ tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
2938
+ top_logprobs: Optional[int] | Omit = omit,
2939
+ top_p: Optional[float] | Omit = omit,
2940
+ user: str | Omit = omit,
2941
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
2942
+ web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
2943
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2944
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2945
+ extra_headers: Headers | None = None,
2946
+ extra_query: Query | None = None,
2947
+ extra_body: Body | None = None,
2948
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2949
+ ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
2950
+ """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
2951
+ and automatic accumulation of each delta.
2952
+
2953
+ This also supports all of the parsing utilities that `.parse()` does.
2954
+
2955
+ Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
2956
+
2957
+ ```py
2958
+ async with client.chat.completions.stream(
2959
+ model="gpt-4o-2024-08-06",
2960
+ messages=[...],
2961
+ ) as stream:
2962
+ async for event in stream:
2963
+ if event.type == "content.delta":
2964
+ print(event.delta, flush=True, end="")
2965
+ ```
2966
+
2967
+ When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
2968
+
2969
+ When the context manager exits, the response will be closed, however the `stream` instance is still available outside
2970
+ the context manager.
2971
+ """
2972
+ _validate_input_tools(tools)
2973
+
2974
+ extra_headers = {
2975
+ "X-Stainless-Helper-Method": "chat.completions.stream",
2976
+ **(extra_headers or {}),
2977
+ }
2978
+
2979
+ api_request = self.create(
2980
+ messages=messages,
2981
+ model=model,
2982
+ audio=audio,
2983
+ stream=True,
2984
+ response_format=_type_to_response_format(response_format),
2985
+ frequency_penalty=frequency_penalty,
2986
+ function_call=function_call,
2987
+ functions=functions,
2988
+ logit_bias=logit_bias,
2989
+ logprobs=logprobs,
2990
+ max_completion_tokens=max_completion_tokens,
2991
+ max_tokens=max_tokens,
2992
+ metadata=metadata,
2993
+ modalities=modalities,
2994
+ n=n,
2995
+ parallel_tool_calls=parallel_tool_calls,
2996
+ prediction=prediction,
2997
+ presence_penalty=presence_penalty,
2998
+ prompt_cache_key=prompt_cache_key,
2999
+ prompt_cache_retention=prompt_cache_retention,
3000
+ reasoning_effort=reasoning_effort,
3001
+ safety_identifier=safety_identifier,
3002
+ seed=seed,
3003
+ service_tier=service_tier,
3004
+ stop=stop,
3005
+ store=store,
3006
+ stream_options=stream_options,
3007
+ temperature=temperature,
3008
+ tool_choice=tool_choice,
3009
+ tools=tools,
3010
+ top_logprobs=top_logprobs,
3011
+ top_p=top_p,
3012
+ user=user,
3013
+ verbosity=verbosity,
3014
+ web_search_options=web_search_options,
3015
+ extra_headers=extra_headers,
3016
+ extra_query=extra_query,
3017
+ extra_body=extra_body,
3018
+ timeout=timeout,
3019
+ )
3020
+ return AsyncChatCompletionStreamManager(
3021
+ api_request,
3022
+ response_format=response_format,
3023
+ input_tools=tools,
3024
+ )
3025
+
3026
+
3027
class CompletionsWithRawResponse:
    """View over a ``Completions`` resource whose methods return raw HTTP responses."""

    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        # Wrap every endpoint method so callers get the raw response object
        # instead of the parsed model.
        _wrap = _legacy_response.to_raw_response_wrapper
        self.parse = _wrap(completions.parse)
        self.create = _wrap(completions.create)
        self.retrieve = _wrap(completions.retrieve)
        self.update = _wrap(completions.update)
        self.list = _wrap(completions.list)
        self.delete = _wrap(completions.delete)

    @cached_property
    def messages(self) -> MessagesWithRawResponse:
        # Lazily build the equivalent raw-response view of the sub-resource.
        return MessagesWithRawResponse(self._completions.messages)
3053
+
3054
+
3055
class AsyncCompletionsWithRawResponse:
    """View over an ``AsyncCompletions`` resource whose methods return raw HTTP responses."""

    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        # Wrap every endpoint method so callers get the raw response object
        # instead of the parsed model.
        _wrap = _legacy_response.async_to_raw_response_wrapper
        self.parse = _wrap(completions.parse)
        self.create = _wrap(completions.create)
        self.retrieve = _wrap(completions.retrieve)
        self.update = _wrap(completions.update)
        self.list = _wrap(completions.list)
        self.delete = _wrap(completions.delete)

    @cached_property
    def messages(self) -> AsyncMessagesWithRawResponse:
        # Lazily build the equivalent raw-response view of the sub-resource.
        return AsyncMessagesWithRawResponse(self._completions.messages)
3081
+
3082
+
3083
class CompletionsWithStreamingResponse:
    """View over a ``Completions`` resource whose methods return streamed responses."""

    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        # Wrap every endpoint method so callers can stream the response body.
        _wrap = to_streamed_response_wrapper
        self.parse = _wrap(completions.parse)
        self.create = _wrap(completions.create)
        self.retrieve = _wrap(completions.retrieve)
        self.update = _wrap(completions.update)
        self.list = _wrap(completions.list)
        self.delete = _wrap(completions.delete)

    @cached_property
    def messages(self) -> MessagesWithStreamingResponse:
        # Lazily build the equivalent streaming view of the sub-resource.
        return MessagesWithStreamingResponse(self._completions.messages)
3109
+
3110
+
3111
class AsyncCompletionsWithStreamingResponse:
    """View over an ``AsyncCompletions`` resource whose methods return streamed responses."""

    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        # Wrap every endpoint method so callers can stream the response body.
        _wrap = async_to_streamed_response_wrapper
        self.parse = _wrap(completions.parse)
        self.create = _wrap(completions.create)
        self.retrieve = _wrap(completions.retrieve)
        self.update = _wrap(completions.update)
        self.list = _wrap(completions.list)
        self.delete = _wrap(completions.delete)

    @cached_property
    def messages(self) -> AsyncMessagesWithStreamingResponse:
        # Lazily build the equivalent streaming view of the sub-resource.
        return AsyncMessagesWithStreamingResponse(self._completions.messages)
3137
+
3138
+
3139
def validate_response_format(response_format: object) -> None:
    """Reject Pydantic model *classes* passed as ``response_format``.

    ``chat.completions.create()`` only accepts plain response-format payloads;
    passing a ``pydantic.BaseModel`` subclass signals the caller wanted the
    schema-parsing ``chat.completions.parse()`` API instead, so fail fast with
    a helpful ``TypeError``. Any non-class value is accepted unchanged.
    """
    if not inspect.isclass(response_format):
        return
    if issubclass(response_format, pydantic.BaseModel):
        raise TypeError(
            "You tried to pass a `BaseModel` class to `chat.completions.create()`; You must use `chat.completions.parse()` instead"
        )