aimlapi-sdk-python 2.8.1b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1958)
  1. aimlapi/__init__.py +243 -0
  2. aimlapi/__main__.py +3 -0
  3. aimlapi/_client.py +368 -0
  4. aimlapi/_utils/__init__.py +3 -0
  5. aimlapi/_utils/_compat.py +3 -0
  6. aimlapi/_utils/_datetime_parse.py +3 -0
  7. aimlapi/_utils/_logs.py +3 -0
  8. aimlapi/_utils/_proxy.py +3 -0
  9. aimlapi/_utils/_reflection.py +3 -0
  10. aimlapi/_utils/_resources_proxy.py +3 -0
  11. aimlapi/_utils/_streams.py +3 -0
  12. aimlapi/_utils/_sync.py +3 -0
  13. aimlapi/_utils/_transform.py +3 -0
  14. aimlapi/_utils/_typing.py +3 -0
  15. aimlapi/_utils/_utils.py +3 -0
  16. aimlapi/_version.py +9 -0
  17. aimlapi/cli/__init__.py +3 -0
  18. aimlapi/cli/_api/__init__.py +3 -0
  19. aimlapi/cli/_api/_main.py +3 -0
  20. aimlapi/cli/_api/audio.py +3 -0
  21. aimlapi/cli/_api/chat/__init__.py +3 -0
  22. aimlapi/cli/_api/chat/completions.py +3 -0
  23. aimlapi/cli/_api/completions.py +3 -0
  24. aimlapi/cli/_api/files.py +3 -0
  25. aimlapi/cli/_api/fine_tuning/__init__.py +3 -0
  26. aimlapi/cli/_api/fine_tuning/jobs.py +3 -0
  27. aimlapi/cli/_api/image.py +3 -0
  28. aimlapi/cli/_api/models.py +3 -0
  29. aimlapi/cli/_cli.py +3 -0
  30. aimlapi/cli/_errors.py +3 -0
  31. aimlapi/cli/_models.py +3 -0
  32. aimlapi/cli/_progress.py +3 -0
  33. aimlapi/cli/_tools/__init__.py +3 -0
  34. aimlapi/cli/_tools/_main.py +3 -0
  35. aimlapi/cli/_tools/fine_tunes.py +3 -0
  36. aimlapi/cli/_tools/migrate.py +3 -0
  37. aimlapi/cli/_utils.py +3 -0
  38. aimlapi/helpers/__init__.py +3 -0
  39. aimlapi/helpers/local_audio_player.py +3 -0
  40. aimlapi/helpers/microphone.py +3 -0
  41. aimlapi/lib/__init__.py +3 -0
  42. aimlapi/lib/_old_api.py +3 -0
  43. aimlapi/lib/_parsing/__init__.py +3 -0
  44. aimlapi/lib/_parsing/_completions.py +3 -0
  45. aimlapi/lib/_parsing/_responses.py +3 -0
  46. aimlapi/lib/_pydantic.py +3 -0
  47. aimlapi/lib/_realtime.py +3 -0
  48. aimlapi/lib/_tools.py +3 -0
  49. aimlapi/lib/_validators.py +3 -0
  50. aimlapi/lib/azure.py +3 -0
  51. aimlapi/lib/streaming/__init__.py +3 -0
  52. aimlapi/lib/streaming/_assistants.py +3 -0
  53. aimlapi/lib/streaming/_deltas.py +3 -0
  54. aimlapi/lib/streaming/chat/__init__.py +3 -0
  55. aimlapi/lib/streaming/chat/_completions.py +3 -0
  56. aimlapi/lib/streaming/chat/_events.py +3 -0
  57. aimlapi/lib/streaming/chat/_types.py +3 -0
  58. aimlapi/lib/streaming/responses/__init__.py +3 -0
  59. aimlapi/lib/streaming/responses/_events.py +3 -0
  60. aimlapi/lib/streaming/responses/_responses.py +3 -0
  61. aimlapi/lib/streaming/responses/_types.py +3 -0
  62. aimlapi/pagination.py +3 -0
  63. aimlapi/resources/__init__.py +3 -0
  64. aimlapi/resources/audio/__init__.py +47 -0
  65. aimlapi/resources/audio/_polling.py +129 -0
  66. aimlapi/resources/audio/audio.py +56 -0
  67. aimlapi/resources/audio/speech.py +428 -0
  68. aimlapi/resources/audio/transcriptions.py +219 -0
  69. aimlapi/resources/audio/translations.py +3 -0
  70. aimlapi/resources/batches.py +3 -0
  71. aimlapi/resources/beta/__init__.py +3 -0
  72. aimlapi/resources/beta/assistants.py +3 -0
  73. aimlapi/resources/beta/beta.py +3 -0
  74. aimlapi/resources/beta/chatkit/__init__.py +3 -0
  75. aimlapi/resources/beta/chatkit/chatkit.py +3 -0
  76. aimlapi/resources/beta/chatkit/sessions.py +3 -0
  77. aimlapi/resources/beta/chatkit/threads.py +3 -0
  78. aimlapi/resources/beta/realtime/__init__.py +3 -0
  79. aimlapi/resources/beta/realtime/realtime.py +3 -0
  80. aimlapi/resources/beta/realtime/sessions.py +3 -0
  81. aimlapi/resources/beta/realtime/transcription_sessions.py +3 -0
  82. aimlapi/resources/beta/threads/__init__.py +3 -0
  83. aimlapi/resources/beta/threads/messages.py +3 -0
  84. aimlapi/resources/beta/threads/runs/__init__.py +3 -0
  85. aimlapi/resources/beta/threads/runs/runs.py +3 -0
  86. aimlapi/resources/beta/threads/runs/steps.py +3 -0
  87. aimlapi/resources/beta/threads/threads.py +3 -0
  88. aimlapi/resources/chat/__init__.py +3 -0
  89. aimlapi/resources/chat/chat.py +86 -0
  90. aimlapi/resources/chat/completions/__init__.py +4 -0
  91. aimlapi/resources/chat/completions/completions.py +452 -0
  92. aimlapi/resources/chat/completions/messages.py +3 -0
  93. aimlapi/resources/completions.py +3 -0
  94. aimlapi/resources/containers/__init__.py +3 -0
  95. aimlapi/resources/containers/containers.py +3 -0
  96. aimlapi/resources/containers/files/__init__.py +3 -0
  97. aimlapi/resources/containers/files/content.py +3 -0
  98. aimlapi/resources/containers/files/files.py +3 -0
  99. aimlapi/resources/conversations/__init__.py +3 -0
  100. aimlapi/resources/conversations/conversations.py +3 -0
  101. aimlapi/resources/conversations/items.py +3 -0
  102. aimlapi/resources/embeddings.py +3 -0
  103. aimlapi/resources/evals/__init__.py +3 -0
  104. aimlapi/resources/evals/evals.py +3 -0
  105. aimlapi/resources/evals/runs/__init__.py +3 -0
  106. aimlapi/resources/evals/runs/output_items.py +3 -0
  107. aimlapi/resources/evals/runs/runs.py +3 -0
  108. aimlapi/resources/files.py +3 -0
  109. aimlapi/resources/fine_tuning/__init__.py +3 -0
  110. aimlapi/resources/fine_tuning/alpha/__init__.py +3 -0
  111. aimlapi/resources/fine_tuning/alpha/alpha.py +3 -0
  112. aimlapi/resources/fine_tuning/alpha/graders.py +3 -0
  113. aimlapi/resources/fine_tuning/checkpoints/__init__.py +3 -0
  114. aimlapi/resources/fine_tuning/checkpoints/checkpoints.py +3 -0
  115. aimlapi/resources/fine_tuning/checkpoints/permissions.py +3 -0
  116. aimlapi/resources/fine_tuning/fine_tuning.py +3 -0
  117. aimlapi/resources/fine_tuning/jobs/__init__.py +3 -0
  118. aimlapi/resources/fine_tuning/jobs/checkpoints.py +3 -0
  119. aimlapi/resources/fine_tuning/jobs/jobs.py +3 -0
  120. aimlapi/resources/images.py +184 -0
  121. aimlapi/resources/models.py +3 -0
  122. aimlapi/resources/moderations.py +3 -0
  123. aimlapi/resources/realtime/__init__.py +3 -0
  124. aimlapi/resources/realtime/calls.py +3 -0
  125. aimlapi/resources/realtime/client_secrets.py +3 -0
  126. aimlapi/resources/realtime/realtime.py +3 -0
  127. aimlapi/resources/responses/__init__.py +4 -0
  128. aimlapi/resources/responses/input_items.py +3 -0
  129. aimlapi/resources/responses/input_tokens.py +3 -0
  130. aimlapi/resources/responses/responses.py +229 -0
  131. aimlapi/resources/uploads/__init__.py +19 -0
  132. aimlapi/resources/uploads/parts.py +3 -0
  133. aimlapi/resources/uploads/uploads.py +99 -0
  134. aimlapi/resources/vector_stores/__init__.py +3 -0
  135. aimlapi/resources/vector_stores/file_batches.py +3 -0
  136. aimlapi/resources/vector_stores/files.py +3 -0
  137. aimlapi/resources/vector_stores/vector_stores.py +3 -0
  138. aimlapi/resources/videos.py +267 -0
  139. aimlapi/resources/webhooks.py +3 -0
  140. aimlapi/types/__init__.py +3 -0
  141. aimlapi/types/audio/__init__.py +3 -0
  142. aimlapi/types/audio/speech_create_params.py +3 -0
  143. aimlapi/types/audio/speech_model.py +3 -0
  144. aimlapi/types/audio/transcription.py +3 -0
  145. aimlapi/types/audio/transcription_create_params.py +3 -0
  146. aimlapi/types/audio/transcription_create_response.py +3 -0
  147. aimlapi/types/audio/transcription_diarized.py +3 -0
  148. aimlapi/types/audio/transcription_diarized_segment.py +3 -0
  149. aimlapi/types/audio/transcription_include.py +3 -0
  150. aimlapi/types/audio/transcription_segment.py +3 -0
  151. aimlapi/types/audio/transcription_stream_event.py +3 -0
  152. aimlapi/types/audio/transcription_text_delta_event.py +3 -0
  153. aimlapi/types/audio/transcription_text_done_event.py +3 -0
  154. aimlapi/types/audio/transcription_text_segment_event.py +3 -0
  155. aimlapi/types/audio/transcription_verbose.py +3 -0
  156. aimlapi/types/audio/transcription_word.py +3 -0
  157. aimlapi/types/audio/translation.py +3 -0
  158. aimlapi/types/audio/translation_create_params.py +3 -0
  159. aimlapi/types/audio/translation_create_response.py +3 -0
  160. aimlapi/types/audio/translation_verbose.py +3 -0
  161. aimlapi/types/audio_model.py +3 -0
  162. aimlapi/types/audio_response_format.py +3 -0
  163. aimlapi/types/auto_file_chunking_strategy_param.py +3 -0
  164. aimlapi/types/batch.py +3 -0
  165. aimlapi/types/batch_create_params.py +3 -0
  166. aimlapi/types/batch_error.py +3 -0
  167. aimlapi/types/batch_list_params.py +3 -0
  168. aimlapi/types/batch_request_counts.py +3 -0
  169. aimlapi/types/batch_usage.py +3 -0
  170. aimlapi/types/beta/__init__.py +3 -0
  171. aimlapi/types/beta/assistant.py +3 -0
  172. aimlapi/types/beta/assistant_create_params.py +3 -0
  173. aimlapi/types/beta/assistant_deleted.py +3 -0
  174. aimlapi/types/beta/assistant_list_params.py +3 -0
  175. aimlapi/types/beta/assistant_response_format_option.py +3 -0
  176. aimlapi/types/beta/assistant_response_format_option_param.py +3 -0
  177. aimlapi/types/beta/assistant_stream_event.py +3 -0
  178. aimlapi/types/beta/assistant_tool.py +3 -0
  179. aimlapi/types/beta/assistant_tool_choice.py +3 -0
  180. aimlapi/types/beta/assistant_tool_choice_function.py +3 -0
  181. aimlapi/types/beta/assistant_tool_choice_function_param.py +3 -0
  182. aimlapi/types/beta/assistant_tool_choice_option.py +3 -0
  183. aimlapi/types/beta/assistant_tool_choice_option_param.py +3 -0
  184. aimlapi/types/beta/assistant_tool_choice_param.py +3 -0
  185. aimlapi/types/beta/assistant_tool_param.py +3 -0
  186. aimlapi/types/beta/assistant_update_params.py +3 -0
  187. aimlapi/types/beta/chat/__init__.py +3 -0
  188. aimlapi/types/beta/chatkit/__init__.py +3 -0
  189. aimlapi/types/beta/chatkit/chat_session.py +3 -0
  190. aimlapi/types/beta/chatkit/chat_session_automatic_thread_titling.py +3 -0
  191. aimlapi/types/beta/chatkit/chat_session_chatkit_configuration.py +3 -0
  192. aimlapi/types/beta/chatkit/chat_session_chatkit_configuration_param.py +3 -0
  193. aimlapi/types/beta/chatkit/chat_session_expires_after_param.py +3 -0
  194. aimlapi/types/beta/chatkit/chat_session_file_upload.py +3 -0
  195. aimlapi/types/beta/chatkit/chat_session_history.py +3 -0
  196. aimlapi/types/beta/chatkit/chat_session_rate_limits.py +3 -0
  197. aimlapi/types/beta/chatkit/chat_session_rate_limits_param.py +3 -0
  198. aimlapi/types/beta/chatkit/chat_session_status.py +3 -0
  199. aimlapi/types/beta/chatkit/chat_session_workflow_param.py +3 -0
  200. aimlapi/types/beta/chatkit/chatkit_attachment.py +3 -0
  201. aimlapi/types/beta/chatkit/chatkit_response_output_text.py +3 -0
  202. aimlapi/types/beta/chatkit/chatkit_thread.py +3 -0
  203. aimlapi/types/beta/chatkit/chatkit_thread_assistant_message_item.py +3 -0
  204. aimlapi/types/beta/chatkit/chatkit_thread_item_list.py +3 -0
  205. aimlapi/types/beta/chatkit/chatkit_thread_user_message_item.py +3 -0
  206. aimlapi/types/beta/chatkit/chatkit_widget_item.py +3 -0
  207. aimlapi/types/beta/chatkit/session_create_params.py +3 -0
  208. aimlapi/types/beta/chatkit/thread_delete_response.py +3 -0
  209. aimlapi/types/beta/chatkit/thread_list_items_params.py +3 -0
  210. aimlapi/types/beta/chatkit/thread_list_params.py +3 -0
  211. aimlapi/types/beta/chatkit_workflow.py +3 -0
  212. aimlapi/types/beta/code_interpreter_tool.py +3 -0
  213. aimlapi/types/beta/code_interpreter_tool_param.py +3 -0
  214. aimlapi/types/beta/file_search_tool.py +3 -0
  215. aimlapi/types/beta/file_search_tool_param.py +3 -0
  216. aimlapi/types/beta/function_tool.py +3 -0
  217. aimlapi/types/beta/function_tool_param.py +3 -0
  218. aimlapi/types/beta/realtime/__init__.py +3 -0
  219. aimlapi/types/beta/realtime/conversation_created_event.py +3 -0
  220. aimlapi/types/beta/realtime/conversation_item.py +3 -0
  221. aimlapi/types/beta/realtime/conversation_item_content.py +3 -0
  222. aimlapi/types/beta/realtime/conversation_item_content_param.py +3 -0
  223. aimlapi/types/beta/realtime/conversation_item_create_event.py +3 -0
  224. aimlapi/types/beta/realtime/conversation_item_create_event_param.py +3 -0
  225. aimlapi/types/beta/realtime/conversation_item_created_event.py +3 -0
  226. aimlapi/types/beta/realtime/conversation_item_delete_event.py +3 -0
  227. aimlapi/types/beta/realtime/conversation_item_delete_event_param.py +3 -0
  228. aimlapi/types/beta/realtime/conversation_item_deleted_event.py +3 -0
  229. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
  230. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
  231. aimlapi/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
  232. aimlapi/types/beta/realtime/conversation_item_param.py +3 -0
  233. aimlapi/types/beta/realtime/conversation_item_retrieve_event.py +3 -0
  234. aimlapi/types/beta/realtime/conversation_item_retrieve_event_param.py +3 -0
  235. aimlapi/types/beta/realtime/conversation_item_truncate_event.py +3 -0
  236. aimlapi/types/beta/realtime/conversation_item_truncate_event_param.py +3 -0
  237. aimlapi/types/beta/realtime/conversation_item_truncated_event.py +3 -0
  238. aimlapi/types/beta/realtime/conversation_item_with_reference.py +3 -0
  239. aimlapi/types/beta/realtime/conversation_item_with_reference_param.py +3 -0
  240. aimlapi/types/beta/realtime/error_event.py +3 -0
  241. aimlapi/types/beta/realtime/input_audio_buffer_append_event.py +3 -0
  242. aimlapi/types/beta/realtime/input_audio_buffer_append_event_param.py +3 -0
  243. aimlapi/types/beta/realtime/input_audio_buffer_clear_event.py +3 -0
  244. aimlapi/types/beta/realtime/input_audio_buffer_clear_event_param.py +3 -0
  245. aimlapi/types/beta/realtime/input_audio_buffer_cleared_event.py +3 -0
  246. aimlapi/types/beta/realtime/input_audio_buffer_commit_event.py +3 -0
  247. aimlapi/types/beta/realtime/input_audio_buffer_commit_event_param.py +3 -0
  248. aimlapi/types/beta/realtime/input_audio_buffer_committed_event.py +3 -0
  249. aimlapi/types/beta/realtime/input_audio_buffer_speech_started_event.py +3 -0
  250. aimlapi/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
  251. aimlapi/types/beta/realtime/rate_limits_updated_event.py +3 -0
  252. aimlapi/types/beta/realtime/realtime_client_event.py +3 -0
  253. aimlapi/types/beta/realtime/realtime_client_event_param.py +3 -0
  254. aimlapi/types/beta/realtime/realtime_connect_params.py +3 -0
  255. aimlapi/types/beta/realtime/realtime_response.py +3 -0
  256. aimlapi/types/beta/realtime/realtime_response_status.py +3 -0
  257. aimlapi/types/beta/realtime/realtime_response_usage.py +3 -0
  258. aimlapi/types/beta/realtime/realtime_server_event.py +3 -0
  259. aimlapi/types/beta/realtime/response_audio_delta_event.py +3 -0
  260. aimlapi/types/beta/realtime/response_audio_done_event.py +3 -0
  261. aimlapi/types/beta/realtime/response_audio_transcript_delta_event.py +3 -0
  262. aimlapi/types/beta/realtime/response_audio_transcript_done_event.py +3 -0
  263. aimlapi/types/beta/realtime/response_cancel_event.py +3 -0
  264. aimlapi/types/beta/realtime/response_cancel_event_param.py +3 -0
  265. aimlapi/types/beta/realtime/response_content_part_added_event.py +3 -0
  266. aimlapi/types/beta/realtime/response_content_part_done_event.py +3 -0
  267. aimlapi/types/beta/realtime/response_create_event.py +3 -0
  268. aimlapi/types/beta/realtime/response_create_event_param.py +3 -0
  269. aimlapi/types/beta/realtime/response_created_event.py +3 -0
  270. aimlapi/types/beta/realtime/response_done_event.py +3 -0
  271. aimlapi/types/beta/realtime/response_function_call_arguments_delta_event.py +3 -0
  272. aimlapi/types/beta/realtime/response_function_call_arguments_done_event.py +3 -0
  273. aimlapi/types/beta/realtime/response_output_item_added_event.py +3 -0
  274. aimlapi/types/beta/realtime/response_output_item_done_event.py +3 -0
  275. aimlapi/types/beta/realtime/response_text_delta_event.py +3 -0
  276. aimlapi/types/beta/realtime/response_text_done_event.py +3 -0
  277. aimlapi/types/beta/realtime/session.py +3 -0
  278. aimlapi/types/beta/realtime/session_create_params.py +3 -0
  279. aimlapi/types/beta/realtime/session_create_response.py +3 -0
  280. aimlapi/types/beta/realtime/session_created_event.py +3 -0
  281. aimlapi/types/beta/realtime/session_update_event.py +3 -0
  282. aimlapi/types/beta/realtime/session_update_event_param.py +3 -0
  283. aimlapi/types/beta/realtime/session_updated_event.py +3 -0
  284. aimlapi/types/beta/realtime/transcription_session.py +3 -0
  285. aimlapi/types/beta/realtime/transcription_session_create_params.py +3 -0
  286. aimlapi/types/beta/realtime/transcription_session_update.py +3 -0
  287. aimlapi/types/beta/realtime/transcription_session_update_param.py +3 -0
  288. aimlapi/types/beta/realtime/transcription_session_updated_event.py +3 -0
  289. aimlapi/types/beta/thread.py +3 -0
  290. aimlapi/types/beta/thread_create_and_run_params.py +3 -0
  291. aimlapi/types/beta/thread_create_params.py +3 -0
  292. aimlapi/types/beta/thread_deleted.py +3 -0
  293. aimlapi/types/beta/thread_update_params.py +3 -0
  294. aimlapi/types/beta/threads/__init__.py +3 -0
  295. aimlapi/types/beta/threads/annotation.py +3 -0
  296. aimlapi/types/beta/threads/annotation_delta.py +3 -0
  297. aimlapi/types/beta/threads/file_citation_annotation.py +3 -0
  298. aimlapi/types/beta/threads/file_citation_delta_annotation.py +3 -0
  299. aimlapi/types/beta/threads/file_path_annotation.py +3 -0
  300. aimlapi/types/beta/threads/file_path_delta_annotation.py +3 -0
  301. aimlapi/types/beta/threads/image_file.py +3 -0
  302. aimlapi/types/beta/threads/image_file_content_block.py +3 -0
  303. aimlapi/types/beta/threads/image_file_content_block_param.py +3 -0
  304. aimlapi/types/beta/threads/image_file_delta.py +3 -0
  305. aimlapi/types/beta/threads/image_file_delta_block.py +3 -0
  306. aimlapi/types/beta/threads/image_file_param.py +3 -0
  307. aimlapi/types/beta/threads/image_url.py +3 -0
  308. aimlapi/types/beta/threads/image_url_content_block.py +3 -0
  309. aimlapi/types/beta/threads/image_url_content_block_param.py +3 -0
  310. aimlapi/types/beta/threads/image_url_delta.py +3 -0
  311. aimlapi/types/beta/threads/image_url_delta_block.py +3 -0
  312. aimlapi/types/beta/threads/image_url_param.py +3 -0
  313. aimlapi/types/beta/threads/message.py +3 -0
  314. aimlapi/types/beta/threads/message_content.py +3 -0
  315. aimlapi/types/beta/threads/message_content_delta.py +3 -0
  316. aimlapi/types/beta/threads/message_content_part_param.py +3 -0
  317. aimlapi/types/beta/threads/message_create_params.py +3 -0
  318. aimlapi/types/beta/threads/message_deleted.py +3 -0
  319. aimlapi/types/beta/threads/message_delta.py +3 -0
  320. aimlapi/types/beta/threads/message_delta_event.py +3 -0
  321. aimlapi/types/beta/threads/message_list_params.py +3 -0
  322. aimlapi/types/beta/threads/message_update_params.py +3 -0
  323. aimlapi/types/beta/threads/refusal_content_block.py +3 -0
  324. aimlapi/types/beta/threads/refusal_delta_block.py +3 -0
  325. aimlapi/types/beta/threads/required_action_function_tool_call.py +3 -0
  326. aimlapi/types/beta/threads/run.py +3 -0
  327. aimlapi/types/beta/threads/run_create_params.py +3 -0
  328. aimlapi/types/beta/threads/run_list_params.py +3 -0
  329. aimlapi/types/beta/threads/run_status.py +3 -0
  330. aimlapi/types/beta/threads/run_submit_tool_outputs_params.py +3 -0
  331. aimlapi/types/beta/threads/run_update_params.py +3 -0
  332. aimlapi/types/beta/threads/runs/__init__.py +3 -0
  333. aimlapi/types/beta/threads/runs/code_interpreter_logs.py +3 -0
  334. aimlapi/types/beta/threads/runs/code_interpreter_output_image.py +3 -0
  335. aimlapi/types/beta/threads/runs/code_interpreter_tool_call.py +3 -0
  336. aimlapi/types/beta/threads/runs/code_interpreter_tool_call_delta.py +3 -0
  337. aimlapi/types/beta/threads/runs/file_search_tool_call.py +3 -0
  338. aimlapi/types/beta/threads/runs/file_search_tool_call_delta.py +3 -0
  339. aimlapi/types/beta/threads/runs/function_tool_call.py +3 -0
  340. aimlapi/types/beta/threads/runs/function_tool_call_delta.py +3 -0
  341. aimlapi/types/beta/threads/runs/message_creation_step_details.py +3 -0
  342. aimlapi/types/beta/threads/runs/run_step.py +3 -0
  343. aimlapi/types/beta/threads/runs/run_step_delta.py +3 -0
  344. aimlapi/types/beta/threads/runs/run_step_delta_event.py +3 -0
  345. aimlapi/types/beta/threads/runs/run_step_delta_message_delta.py +3 -0
  346. aimlapi/types/beta/threads/runs/run_step_include.py +3 -0
  347. aimlapi/types/beta/threads/runs/step_list_params.py +3 -0
  348. aimlapi/types/beta/threads/runs/step_retrieve_params.py +3 -0
  349. aimlapi/types/beta/threads/runs/tool_call.py +3 -0
  350. aimlapi/types/beta/threads/runs/tool_call_delta.py +3 -0
  351. aimlapi/types/beta/threads/runs/tool_call_delta_object.py +3 -0
  352. aimlapi/types/beta/threads/runs/tool_calls_step_details.py +3 -0
  353. aimlapi/types/beta/threads/text.py +3 -0
  354. aimlapi/types/beta/threads/text_content_block.py +3 -0
  355. aimlapi/types/beta/threads/text_content_block_param.py +3 -0
  356. aimlapi/types/beta/threads/text_delta.py +3 -0
  357. aimlapi/types/beta/threads/text_delta_block.py +3 -0
  358. aimlapi/types/chat/__init__.py +3 -0
  359. aimlapi/types/chat/chat_completion.py +3 -0
  360. aimlapi/types/chat/chat_completion_allowed_tool_choice_param.py +3 -0
  361. aimlapi/types/chat/chat_completion_allowed_tools_param.py +3 -0
  362. aimlapi/types/chat/chat_completion_assistant_message_param.py +3 -0
  363. aimlapi/types/chat/chat_completion_audio.py +3 -0
  364. aimlapi/types/chat/chat_completion_audio_param.py +3 -0
  365. aimlapi/types/chat/chat_completion_chunk.py +3 -0
  366. aimlapi/types/chat/chat_completion_content_part_image.py +3 -0
  367. aimlapi/types/chat/chat_completion_content_part_image_param.py +3 -0
  368. aimlapi/types/chat/chat_completion_content_part_input_audio_param.py +3 -0
  369. aimlapi/types/chat/chat_completion_content_part_param.py +3 -0
  370. aimlapi/types/chat/chat_completion_content_part_refusal_param.py +3 -0
  371. aimlapi/types/chat/chat_completion_content_part_text.py +3 -0
  372. aimlapi/types/chat/chat_completion_content_part_text_param.py +3 -0
  373. aimlapi/types/chat/chat_completion_custom_tool_param.py +3 -0
  374. aimlapi/types/chat/chat_completion_deleted.py +3 -0
  375. aimlapi/types/chat/chat_completion_developer_message_param.py +3 -0
  376. aimlapi/types/chat/chat_completion_function_call_option_param.py +3 -0
  377. aimlapi/types/chat/chat_completion_function_message_param.py +3 -0
  378. aimlapi/types/chat/chat_completion_function_tool.py +3 -0
  379. aimlapi/types/chat/chat_completion_function_tool_param.py +3 -0
  380. aimlapi/types/chat/chat_completion_message.py +3 -0
  381. aimlapi/types/chat/chat_completion_message_custom_tool_call.py +3 -0
  382. aimlapi/types/chat/chat_completion_message_custom_tool_call_param.py +3 -0
  383. aimlapi/types/chat/chat_completion_message_function_tool_call.py +3 -0
  384. aimlapi/types/chat/chat_completion_message_function_tool_call_param.py +3 -0
  385. aimlapi/types/chat/chat_completion_message_param.py +3 -0
  386. aimlapi/types/chat/chat_completion_message_tool_call.py +3 -0
  387. aimlapi/types/chat/chat_completion_message_tool_call_param.py +3 -0
  388. aimlapi/types/chat/chat_completion_message_tool_call_union_param.py +3 -0
  389. aimlapi/types/chat/chat_completion_modality.py +3 -0
  390. aimlapi/types/chat/chat_completion_named_tool_choice_custom_param.py +3 -0
  391. aimlapi/types/chat/chat_completion_named_tool_choice_param.py +3 -0
  392. aimlapi/types/chat/chat_completion_prediction_content_param.py +3 -0
  393. aimlapi/types/chat/chat_completion_reasoning_effort.py +3 -0
  394. aimlapi/types/chat/chat_completion_role.py +3 -0
  395. aimlapi/types/chat/chat_completion_store_message.py +3 -0
  396. aimlapi/types/chat/chat_completion_stream_options_param.py +3 -0
  397. aimlapi/types/chat/chat_completion_system_message_param.py +3 -0
  398. aimlapi/types/chat/chat_completion_token_logprob.py +3 -0
  399. aimlapi/types/chat/chat_completion_tool_choice_option_param.py +3 -0
  400. aimlapi/types/chat/chat_completion_tool_message_param.py +3 -0
  401. aimlapi/types/chat/chat_completion_tool_param.py +3 -0
  402. aimlapi/types/chat/chat_completion_tool_union_param.py +3 -0
  403. aimlapi/types/chat/chat_completion_user_message_param.py +3 -0
  404. aimlapi/types/chat/completion_create_params.py +3 -0
  405. aimlapi/types/chat/completion_list_params.py +3 -0
  406. aimlapi/types/chat/completion_update_params.py +3 -0
  407. aimlapi/types/chat/completions/__init__.py +3 -0
  408. aimlapi/types/chat/completions/message_list_params.py +3 -0
  409. aimlapi/types/chat/parsed_chat_completion.py +3 -0
  410. aimlapi/types/chat/parsed_function_tool_call.py +3 -0
  411. aimlapi/types/chat_model.py +3 -0
  412. aimlapi/types/completion.py +3 -0
  413. aimlapi/types/completion_choice.py +3 -0
  414. aimlapi/types/completion_create_params.py +3 -0
  415. aimlapi/types/completion_usage.py +3 -0
  416. aimlapi/types/container_create_params.py +3 -0
  417. aimlapi/types/container_create_response.py +3 -0
  418. aimlapi/types/container_list_params.py +3 -0
  419. aimlapi/types/container_list_response.py +3 -0
  420. aimlapi/types/container_retrieve_response.py +3 -0
  421. aimlapi/types/containers/__init__.py +3 -0
  422. aimlapi/types/containers/file_create_params.py +3 -0
  423. aimlapi/types/containers/file_create_response.py +3 -0
  424. aimlapi/types/containers/file_list_params.py +3 -0
  425. aimlapi/types/containers/file_list_response.py +3 -0
  426. aimlapi/types/containers/file_retrieve_response.py +3 -0
  427. aimlapi/types/containers/files/__init__.py +3 -0
  428. aimlapi/types/conversations/__init__.py +3 -0
  429. aimlapi/types/conversations/computer_screenshot_content.py +3 -0
  430. aimlapi/types/conversations/conversation.py +3 -0
  431. aimlapi/types/conversations/conversation_create_params.py +3 -0
  432. aimlapi/types/conversations/conversation_deleted_resource.py +3 -0
  433. aimlapi/types/conversations/conversation_item.py +3 -0
  434. aimlapi/types/conversations/conversation_item_list.py +3 -0
  435. aimlapi/types/conversations/conversation_update_params.py +3 -0
  436. aimlapi/types/conversations/input_file_content.py +3 -0
  437. aimlapi/types/conversations/input_file_content_param.py +3 -0
  438. aimlapi/types/conversations/input_image_content.py +3 -0
  439. aimlapi/types/conversations/input_image_content_param.py +3 -0
  440. aimlapi/types/conversations/input_text_content.py +3 -0
  441. aimlapi/types/conversations/input_text_content_param.py +3 -0
  442. aimlapi/types/conversations/item_create_params.py +3 -0
  443. aimlapi/types/conversations/item_list_params.py +3 -0
  444. aimlapi/types/conversations/item_retrieve_params.py +3 -0
  445. aimlapi/types/conversations/message.py +3 -0
  446. aimlapi/types/conversations/output_text_content.py +3 -0
  447. aimlapi/types/conversations/output_text_content_param.py +3 -0
  448. aimlapi/types/conversations/refusal_content.py +3 -0
  449. aimlapi/types/conversations/refusal_content_param.py +3 -0
  450. aimlapi/types/conversations/summary_text_content.py +3 -0
  451. aimlapi/types/conversations/text_content.py +3 -0
  452. aimlapi/types/create_embedding_response.py +3 -0
  453. aimlapi/types/embedding.py +3 -0
  454. aimlapi/types/embedding_create_params.py +3 -0
  455. aimlapi/types/embedding_model.py +3 -0
  456. aimlapi/types/eval_create_params.py +3 -0
  457. aimlapi/types/eval_create_response.py +3 -0
  458. aimlapi/types/eval_custom_data_source_config.py +3 -0
  459. aimlapi/types/eval_delete_response.py +3 -0
  460. aimlapi/types/eval_list_params.py +3 -0
  461. aimlapi/types/eval_list_response.py +3 -0
  462. aimlapi/types/eval_retrieve_response.py +3 -0
  463. aimlapi/types/eval_stored_completions_data_source_config.py +3 -0
  464. aimlapi/types/eval_update_params.py +3 -0
  465. aimlapi/types/eval_update_response.py +3 -0
  466. aimlapi/types/evals/__init__.py +3 -0
  467. aimlapi/types/evals/create_eval_completions_run_data_source.py +3 -0
  468. aimlapi/types/evals/create_eval_completions_run_data_source_param.py +3 -0
  469. aimlapi/types/evals/create_eval_jsonl_run_data_source.py +3 -0
  470. aimlapi/types/evals/create_eval_jsonl_run_data_source_param.py +3 -0
  471. aimlapi/types/evals/eval_api_error.py +3 -0
  472. aimlapi/types/evals/run_cancel_response.py +3 -0
  473. aimlapi/types/evals/run_create_params.py +3 -0
  474. aimlapi/types/evals/run_create_response.py +3 -0
  475. aimlapi/types/evals/run_delete_response.py +3 -0
  476. aimlapi/types/evals/run_list_params.py +3 -0
  477. aimlapi/types/evals/run_list_response.py +3 -0
  478. aimlapi/types/evals/run_retrieve_response.py +3 -0
  479. aimlapi/types/evals/runs/__init__.py +3 -0
  480. aimlapi/types/evals/runs/output_item_list_params.py +3 -0
  481. aimlapi/types/evals/runs/output_item_list_response.py +3 -0
  482. aimlapi/types/evals/runs/output_item_retrieve_response.py +3 -0
  483. aimlapi/types/file_chunking_strategy.py +3 -0
  484. aimlapi/types/file_chunking_strategy_param.py +3 -0
  485. aimlapi/types/file_content.py +3 -0
  486. aimlapi/types/file_create_params.py +3 -0
  487. aimlapi/types/file_deleted.py +3 -0
  488. aimlapi/types/file_list_params.py +3 -0
  489. aimlapi/types/file_object.py +3 -0
  490. aimlapi/types/file_purpose.py +3 -0
  491. aimlapi/types/fine_tuning/__init__.py +3 -0
  492. aimlapi/types/fine_tuning/alpha/__init__.py +3 -0
  493. aimlapi/types/fine_tuning/alpha/grader_run_params.py +3 -0
  494. aimlapi/types/fine_tuning/alpha/grader_run_response.py +3 -0
  495. aimlapi/types/fine_tuning/alpha/grader_validate_params.py +3 -0
  496. aimlapi/types/fine_tuning/alpha/grader_validate_response.py +3 -0
  497. aimlapi/types/fine_tuning/checkpoints/__init__.py +3 -0
  498. aimlapi/types/fine_tuning/checkpoints/permission_create_params.py +3 -0
  499. aimlapi/types/fine_tuning/checkpoints/permission_create_response.py +3 -0
  500. aimlapi/types/fine_tuning/checkpoints/permission_delete_response.py +3 -0
  501. aimlapi/types/fine_tuning/checkpoints/permission_retrieve_params.py +3 -0
  502. aimlapi/types/fine_tuning/checkpoints/permission_retrieve_response.py +3 -0
  503. aimlapi/types/fine_tuning/dpo_hyperparameters.py +3 -0
  504. aimlapi/types/fine_tuning/dpo_hyperparameters_param.py +3 -0
  505. aimlapi/types/fine_tuning/dpo_method.py +3 -0
  506. aimlapi/types/fine_tuning/dpo_method_param.py +3 -0
  507. aimlapi/types/fine_tuning/fine_tuning_job.py +3 -0
  508. aimlapi/types/fine_tuning/fine_tuning_job_event.py +3 -0
  509. aimlapi/types/fine_tuning/fine_tuning_job_integration.py +3 -0
  510. aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration.py +3 -0
  511. aimlapi/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +3 -0
  512. aimlapi/types/fine_tuning/job_create_params.py +3 -0
  513. aimlapi/types/fine_tuning/job_list_events_params.py +3 -0
  514. aimlapi/types/fine_tuning/job_list_params.py +3 -0
  515. aimlapi/types/fine_tuning/jobs/__init__.py +3 -0
  516. aimlapi/types/fine_tuning/jobs/checkpoint_list_params.py +3 -0
  517. aimlapi/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +3 -0
  518. aimlapi/types/fine_tuning/reinforcement_hyperparameters.py +3 -0
  519. aimlapi/types/fine_tuning/reinforcement_hyperparameters_param.py +3 -0
  520. aimlapi/types/fine_tuning/reinforcement_method.py +3 -0
  521. aimlapi/types/fine_tuning/reinforcement_method_param.py +3 -0
  522. aimlapi/types/fine_tuning/supervised_hyperparameters.py +3 -0
  523. aimlapi/types/fine_tuning/supervised_hyperparameters_param.py +3 -0
  524. aimlapi/types/fine_tuning/supervised_method.py +3 -0
  525. aimlapi/types/fine_tuning/supervised_method_param.py +3 -0
  526. aimlapi/types/graders/__init__.py +3 -0
  527. aimlapi/types/graders/label_model_grader.py +3 -0
  528. aimlapi/types/graders/label_model_grader_param.py +3 -0
  529. aimlapi/types/graders/multi_grader.py +3 -0
  530. aimlapi/types/graders/multi_grader_param.py +3 -0
  531. aimlapi/types/graders/python_grader.py +3 -0
  532. aimlapi/types/graders/python_grader_param.py +3 -0
  533. aimlapi/types/graders/score_model_grader.py +3 -0
  534. aimlapi/types/graders/score_model_grader_param.py +3 -0
  535. aimlapi/types/graders/string_check_grader.py +3 -0
  536. aimlapi/types/graders/string_check_grader_param.py +3 -0
  537. aimlapi/types/graders/text_similarity_grader.py +3 -0
  538. aimlapi/types/graders/text_similarity_grader_param.py +3 -0
  539. aimlapi/types/image.py +3 -0
  540. aimlapi/types/image_create_variation_params.py +3 -0
  541. aimlapi/types/image_edit_completed_event.py +3 -0
  542. aimlapi/types/image_edit_params.py +3 -0
  543. aimlapi/types/image_edit_partial_image_event.py +3 -0
  544. aimlapi/types/image_edit_stream_event.py +3 -0
  545. aimlapi/types/image_gen_completed_event.py +3 -0
  546. aimlapi/types/image_gen_partial_image_event.py +3 -0
  547. aimlapi/types/image_gen_stream_event.py +3 -0
  548. aimlapi/types/image_generate_params.py +3 -0
  549. aimlapi/types/image_model.py +3 -0
  550. aimlapi/types/images_response.py +3 -0
  551. aimlapi/types/model.py +3 -0
  552. aimlapi/types/model_deleted.py +3 -0
  553. aimlapi/types/moderation.py +3 -0
  554. aimlapi/types/moderation_create_params.py +3 -0
  555. aimlapi/types/moderation_create_response.py +3 -0
  556. aimlapi/types/moderation_image_url_input_param.py +3 -0
  557. aimlapi/types/moderation_model.py +3 -0
  558. aimlapi/types/moderation_multi_modal_input_param.py +3 -0
  559. aimlapi/types/moderation_text_input_param.py +3 -0
  560. aimlapi/types/other_file_chunking_strategy_object.py +3 -0
  561. aimlapi/types/realtime/__init__.py +3 -0
  562. aimlapi/types/realtime/audio_transcription.py +3 -0
  563. aimlapi/types/realtime/audio_transcription_param.py +3 -0
  564. aimlapi/types/realtime/call_accept_params.py +3 -0
  565. aimlapi/types/realtime/call_create_params.py +3 -0
  566. aimlapi/types/realtime/call_refer_params.py +3 -0
  567. aimlapi/types/realtime/call_reject_params.py +3 -0
  568. aimlapi/types/realtime/client_secret_create_params.py +3 -0
  569. aimlapi/types/realtime/client_secret_create_response.py +3 -0
  570. aimlapi/types/realtime/conversation_created_event.py +3 -0
  571. aimlapi/types/realtime/conversation_item.py +3 -0
  572. aimlapi/types/realtime/conversation_item_added.py +3 -0
  573. aimlapi/types/realtime/conversation_item_create_event.py +3 -0
  574. aimlapi/types/realtime/conversation_item_create_event_param.py +3 -0
  575. aimlapi/types/realtime/conversation_item_created_event.py +3 -0
  576. aimlapi/types/realtime/conversation_item_delete_event.py +3 -0
  577. aimlapi/types/realtime/conversation_item_delete_event_param.py +3 -0
  578. aimlapi/types/realtime/conversation_item_deleted_event.py +3 -0
  579. aimlapi/types/realtime/conversation_item_done.py +3 -0
  580. aimlapi/types/realtime/conversation_item_input_audio_transcription_completed_event.py +3 -0
  581. aimlapi/types/realtime/conversation_item_input_audio_transcription_delta_event.py +3 -0
  582. aimlapi/types/realtime/conversation_item_input_audio_transcription_failed_event.py +3 -0
  583. aimlapi/types/realtime/conversation_item_input_audio_transcription_segment.py +3 -0
  584. aimlapi/types/realtime/conversation_item_param.py +3 -0
  585. aimlapi/types/realtime/conversation_item_retrieve_event.py +3 -0
  586. aimlapi/types/realtime/conversation_item_retrieve_event_param.py +3 -0
  587. aimlapi/types/realtime/conversation_item_truncate_event.py +3 -0
  588. aimlapi/types/realtime/conversation_item_truncate_event_param.py +3 -0
  589. aimlapi/types/realtime/conversation_item_truncated_event.py +3 -0
  590. aimlapi/types/realtime/input_audio_buffer_append_event.py +3 -0
  591. aimlapi/types/realtime/input_audio_buffer_append_event_param.py +3 -0
  592. aimlapi/types/realtime/input_audio_buffer_clear_event.py +3 -0
  593. aimlapi/types/realtime/input_audio_buffer_clear_event_param.py +3 -0
  594. aimlapi/types/realtime/input_audio_buffer_cleared_event.py +3 -0
  595. aimlapi/types/realtime/input_audio_buffer_commit_event.py +3 -0
  596. aimlapi/types/realtime/input_audio_buffer_commit_event_param.py +3 -0
  597. aimlapi/types/realtime/input_audio_buffer_committed_event.py +3 -0
  598. aimlapi/types/realtime/input_audio_buffer_speech_started_event.py +3 -0
  599. aimlapi/types/realtime/input_audio_buffer_speech_stopped_event.py +3 -0
  600. aimlapi/types/realtime/input_audio_buffer_timeout_triggered.py +3 -0
  601. aimlapi/types/realtime/log_prob_properties.py +3 -0
  602. aimlapi/types/realtime/mcp_list_tools_completed.py +3 -0
  603. aimlapi/types/realtime/mcp_list_tools_failed.py +3 -0
  604. aimlapi/types/realtime/mcp_list_tools_in_progress.py +3 -0
  605. aimlapi/types/realtime/noise_reduction_type.py +3 -0
  606. aimlapi/types/realtime/output_audio_buffer_clear_event.py +3 -0
  607. aimlapi/types/realtime/output_audio_buffer_clear_event_param.py +3 -0
  608. aimlapi/types/realtime/rate_limits_updated_event.py +3 -0
  609. aimlapi/types/realtime/realtime_audio_config.py +3 -0
  610. aimlapi/types/realtime/realtime_audio_config_input.py +3 -0
  611. aimlapi/types/realtime/realtime_audio_config_input_param.py +3 -0
  612. aimlapi/types/realtime/realtime_audio_config_output.py +3 -0
  613. aimlapi/types/realtime/realtime_audio_config_output_param.py +3 -0
  614. aimlapi/types/realtime/realtime_audio_config_param.py +3 -0
  615. aimlapi/types/realtime/realtime_audio_formats.py +3 -0
  616. aimlapi/types/realtime/realtime_audio_formats_param.py +3 -0
  617. aimlapi/types/realtime/realtime_audio_input_turn_detection.py +3 -0
  618. aimlapi/types/realtime/realtime_audio_input_turn_detection_param.py +3 -0
  619. aimlapi/types/realtime/realtime_client_event.py +3 -0
  620. aimlapi/types/realtime/realtime_client_event_param.py +3 -0
  621. aimlapi/types/realtime/realtime_connect_params.py +3 -0
  622. aimlapi/types/realtime/realtime_conversation_item_assistant_message.py +3 -0
  623. aimlapi/types/realtime/realtime_conversation_item_assistant_message_param.py +3 -0
  624. aimlapi/types/realtime/realtime_conversation_item_function_call.py +3 -0
  625. aimlapi/types/realtime/realtime_conversation_item_function_call_output.py +3 -0
  626. aimlapi/types/realtime/realtime_conversation_item_function_call_output_param.py +3 -0
  627. aimlapi/types/realtime/realtime_conversation_item_function_call_param.py +3 -0
  628. aimlapi/types/realtime/realtime_conversation_item_system_message.py +3 -0
  629. aimlapi/types/realtime/realtime_conversation_item_system_message_param.py +3 -0
  630. aimlapi/types/realtime/realtime_conversation_item_user_message.py +3 -0
  631. aimlapi/types/realtime/realtime_conversation_item_user_message_param.py +3 -0
  632. aimlapi/types/realtime/realtime_error.py +3 -0
  633. aimlapi/types/realtime/realtime_error_event.py +3 -0
  634. aimlapi/types/realtime/realtime_function_tool.py +3 -0
  635. aimlapi/types/realtime/realtime_function_tool_param.py +3 -0
  636. aimlapi/types/realtime/realtime_mcp_approval_request.py +3 -0
  637. aimlapi/types/realtime/realtime_mcp_approval_request_param.py +3 -0
  638. aimlapi/types/realtime/realtime_mcp_approval_response.py +3 -0
  639. aimlapi/types/realtime/realtime_mcp_approval_response_param.py +3 -0
  640. aimlapi/types/realtime/realtime_mcp_list_tools.py +3 -0
  641. aimlapi/types/realtime/realtime_mcp_list_tools_param.py +3 -0
  642. aimlapi/types/realtime/realtime_mcp_protocol_error.py +3 -0
  643. aimlapi/types/realtime/realtime_mcp_protocol_error_param.py +3 -0
  644. aimlapi/types/realtime/realtime_mcp_tool_call.py +3 -0
  645. aimlapi/types/realtime/realtime_mcp_tool_call_param.py +3 -0
  646. aimlapi/types/realtime/realtime_mcp_tool_execution_error.py +3 -0
  647. aimlapi/types/realtime/realtime_mcp_tool_execution_error_param.py +3 -0
  648. aimlapi/types/realtime/realtime_mcphttp_error.py +3 -0
  649. aimlapi/types/realtime/realtime_mcphttp_error_param.py +3 -0
  650. aimlapi/types/realtime/realtime_response.py +3 -0
  651. aimlapi/types/realtime/realtime_response_create_audio_output.py +3 -0
  652. aimlapi/types/realtime/realtime_response_create_audio_output_param.py +3 -0
  653. aimlapi/types/realtime/realtime_response_create_mcp_tool.py +3 -0
  654. aimlapi/types/realtime/realtime_response_create_mcp_tool_param.py +3 -0
  655. aimlapi/types/realtime/realtime_response_create_params.py +3 -0
  656. aimlapi/types/realtime/realtime_response_create_params_param.py +3 -0
  657. aimlapi/types/realtime/realtime_response_status.py +3 -0
  658. aimlapi/types/realtime/realtime_response_usage.py +3 -0
  659. aimlapi/types/realtime/realtime_response_usage_input_token_details.py +3 -0
  660. aimlapi/types/realtime/realtime_response_usage_output_token_details.py +3 -0
  661. aimlapi/types/realtime/realtime_server_event.py +3 -0
  662. aimlapi/types/realtime/realtime_session_client_secret.py +3 -0
  663. aimlapi/types/realtime/realtime_session_create_request.py +3 -0
  664. aimlapi/types/realtime/realtime_session_create_request_param.py +3 -0
  665. aimlapi/types/realtime/realtime_session_create_response.py +3 -0
  666. aimlapi/types/realtime/realtime_tool_choice_config.py +3 -0
  667. aimlapi/types/realtime/realtime_tool_choice_config_param.py +3 -0
  668. aimlapi/types/realtime/realtime_tools_config.py +3 -0
  669. aimlapi/types/realtime/realtime_tools_config_param.py +3 -0
  670. aimlapi/types/realtime/realtime_tools_config_union.py +3 -0
  671. aimlapi/types/realtime/realtime_tools_config_union_param.py +3 -0
  672. aimlapi/types/realtime/realtime_tracing_config.py +3 -0
  673. aimlapi/types/realtime/realtime_tracing_config_param.py +3 -0
  674. aimlapi/types/realtime/realtime_transcription_session_audio.py +3 -0
  675. aimlapi/types/realtime/realtime_transcription_session_audio_input.py +3 -0
  676. aimlapi/types/realtime/realtime_transcription_session_audio_input_param.py +3 -0
  677. aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +3 -0
  678. aimlapi/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +3 -0
  679. aimlapi/types/realtime/realtime_transcription_session_audio_param.py +3 -0
  680. aimlapi/types/realtime/realtime_transcription_session_create_request.py +3 -0
  681. aimlapi/types/realtime/realtime_transcription_session_create_request_param.py +3 -0
  682. aimlapi/types/realtime/realtime_transcription_session_create_response.py +3 -0
  683. aimlapi/types/realtime/realtime_transcription_session_turn_detection.py +3 -0
  684. aimlapi/types/realtime/realtime_truncation.py +3 -0
  685. aimlapi/types/realtime/realtime_truncation_param.py +3 -0
  686. aimlapi/types/realtime/realtime_truncation_retention_ratio.py +3 -0
  687. aimlapi/types/realtime/realtime_truncation_retention_ratio_param.py +3 -0
  688. aimlapi/types/realtime/response_audio_delta_event.py +3 -0
  689. aimlapi/types/realtime/response_audio_done_event.py +3 -0
  690. aimlapi/types/realtime/response_audio_transcript_delta_event.py +3 -0
  691. aimlapi/types/realtime/response_audio_transcript_done_event.py +3 -0
  692. aimlapi/types/realtime/response_cancel_event.py +3 -0
  693. aimlapi/types/realtime/response_cancel_event_param.py +3 -0
  694. aimlapi/types/realtime/response_content_part_added_event.py +3 -0
  695. aimlapi/types/realtime/response_content_part_done_event.py +3 -0
  696. aimlapi/types/realtime/response_create_event.py +3 -0
  697. aimlapi/types/realtime/response_create_event_param.py +3 -0
  698. aimlapi/types/realtime/response_created_event.py +3 -0
  699. aimlapi/types/realtime/response_done_event.py +3 -0
  700. aimlapi/types/realtime/response_function_call_arguments_delta_event.py +3 -0
  701. aimlapi/types/realtime/response_function_call_arguments_done_event.py +3 -0
  702. aimlapi/types/realtime/response_mcp_call_arguments_delta.py +3 -0
  703. aimlapi/types/realtime/response_mcp_call_arguments_done.py +3 -0
  704. aimlapi/types/realtime/response_mcp_call_completed.py +3 -0
  705. aimlapi/types/realtime/response_mcp_call_failed.py +3 -0
  706. aimlapi/types/realtime/response_mcp_call_in_progress.py +3 -0
  707. aimlapi/types/realtime/response_output_item_added_event.py +3 -0
  708. aimlapi/types/realtime/response_output_item_done_event.py +3 -0
  709. aimlapi/types/realtime/response_text_delta_event.py +3 -0
  710. aimlapi/types/realtime/response_text_done_event.py +3 -0
  711. aimlapi/types/realtime/session_created_event.py +3 -0
  712. aimlapi/types/realtime/session_update_event.py +3 -0
  713. aimlapi/types/realtime/session_update_event_param.py +3 -0
  714. aimlapi/types/realtime/session_updated_event.py +3 -0
  715. aimlapi/types/responses/__init__.py +3 -0
  716. aimlapi/types/responses/computer_tool.py +3 -0
  717. aimlapi/types/responses/computer_tool_param.py +3 -0
  718. aimlapi/types/responses/custom_tool.py +3 -0
  719. aimlapi/types/responses/custom_tool_param.py +3 -0
  720. aimlapi/types/responses/easy_input_message.py +3 -0
  721. aimlapi/types/responses/easy_input_message_param.py +3 -0
  722. aimlapi/types/responses/file_search_tool.py +3 -0
  723. aimlapi/types/responses/file_search_tool_param.py +3 -0
  724. aimlapi/types/responses/function_tool.py +3 -0
  725. aimlapi/types/responses/function_tool_param.py +3 -0
  726. aimlapi/types/responses/input_item_list_params.py +3 -0
  727. aimlapi/types/responses/input_token_count_params.py +3 -0
  728. aimlapi/types/responses/input_token_count_response.py +3 -0
  729. aimlapi/types/responses/parsed_response.py +3 -0
  730. aimlapi/types/responses/response.py +3 -0
  731. aimlapi/types/responses/response_audio_delta_event.py +3 -0
  732. aimlapi/types/responses/response_audio_done_event.py +3 -0
  733. aimlapi/types/responses/response_audio_transcript_delta_event.py +3 -0
  734. aimlapi/types/responses/response_audio_transcript_done_event.py +3 -0
  735. aimlapi/types/responses/response_code_interpreter_call_code_delta_event.py +3 -0
  736. aimlapi/types/responses/response_code_interpreter_call_code_done_event.py +3 -0
  737. aimlapi/types/responses/response_code_interpreter_call_completed_event.py +3 -0
  738. aimlapi/types/responses/response_code_interpreter_call_in_progress_event.py +3 -0
  739. aimlapi/types/responses/response_code_interpreter_call_interpreting_event.py +3 -0
  740. aimlapi/types/responses/response_code_interpreter_tool_call.py +3 -0
  741. aimlapi/types/responses/response_code_interpreter_tool_call_param.py +3 -0
  742. aimlapi/types/responses/response_completed_event.py +3 -0
  743. aimlapi/types/responses/response_computer_tool_call.py +3 -0
  744. aimlapi/types/responses/response_computer_tool_call_output_item.py +3 -0
  745. aimlapi/types/responses/response_computer_tool_call_output_screenshot.py +3 -0
  746. aimlapi/types/responses/response_computer_tool_call_output_screenshot_param.py +3 -0
  747. aimlapi/types/responses/response_computer_tool_call_param.py +3 -0
  748. aimlapi/types/responses/response_content_part_added_event.py +3 -0
  749. aimlapi/types/responses/response_content_part_done_event.py +3 -0
  750. aimlapi/types/responses/response_conversation_param.py +3 -0
  751. aimlapi/types/responses/response_create_params.py +3 -0
  752. aimlapi/types/responses/response_created_event.py +3 -0
  753. aimlapi/types/responses/response_custom_tool_call.py +3 -0
  754. aimlapi/types/responses/response_custom_tool_call_input_delta_event.py +3 -0
  755. aimlapi/types/responses/response_custom_tool_call_input_done_event.py +3 -0
  756. aimlapi/types/responses/response_custom_tool_call_output.py +3 -0
  757. aimlapi/types/responses/response_custom_tool_call_output_param.py +3 -0
  758. aimlapi/types/responses/response_custom_tool_call_param.py +3 -0
  759. aimlapi/types/responses/response_error.py +3 -0
  760. aimlapi/types/responses/response_error_event.py +3 -0
  761. aimlapi/types/responses/response_failed_event.py +3 -0
  762. aimlapi/types/responses/response_file_search_call_completed_event.py +3 -0
  763. aimlapi/types/responses/response_file_search_call_in_progress_event.py +3 -0
  764. aimlapi/types/responses/response_file_search_call_searching_event.py +3 -0
  765. aimlapi/types/responses/response_file_search_tool_call.py +3 -0
  766. aimlapi/types/responses/response_file_search_tool_call_param.py +3 -0
  767. aimlapi/types/responses/response_format_text_config.py +3 -0
  768. aimlapi/types/responses/response_format_text_config_param.py +3 -0
  769. aimlapi/types/responses/response_format_text_json_schema_config.py +3 -0
  770. aimlapi/types/responses/response_format_text_json_schema_config_param.py +3 -0
  771. aimlapi/types/responses/response_function_call_arguments_delta_event.py +3 -0
  772. aimlapi/types/responses/response_function_call_arguments_done_event.py +3 -0
  773. aimlapi/types/responses/response_function_call_output_item.py +3 -0
  774. aimlapi/types/responses/response_function_call_output_item_list.py +3 -0
  775. aimlapi/types/responses/response_function_call_output_item_list_param.py +3 -0
  776. aimlapi/types/responses/response_function_call_output_item_param.py +3 -0
  777. aimlapi/types/responses/response_function_tool_call.py +3 -0
  778. aimlapi/types/responses/response_function_tool_call_item.py +3 -0
  779. aimlapi/types/responses/response_function_tool_call_output_item.py +3 -0
  780. aimlapi/types/responses/response_function_tool_call_param.py +3 -0
  781. aimlapi/types/responses/response_function_web_search.py +3 -0
  782. aimlapi/types/responses/response_function_web_search_param.py +3 -0
  783. aimlapi/types/responses/response_image_gen_call_completed_event.py +3 -0
  784. aimlapi/types/responses/response_image_gen_call_generating_event.py +3 -0
  785. aimlapi/types/responses/response_image_gen_call_in_progress_event.py +3 -0
  786. aimlapi/types/responses/response_image_gen_call_partial_image_event.py +3 -0
  787. aimlapi/types/responses/response_in_progress_event.py +3 -0
  788. aimlapi/types/responses/response_includable.py +3 -0
  789. aimlapi/types/responses/response_incomplete_event.py +3 -0
  790. aimlapi/types/responses/response_input_audio.py +3 -0
  791. aimlapi/types/responses/response_input_audio_param.py +3 -0
  792. aimlapi/types/responses/response_input_content.py +3 -0
  793. aimlapi/types/responses/response_input_content_param.py +3 -0
  794. aimlapi/types/responses/response_input_file.py +3 -0
  795. aimlapi/types/responses/response_input_file_content.py +3 -0
  796. aimlapi/types/responses/response_input_file_content_param.py +3 -0
  797. aimlapi/types/responses/response_input_file_param.py +3 -0
  798. aimlapi/types/responses/response_input_image.py +3 -0
  799. aimlapi/types/responses/response_input_image_content.py +3 -0
  800. aimlapi/types/responses/response_input_image_content_param.py +3 -0
  801. aimlapi/types/responses/response_input_image_param.py +3 -0
  802. aimlapi/types/responses/response_input_item.py +3 -0
  803. aimlapi/types/responses/response_input_item_param.py +3 -0
  804. aimlapi/types/responses/response_input_message_content_list.py +3 -0
  805. aimlapi/types/responses/response_input_message_content_list_param.py +3 -0
  806. aimlapi/types/responses/response_input_message_item.py +3 -0
  807. aimlapi/types/responses/response_input_param.py +3 -0
  808. aimlapi/types/responses/response_input_text.py +3 -0
  809. aimlapi/types/responses/response_input_text_content.py +3 -0
  810. aimlapi/types/responses/response_input_text_content_param.py +3 -0
  811. aimlapi/types/responses/response_input_text_param.py +3 -0
  812. aimlapi/types/responses/response_item.py +3 -0
  813. aimlapi/types/responses/response_item_list.py +3 -0
  814. aimlapi/types/responses/response_mcp_call_arguments_delta_event.py +3 -0
  815. aimlapi/types/responses/response_mcp_call_arguments_done_event.py +3 -0
  816. aimlapi/types/responses/response_mcp_call_completed_event.py +3 -0
  817. aimlapi/types/responses/response_mcp_call_failed_event.py +3 -0
  818. aimlapi/types/responses/response_mcp_call_in_progress_event.py +3 -0
  819. aimlapi/types/responses/response_mcp_list_tools_completed_event.py +3 -0
  820. aimlapi/types/responses/response_mcp_list_tools_failed_event.py +3 -0
  821. aimlapi/types/responses/response_mcp_list_tools_in_progress_event.py +3 -0
  822. aimlapi/types/responses/response_output_item.py +3 -0
  823. aimlapi/types/responses/response_output_item_added_event.py +3 -0
  824. aimlapi/types/responses/response_output_item_done_event.py +3 -0
  825. aimlapi/types/responses/response_output_message.py +3 -0
  826. aimlapi/types/responses/response_output_message_param.py +3 -0
  827. aimlapi/types/responses/response_output_refusal.py +3 -0
  828. aimlapi/types/responses/response_output_refusal_param.py +3 -0
  829. aimlapi/types/responses/response_output_text.py +3 -0
  830. aimlapi/types/responses/response_output_text_annotation_added_event.py +3 -0
  831. aimlapi/types/responses/response_output_text_param.py +3 -0
  832. aimlapi/types/responses/response_prompt.py +3 -0
  833. aimlapi/types/responses/response_prompt_param.py +3 -0
  834. aimlapi/types/responses/response_queued_event.py +3 -0
  835. aimlapi/types/responses/response_reasoning_item.py +3 -0
  836. aimlapi/types/responses/response_reasoning_item_param.py +3 -0
  837. aimlapi/types/responses/response_reasoning_summary_part_added_event.py +3 -0
  838. aimlapi/types/responses/response_reasoning_summary_part_done_event.py +3 -0
  839. aimlapi/types/responses/response_reasoning_summary_text_delta_event.py +3 -0
  840. aimlapi/types/responses/response_reasoning_summary_text_done_event.py +3 -0
  841. aimlapi/types/responses/response_reasoning_text_delta_event.py +3 -0
  842. aimlapi/types/responses/response_reasoning_text_done_event.py +3 -0
  843. aimlapi/types/responses/response_refusal_delta_event.py +3 -0
  844. aimlapi/types/responses/response_refusal_done_event.py +3 -0
  845. aimlapi/types/responses/response_retrieve_params.py +3 -0
  846. aimlapi/types/responses/response_status.py +3 -0
  847. aimlapi/types/responses/response_stream_event.py +3 -0
  848. aimlapi/types/responses/response_text_config.py +3 -0
  849. aimlapi/types/responses/response_text_config_param.py +3 -0
  850. aimlapi/types/responses/response_text_delta_event.py +3 -0
  851. aimlapi/types/responses/response_text_done_event.py +3 -0
  852. aimlapi/types/responses/response_usage.py +3 -0
  853. aimlapi/types/responses/response_web_search_call_completed_event.py +3 -0
  854. aimlapi/types/responses/response_web_search_call_in_progress_event.py +3 -0
  855. aimlapi/types/responses/response_web_search_call_searching_event.py +3 -0
  856. aimlapi/types/responses/tool.py +3 -0
  857. aimlapi/types/responses/tool_choice_allowed.py +3 -0
  858. aimlapi/types/responses/tool_choice_allowed_param.py +3 -0
  859. aimlapi/types/responses/tool_choice_custom.py +3 -0
  860. aimlapi/types/responses/tool_choice_custom_param.py +3 -0
  861. aimlapi/types/responses/tool_choice_function.py +3 -0
  862. aimlapi/types/responses/tool_choice_function_param.py +3 -0
  863. aimlapi/types/responses/tool_choice_mcp.py +3 -0
  864. aimlapi/types/responses/tool_choice_mcp_param.py +3 -0
  865. aimlapi/types/responses/tool_choice_options.py +3 -0
  866. aimlapi/types/responses/tool_choice_types.py +3 -0
  867. aimlapi/types/responses/tool_choice_types_param.py +3 -0
  868. aimlapi/types/responses/tool_param.py +3 -0
  869. aimlapi/types/responses/web_search_preview_tool.py +3 -0
  870. aimlapi/types/responses/web_search_preview_tool_param.py +3 -0
  871. aimlapi/types/responses/web_search_tool.py +3 -0
  872. aimlapi/types/responses/web_search_tool_param.py +3 -0
  873. aimlapi/types/shared/__init__.py +3 -0
  874. aimlapi/types/shared/all_models.py +3 -0
  875. aimlapi/types/shared/chat_model.py +3 -0
  876. aimlapi/types/shared/comparison_filter.py +3 -0
  877. aimlapi/types/shared/compound_filter.py +3 -0
  878. aimlapi/types/shared/custom_tool_input_format.py +3 -0
  879. aimlapi/types/shared/error_object.py +3 -0
  880. aimlapi/types/shared/function_definition.py +3 -0
  881. aimlapi/types/shared/function_parameters.py +3 -0
  882. aimlapi/types/shared/metadata.py +3 -0
  883. aimlapi/types/shared/reasoning.py +3 -0
  884. aimlapi/types/shared/reasoning_effort.py +3 -0
  885. aimlapi/types/shared/response_format_json_object.py +3 -0
  886. aimlapi/types/shared/response_format_json_schema.py +3 -0
  887. aimlapi/types/shared/response_format_text.py +3 -0
  888. aimlapi/types/shared/response_format_text_grammar.py +3 -0
  889. aimlapi/types/shared/response_format_text_python.py +3 -0
  890. aimlapi/types/shared/responses_model.py +3 -0
  891. aimlapi/types/shared_params/__init__.py +3 -0
  892. aimlapi/types/shared_params/chat_model.py +3 -0
  893. aimlapi/types/shared_params/comparison_filter.py +3 -0
  894. aimlapi/types/shared_params/compound_filter.py +3 -0
  895. aimlapi/types/shared_params/custom_tool_input_format.py +3 -0
  896. aimlapi/types/shared_params/function_definition.py +3 -0
  897. aimlapi/types/shared_params/function_parameters.py +3 -0
  898. aimlapi/types/shared_params/metadata.py +3 -0
  899. aimlapi/types/shared_params/reasoning.py +3 -0
  900. aimlapi/types/shared_params/reasoning_effort.py +3 -0
  901. aimlapi/types/shared_params/response_format_json_object.py +3 -0
  902. aimlapi/types/shared_params/response_format_json_schema.py +3 -0
  903. aimlapi/types/shared_params/response_format_text.py +3 -0
  904. aimlapi/types/shared_params/responses_model.py +3 -0
  905. aimlapi/types/static_file_chunking_strategy.py +3 -0
  906. aimlapi/types/static_file_chunking_strategy_object.py +3 -0
  907. aimlapi/types/static_file_chunking_strategy_object_param.py +3 -0
  908. aimlapi/types/static_file_chunking_strategy_param.py +3 -0
  909. aimlapi/types/upload.py +3 -0
  910. aimlapi/types/upload_complete_params.py +3 -0
  911. aimlapi/types/upload_create_params.py +3 -0
  912. aimlapi/types/uploads/__init__.py +3 -0
  913. aimlapi/types/uploads/part_create_params.py +3 -0
  914. aimlapi/types/uploads/upload_part.py +3 -0
  915. aimlapi/types/vector_store.py +3 -0
  916. aimlapi/types/vector_store_create_params.py +3 -0
  917. aimlapi/types/vector_store_deleted.py +3 -0
  918. aimlapi/types/vector_store_list_params.py +3 -0
  919. aimlapi/types/vector_store_search_params.py +3 -0
  920. aimlapi/types/vector_store_search_response.py +3 -0
  921. aimlapi/types/vector_store_update_params.py +3 -0
  922. aimlapi/types/vector_stores/__init__.py +3 -0
  923. aimlapi/types/vector_stores/file_batch_create_params.py +3 -0
  924. aimlapi/types/vector_stores/file_batch_list_files_params.py +3 -0
  925. aimlapi/types/vector_stores/file_content_response.py +3 -0
  926. aimlapi/types/vector_stores/file_create_params.py +3 -0
  927. aimlapi/types/vector_stores/file_list_params.py +3 -0
  928. aimlapi/types/vector_stores/file_update_params.py +3 -0
  929. aimlapi/types/vector_stores/vector_store_file.py +3 -0
  930. aimlapi/types/vector_stores/vector_store_file_batch.py +3 -0
  931. aimlapi/types/vector_stores/vector_store_file_deleted.py +3 -0
  932. aimlapi/types/video.py +3 -0
  933. aimlapi/types/video_create_error.py +3 -0
  934. aimlapi/types/video_create_params.py +3 -0
  935. aimlapi/types/video_delete_response.py +3 -0
  936. aimlapi/types/video_download_content_params.py +3 -0
  937. aimlapi/types/video_list_params.py +3 -0
  938. aimlapi/types/video_model.py +3 -0
  939. aimlapi/types/video_remix_params.py +3 -0
  940. aimlapi/types/video_seconds.py +3 -0
  941. aimlapi/types/video_size.py +3 -0
  942. aimlapi/types/webhooks/__init__.py +3 -0
  943. aimlapi/types/webhooks/batch_cancelled_webhook_event.py +3 -0
  944. aimlapi/types/webhooks/batch_completed_webhook_event.py +3 -0
  945. aimlapi/types/webhooks/batch_expired_webhook_event.py +3 -0
  946. aimlapi/types/webhooks/batch_failed_webhook_event.py +3 -0
  947. aimlapi/types/webhooks/eval_run_canceled_webhook_event.py +3 -0
  948. aimlapi/types/webhooks/eval_run_failed_webhook_event.py +3 -0
  949. aimlapi/types/webhooks/eval_run_succeeded_webhook_event.py +3 -0
  950. aimlapi/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +3 -0
  951. aimlapi/types/webhooks/fine_tuning_job_failed_webhook_event.py +3 -0
  952. aimlapi/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +3 -0
  953. aimlapi/types/webhooks/realtime_call_incoming_webhook_event.py +3 -0
  954. aimlapi/types/webhooks/response_cancelled_webhook_event.py +3 -0
  955. aimlapi/types/webhooks/response_completed_webhook_event.py +3 -0
  956. aimlapi/types/webhooks/response_failed_webhook_event.py +3 -0
  957. aimlapi/types/webhooks/response_incomplete_webhook_event.py +3 -0
  958. aimlapi/types/webhooks/unwrap_webhook_event.py +3 -0
  959. aimlapi/types/websocket_connection_options.py +3 -0
  960. aimlapi/version.py +3 -0
  961. aimlapi_sdk_python-2.8.1b0.dist-info/METADATA +886 -0
  962. aimlapi_sdk_python-2.8.1b0.dist-info/RECORD +1958 -0
  963. aimlapi_sdk_python-2.8.1b0.dist-info/WHEEL +4 -0
  964. aimlapi_sdk_python-2.8.1b0.dist-info/entry_points.txt +2 -0
  965. aimlapi_sdk_python-2.8.1b0.dist-info/licenses/LICENSE +201 -0
  966. openai/__init__.py +395 -0
  967. openai/__main__.py +3 -0
  968. openai/_base_client.py +2027 -0
  969. openai/_client.py +1272 -0
  970. openai/_compat.py +231 -0
  971. openai/_constants.py +14 -0
  972. openai/_exceptions.py +161 -0
  973. openai/_extras/__init__.py +3 -0
  974. openai/_extras/_common.py +21 -0
  975. openai/_extras/numpy_proxy.py +37 -0
  976. openai/_extras/pandas_proxy.py +28 -0
  977. openai/_extras/sounddevice_proxy.py +28 -0
  978. openai/_files.py +123 -0
  979. openai/_legacy_response.py +488 -0
  980. openai/_models.py +897 -0
  981. openai/_module_client.py +173 -0
  982. openai/_qs.py +150 -0
  983. openai/_resource.py +43 -0
  984. openai/_response.py +848 -0
  985. openai/_streaming.py +408 -0
  986. openai/_types.py +264 -0
  987. openai/_utils/__init__.py +67 -0
  988. openai/_utils/_compat.py +45 -0
  989. openai/_utils/_datetime_parse.py +136 -0
  990. openai/_utils/_logs.py +42 -0
  991. openai/_utils/_proxy.py +65 -0
  992. openai/_utils/_reflection.py +45 -0
  993. openai/_utils/_resources_proxy.py +24 -0
  994. openai/_utils/_streams.py +12 -0
  995. openai/_utils/_sync.py +58 -0
  996. openai/_utils/_transform.py +457 -0
  997. openai/_utils/_typing.py +156 -0
  998. openai/_utils/_utils.py +437 -0
  999. openai/_version.py +4 -0
  1000. openai/cli/__init__.py +1 -0
  1001. openai/cli/_api/__init__.py +1 -0
  1002. openai/cli/_api/_main.py +17 -0
  1003. openai/cli/_api/audio.py +108 -0
  1004. openai/cli/_api/chat/__init__.py +13 -0
  1005. openai/cli/_api/chat/completions.py +160 -0
  1006. openai/cli/_api/completions.py +173 -0
  1007. openai/cli/_api/files.py +80 -0
  1008. openai/cli/_api/fine_tuning/__init__.py +13 -0
  1009. openai/cli/_api/fine_tuning/jobs.py +170 -0
  1010. openai/cli/_api/image.py +139 -0
  1011. openai/cli/_api/models.py +45 -0
  1012. openai/cli/_cli.py +233 -0
  1013. openai/cli/_errors.py +21 -0
  1014. openai/cli/_models.py +17 -0
  1015. openai/cli/_progress.py +59 -0
  1016. openai/cli/_tools/__init__.py +1 -0
  1017. openai/cli/_tools/_main.py +17 -0
  1018. openai/cli/_tools/fine_tunes.py +63 -0
  1019. openai/cli/_tools/migrate.py +164 -0
  1020. openai/cli/_utils.py +45 -0
  1021. openai/helpers/__init__.py +4 -0
  1022. openai/helpers/local_audio_player.py +165 -0
  1023. openai/helpers/microphone.py +100 -0
  1024. openai/lib/.keep +4 -0
  1025. openai/lib/__init__.py +2 -0
  1026. openai/lib/_old_api.py +72 -0
  1027. openai/lib/_parsing/__init__.py +12 -0
  1028. openai/lib/_parsing/_completions.py +305 -0
  1029. openai/lib/_parsing/_responses.py +180 -0
  1030. openai/lib/_pydantic.py +155 -0
  1031. openai/lib/_realtime.py +92 -0
  1032. openai/lib/_tools.py +66 -0
  1033. openai/lib/_validators.py +809 -0
  1034. openai/lib/azure.py +647 -0
  1035. openai/lib/streaming/__init__.py +8 -0
  1036. openai/lib/streaming/_assistants.py +1038 -0
  1037. openai/lib/streaming/_deltas.py +64 -0
  1038. openai/lib/streaming/chat/__init__.py +27 -0
  1039. openai/lib/streaming/chat/_completions.py +770 -0
  1040. openai/lib/streaming/chat/_events.py +123 -0
  1041. openai/lib/streaming/chat/_types.py +20 -0
  1042. openai/lib/streaming/responses/__init__.py +13 -0
  1043. openai/lib/streaming/responses/_events.py +148 -0
  1044. openai/lib/streaming/responses/_responses.py +372 -0
  1045. openai/lib/streaming/responses/_types.py +10 -0
  1046. openai/pagination.py +190 -0
  1047. openai/py.typed +0 -0
  1048. openai/resources/__init__.py +229 -0
  1049. openai/resources/audio/__init__.py +61 -0
  1050. openai/resources/audio/audio.py +166 -0
  1051. openai/resources/audio/speech.py +255 -0
  1052. openai/resources/audio/transcriptions.py +980 -0
  1053. openai/resources/audio/translations.py +367 -0
  1054. openai/resources/batches.py +530 -0
  1055. openai/resources/beta/__init__.py +61 -0
  1056. openai/resources/beta/assistants.py +1049 -0
  1057. openai/resources/beta/beta.py +187 -0
  1058. openai/resources/beta/chatkit/__init__.py +47 -0
  1059. openai/resources/beta/chatkit/chatkit.py +134 -0
  1060. openai/resources/beta/chatkit/sessions.py +301 -0
  1061. openai/resources/beta/chatkit/threads.py +521 -0
  1062. openai/resources/beta/realtime/__init__.py +47 -0
  1063. openai/resources/beta/realtime/realtime.py +1094 -0
  1064. openai/resources/beta/realtime/sessions.py +424 -0
  1065. openai/resources/beta/realtime/transcription_sessions.py +282 -0
  1066. openai/resources/beta/threads/__init__.py +47 -0
  1067. openai/resources/beta/threads/messages.py +718 -0
  1068. openai/resources/beta/threads/runs/__init__.py +33 -0
  1069. openai/resources/beta/threads/runs/runs.py +3122 -0
  1070. openai/resources/beta/threads/runs/steps.py +399 -0
  1071. openai/resources/beta/threads/threads.py +1935 -0
  1072. openai/resources/chat/__init__.py +33 -0
  1073. openai/resources/chat/chat.py +102 -0
  1074. openai/resources/chat/completions/__init__.py +33 -0
  1075. openai/resources/chat/completions/completions.py +3143 -0
  1076. openai/resources/chat/completions/messages.py +212 -0
  1077. openai/resources/completions.py +1160 -0
  1078. openai/resources/containers/__init__.py +33 -0
  1079. openai/resources/containers/containers.py +510 -0
  1080. openai/resources/containers/files/__init__.py +33 -0
  1081. openai/resources/containers/files/content.py +173 -0
  1082. openai/resources/containers/files/files.py +545 -0
  1083. openai/resources/conversations/__init__.py +33 -0
  1084. openai/resources/conversations/conversations.py +486 -0
  1085. openai/resources/conversations/items.py +557 -0
  1086. openai/resources/embeddings.py +298 -0
  1087. openai/resources/evals/__init__.py +33 -0
  1088. openai/resources/evals/evals.py +662 -0
  1089. openai/resources/evals/runs/__init__.py +33 -0
  1090. openai/resources/evals/runs/output_items.py +315 -0
  1091. openai/resources/evals/runs/runs.py +634 -0
  1092. openai/resources/files.py +770 -0
  1093. openai/resources/fine_tuning/__init__.py +61 -0
  1094. openai/resources/fine_tuning/alpha/__init__.py +33 -0
  1095. openai/resources/fine_tuning/alpha/alpha.py +102 -0
  1096. openai/resources/fine_tuning/alpha/graders.py +282 -0
  1097. openai/resources/fine_tuning/checkpoints/__init__.py +33 -0
  1098. openai/resources/fine_tuning/checkpoints/checkpoints.py +102 -0
  1099. openai/resources/fine_tuning/checkpoints/permissions.py +418 -0
  1100. openai/resources/fine_tuning/fine_tuning.py +166 -0
  1101. openai/resources/fine_tuning/jobs/__init__.py +33 -0
  1102. openai/resources/fine_tuning/jobs/checkpoints.py +199 -0
  1103. openai/resources/fine_tuning/jobs/jobs.py +918 -0
  1104. openai/resources/images.py +1858 -0
  1105. openai/resources/models.py +306 -0
  1106. openai/resources/moderations.py +197 -0
  1107. openai/resources/realtime/__init__.py +47 -0
  1108. openai/resources/realtime/calls.py +764 -0
  1109. openai/resources/realtime/client_secrets.py +189 -0
  1110. openai/resources/realtime/realtime.py +1079 -0
  1111. openai/resources/responses/__init__.py +47 -0
  1112. openai/resources/responses/input_items.py +226 -0
  1113. openai/resources/responses/input_tokens.py +309 -0
  1114. openai/resources/responses/responses.py +3130 -0
  1115. openai/resources/uploads/__init__.py +33 -0
  1116. openai/resources/uploads/parts.py +205 -0
  1117. openai/resources/uploads/uploads.py +719 -0
  1118. openai/resources/vector_stores/__init__.py +47 -0
  1119. openai/resources/vector_stores/file_batches.py +813 -0
  1120. openai/resources/vector_stores/files.py +939 -0
  1121. openai/resources/vector_stores/vector_stores.py +875 -0
  1122. openai/resources/videos.py +847 -0
  1123. openai/resources/webhooks.py +210 -0
  1124. openai/types/__init__.py +115 -0
  1125. openai/types/audio/__init__.py +23 -0
  1126. openai/types/audio/speech_create_params.py +57 -0
  1127. openai/types/audio/speech_model.py +7 -0
  1128. openai/types/audio/transcription.py +71 -0
  1129. openai/types/audio/transcription_create_params.py +172 -0
  1130. openai/types/audio/transcription_create_response.py +12 -0
  1131. openai/types/audio/transcription_diarized.py +63 -0
  1132. openai/types/audio/transcription_diarized_segment.py +32 -0
  1133. openai/types/audio/transcription_include.py +7 -0
  1134. openai/types/audio/transcription_segment.py +49 -0
  1135. openai/types/audio/transcription_stream_event.py +16 -0
  1136. openai/types/audio/transcription_text_delta_event.py +41 -0
  1137. openai/types/audio/transcription_text_done_event.py +63 -0
  1138. openai/types/audio/transcription_text_segment_event.py +27 -0
  1139. openai/types/audio/transcription_verbose.py +38 -0
  1140. openai/types/audio/transcription_word.py +16 -0
  1141. openai/types/audio/translation.py +9 -0
  1142. openai/types/audio/translation_create_params.py +49 -0
  1143. openai/types/audio/translation_create_response.py +11 -0
  1144. openai/types/audio/translation_verbose.py +22 -0
  1145. openai/types/audio_model.py +7 -0
  1146. openai/types/audio_response_format.py +7 -0
  1147. openai/types/auto_file_chunking_strategy_param.py +12 -0
  1148. openai/types/batch.py +104 -0
  1149. openai/types/batch_create_params.py +72 -0
  1150. openai/types/batch_error.py +21 -0
  1151. openai/types/batch_list_params.py +24 -0
  1152. openai/types/batch_request_counts.py +16 -0
  1153. openai/types/batch_usage.py +35 -0
  1154. openai/types/beta/__init__.py +34 -0
  1155. openai/types/beta/assistant.py +134 -0
  1156. openai/types/beta/assistant_create_params.py +220 -0
  1157. openai/types/beta/assistant_deleted.py +15 -0
  1158. openai/types/beta/assistant_list_params.py +39 -0
  1159. openai/types/beta/assistant_response_format_option.py +14 -0
  1160. openai/types/beta/assistant_response_format_option_param.py +16 -0
  1161. openai/types/beta/assistant_stream_event.py +294 -0
  1162. openai/types/beta/assistant_tool.py +15 -0
  1163. openai/types/beta/assistant_tool_choice.py +16 -0
  1164. openai/types/beta/assistant_tool_choice_function.py +10 -0
  1165. openai/types/beta/assistant_tool_choice_function_param.py +12 -0
  1166. openai/types/beta/assistant_tool_choice_option.py +10 -0
  1167. openai/types/beta/assistant_tool_choice_option_param.py +12 -0
  1168. openai/types/beta/assistant_tool_choice_param.py +16 -0
  1169. openai/types/beta/assistant_tool_param.py +14 -0
  1170. openai/types/beta/assistant_update_params.py +191 -0
  1171. openai/types/beta/chat/__init__.py +3 -0
  1172. openai/types/beta/chatkit/__init__.py +32 -0
  1173. openai/types/beta/chatkit/chat_session.py +43 -0
  1174. openai/types/beta/chatkit/chat_session_automatic_thread_titling.py +10 -0
  1175. openai/types/beta/chatkit/chat_session_chatkit_configuration.py +19 -0
  1176. openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py +59 -0
  1177. openai/types/beta/chatkit/chat_session_expires_after_param.py +15 -0
  1178. openai/types/beta/chatkit/chat_session_file_upload.py +18 -0
  1179. openai/types/beta/chatkit/chat_session_history.py +18 -0
  1180. openai/types/beta/chatkit/chat_session_rate_limits.py +10 -0
  1181. openai/types/beta/chatkit/chat_session_rate_limits_param.py +12 -0
  1182. openai/types/beta/chatkit/chat_session_status.py +7 -0
  1183. openai/types/beta/chatkit/chat_session_workflow_param.py +34 -0
  1184. openai/types/beta/chatkit/chatkit_attachment.py +25 -0
  1185. openai/types/beta/chatkit/chatkit_response_output_text.py +62 -0
  1186. openai/types/beta/chatkit/chatkit_thread.py +56 -0
  1187. openai/types/beta/chatkit/chatkit_thread_assistant_message_item.py +29 -0
  1188. openai/types/beta/chatkit/chatkit_thread_item_list.py +144 -0
  1189. openai/types/beta/chatkit/chatkit_thread_user_message_item.py +77 -0
  1190. openai/types/beta/chatkit/chatkit_widget_item.py +27 -0
  1191. openai/types/beta/chatkit/session_create_params.py +35 -0
  1192. openai/types/beta/chatkit/thread_delete_response.py +18 -0
  1193. openai/types/beta/chatkit/thread_list_items_params.py +27 -0
  1194. openai/types/beta/chatkit/thread_list_params.py +33 -0
  1195. openai/types/beta/chatkit_workflow.py +32 -0
  1196. openai/types/beta/code_interpreter_tool.py +12 -0
  1197. openai/types/beta/code_interpreter_tool_param.py +12 -0
  1198. openai/types/beta/file_search_tool.py +55 -0
  1199. openai/types/beta/file_search_tool_param.py +54 -0
  1200. openai/types/beta/function_tool.py +15 -0
  1201. openai/types/beta/function_tool_param.py +16 -0
  1202. openai/types/beta/realtime/__init__.py +96 -0
  1203. openai/types/beta/realtime/conversation_created_event.py +27 -0
  1204. openai/types/beta/realtime/conversation_item.py +61 -0
  1205. openai/types/beta/realtime/conversation_item_content.py +32 -0
  1206. openai/types/beta/realtime/conversation_item_content_param.py +31 -0
  1207. openai/types/beta/realtime/conversation_item_create_event.py +29 -0
  1208. openai/types/beta/realtime/conversation_item_create_event_param.py +29 -0
  1209. openai/types/beta/realtime/conversation_item_created_event.py +27 -0
  1210. openai/types/beta/realtime/conversation_item_delete_event.py +19 -0
  1211. openai/types/beta/realtime/conversation_item_delete_event_param.py +18 -0
  1212. openai/types/beta/realtime/conversation_item_deleted_event.py +18 -0
  1213. openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +87 -0
  1214. openai/types/beta/realtime/conversation_item_input_audio_transcription_delta_event.py +39 -0
  1215. openai/types/beta/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
  1216. openai/types/beta/realtime/conversation_item_param.py +62 -0
  1217. openai/types/beta/realtime/conversation_item_retrieve_event.py +19 -0
  1218. openai/types/beta/realtime/conversation_item_retrieve_event_param.py +18 -0
  1219. openai/types/beta/realtime/conversation_item_truncate_event.py +32 -0
  1220. openai/types/beta/realtime/conversation_item_truncate_event_param.py +31 -0
  1221. openai/types/beta/realtime/conversation_item_truncated_event.py +24 -0
  1222. openai/types/beta/realtime/conversation_item_with_reference.py +87 -0
  1223. openai/types/beta/realtime/conversation_item_with_reference_param.py +87 -0
  1224. openai/types/beta/realtime/error_event.py +36 -0
  1225. openai/types/beta/realtime/input_audio_buffer_append_event.py +23 -0
  1226. openai/types/beta/realtime/input_audio_buffer_append_event_param.py +22 -0
  1227. openai/types/beta/realtime/input_audio_buffer_clear_event.py +16 -0
  1228. openai/types/beta/realtime/input_audio_buffer_clear_event_param.py +15 -0
  1229. openai/types/beta/realtime/input_audio_buffer_cleared_event.py +15 -0
  1230. openai/types/beta/realtime/input_audio_buffer_commit_event.py +16 -0
  1231. openai/types/beta/realtime/input_audio_buffer_commit_event_param.py +15 -0
  1232. openai/types/beta/realtime/input_audio_buffer_committed_event.py +25 -0
  1233. openai/types/beta/realtime/input_audio_buffer_speech_started_event.py +26 -0
  1234. openai/types/beta/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
  1235. openai/types/beta/realtime/rate_limits_updated_event.py +33 -0
  1236. openai/types/beta/realtime/realtime_client_event.py +47 -0
  1237. openai/types/beta/realtime/realtime_client_event_param.py +44 -0
  1238. openai/types/beta/realtime/realtime_connect_params.py +11 -0
  1239. openai/types/beta/realtime/realtime_response.py +87 -0
  1240. openai/types/beta/realtime/realtime_response_status.py +39 -0
  1241. openai/types/beta/realtime/realtime_response_usage.py +52 -0
  1242. openai/types/beta/realtime/realtime_server_event.py +133 -0
  1243. openai/types/beta/realtime/response_audio_delta_event.py +30 -0
  1244. openai/types/beta/realtime/response_audio_done_event.py +27 -0
  1245. openai/types/beta/realtime/response_audio_transcript_delta_event.py +30 -0
  1246. openai/types/beta/realtime/response_audio_transcript_done_event.py +30 -0
  1247. openai/types/beta/realtime/response_cancel_event.py +22 -0
  1248. openai/types/beta/realtime/response_cancel_event_param.py +21 -0
  1249. openai/types/beta/realtime/response_content_part_added_event.py +45 -0
  1250. openai/types/beta/realtime/response_content_part_done_event.py +45 -0
  1251. openai/types/beta/realtime/response_create_event.py +121 -0
  1252. openai/types/beta/realtime/response_create_event_param.py +122 -0
  1253. openai/types/beta/realtime/response_created_event.py +19 -0
  1254. openai/types/beta/realtime/response_done_event.py +19 -0
  1255. openai/types/beta/realtime/response_function_call_arguments_delta_event.py +30 -0
  1256. openai/types/beta/realtime/response_function_call_arguments_done_event.py +30 -0
  1257. openai/types/beta/realtime/response_output_item_added_event.py +25 -0
  1258. openai/types/beta/realtime/response_output_item_done_event.py +25 -0
  1259. openai/types/beta/realtime/response_text_delta_event.py +30 -0
  1260. openai/types/beta/realtime/response_text_done_event.py +30 -0
  1261. openai/types/beta/realtime/session.py +279 -0
  1262. openai/types/beta/realtime/session_create_params.py +298 -0
  1263. openai/types/beta/realtime/session_create_response.py +196 -0
  1264. openai/types/beta/realtime/session_created_event.py +19 -0
  1265. openai/types/beta/realtime/session_update_event.py +312 -0
  1266. openai/types/beta/realtime/session_update_event_param.py +310 -0
  1267. openai/types/beta/realtime/session_updated_event.py +19 -0
  1268. openai/types/beta/realtime/transcription_session.py +100 -0
  1269. openai/types/beta/realtime/transcription_session_create_params.py +173 -0
  1270. openai/types/beta/realtime/transcription_session_update.py +185 -0
  1271. openai/types/beta/realtime/transcription_session_update_param.py +185 -0
  1272. openai/types/beta/realtime/transcription_session_updated_event.py +24 -0
  1273. openai/types/beta/thread.py +63 -0
  1274. openai/types/beta/thread_create_and_run_params.py +397 -0
  1275. openai/types/beta/thread_create_params.py +186 -0
  1276. openai/types/beta/thread_deleted.py +15 -0
  1277. openai/types/beta/thread_update_params.py +56 -0
  1278. openai/types/beta/threads/__init__.py +46 -0
  1279. openai/types/beta/threads/annotation.py +12 -0
  1280. openai/types/beta/threads/annotation_delta.py +14 -0
  1281. openai/types/beta/threads/file_citation_annotation.py +26 -0
  1282. openai/types/beta/threads/file_citation_delta_annotation.py +33 -0
  1283. openai/types/beta/threads/file_path_annotation.py +26 -0
  1284. openai/types/beta/threads/file_path_delta_annotation.py +30 -0
  1285. openai/types/beta/threads/image_file.py +23 -0
  1286. openai/types/beta/threads/image_file_content_block.py +15 -0
  1287. openai/types/beta/threads/image_file_content_block_param.py +16 -0
  1288. openai/types/beta/threads/image_file_delta.py +23 -0
  1289. openai/types/beta/threads/image_file_delta_block.py +19 -0
  1290. openai/types/beta/threads/image_file_param.py +22 -0
  1291. openai/types/beta/threads/image_url.py +23 -0
  1292. openai/types/beta/threads/image_url_content_block.py +15 -0
  1293. openai/types/beta/threads/image_url_content_block_param.py +16 -0
  1294. openai/types/beta/threads/image_url_delta.py +22 -0
  1295. openai/types/beta/threads/image_url_delta_block.py +19 -0
  1296. openai/types/beta/threads/image_url_param.py +22 -0
  1297. openai/types/beta/threads/message.py +103 -0
  1298. openai/types/beta/threads/message_content.py +18 -0
  1299. openai/types/beta/threads/message_content_delta.py +17 -0
  1300. openai/types/beta/threads/message_content_part_param.py +14 -0
  1301. openai/types/beta/threads/message_create_params.py +55 -0
  1302. openai/types/beta/threads/message_deleted.py +15 -0
  1303. openai/types/beta/threads/message_delta.py +17 -0
  1304. openai/types/beta/threads/message_delta_event.py +19 -0
  1305. openai/types/beta/threads/message_list_params.py +42 -0
  1306. openai/types/beta/threads/message_update_params.py +24 -0
  1307. openai/types/beta/threads/refusal_content_block.py +14 -0
  1308. openai/types/beta/threads/refusal_delta_block.py +18 -0
  1309. openai/types/beta/threads/required_action_function_tool_call.py +34 -0
  1310. openai/types/beta/threads/run.py +245 -0
  1311. openai/types/beta/threads/run_create_params.py +268 -0
  1312. openai/types/beta/threads/run_list_params.py +39 -0
  1313. openai/types/beta/threads/run_status.py +17 -0
  1314. openai/types/beta/threads/run_submit_tool_outputs_params.py +52 -0
  1315. openai/types/beta/threads/run_update_params.py +24 -0
  1316. openai/types/beta/threads/runs/__init__.py +24 -0
  1317. openai/types/beta/threads/runs/code_interpreter_logs.py +19 -0
  1318. openai/types/beta/threads/runs/code_interpreter_output_image.py +26 -0
  1319. openai/types/beta/threads/runs/code_interpreter_tool_call.py +70 -0
  1320. openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py +44 -0
  1321. openai/types/beta/threads/runs/file_search_tool_call.py +78 -0
  1322. openai/types/beta/threads/runs/file_search_tool_call_delta.py +25 -0
  1323. openai/types/beta/threads/runs/function_tool_call.py +38 -0
  1324. openai/types/beta/threads/runs/function_tool_call_delta.py +41 -0
  1325. openai/types/beta/threads/runs/message_creation_step_details.py +19 -0
  1326. openai/types/beta/threads/runs/run_step.py +115 -0
  1327. openai/types/beta/threads/runs/run_step_delta.py +20 -0
  1328. openai/types/beta/threads/runs/run_step_delta_event.py +19 -0
  1329. openai/types/beta/threads/runs/run_step_delta_message_delta.py +20 -0
  1330. openai/types/beta/threads/runs/run_step_include.py +7 -0
  1331. openai/types/beta/threads/runs/step_list_params.py +56 -0
  1332. openai/types/beta/threads/runs/step_retrieve_params.py +28 -0
  1333. openai/types/beta/threads/runs/tool_call.py +15 -0
  1334. openai/types/beta/threads/runs/tool_call_delta.py +16 -0
  1335. openai/types/beta/threads/runs/tool_call_delta_object.py +21 -0
  1336. openai/types/beta/threads/runs/tool_calls_step_details.py +21 -0
  1337. openai/types/beta/threads/text.py +15 -0
  1338. openai/types/beta/threads/text_content_block.py +15 -0
  1339. openai/types/beta/threads/text_content_block_param.py +15 -0
  1340. openai/types/beta/threads/text_delta.py +15 -0
  1341. openai/types/beta/threads/text_delta_block.py +19 -0
  1342. openai/types/chat/__init__.py +102 -0
  1343. openai/types/chat/chat_completion.py +89 -0
  1344. openai/types/chat/chat_completion_allowed_tool_choice_param.py +17 -0
  1345. openai/types/chat/chat_completion_allowed_tools_param.py +32 -0
  1346. openai/types/chat/chat_completion_assistant_message_param.py +70 -0
  1347. openai/types/chat/chat_completion_audio.py +25 -0
  1348. openai/types/chat/chat_completion_audio_param.py +25 -0
  1349. openai/types/chat/chat_completion_chunk.py +166 -0
  1350. openai/types/chat/chat_completion_content_part_image.py +27 -0
  1351. openai/types/chat/chat_completion_content_part_image_param.py +26 -0
  1352. openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
  1353. openai/types/chat/chat_completion_content_part_param.py +41 -0
  1354. openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
  1355. openai/types/chat/chat_completion_content_part_text.py +15 -0
  1356. openai/types/chat/chat_completion_content_part_text_param.py +15 -0
  1357. openai/types/chat/chat_completion_custom_tool_param.py +58 -0
  1358. openai/types/chat/chat_completion_deleted.py +18 -0
  1359. openai/types/chat/chat_completion_developer_message_param.py +25 -0
  1360. openai/types/chat/chat_completion_function_call_option_param.py +12 -0
  1361. openai/types/chat/chat_completion_function_message_param.py +19 -0
  1362. openai/types/chat/chat_completion_function_tool.py +15 -0
  1363. openai/types/chat/chat_completion_function_tool_param.py +16 -0
  1364. openai/types/chat/chat_completion_message.py +79 -0
  1365. openai/types/chat/chat_completion_message_custom_tool_call.py +26 -0
  1366. openai/types/chat/chat_completion_message_custom_tool_call_param.py +26 -0
  1367. openai/types/chat/chat_completion_message_function_tool_call.py +31 -0
  1368. openai/types/chat/chat_completion_message_function_tool_call_param.py +31 -0
  1369. openai/types/chat/chat_completion_message_param.py +24 -0
  1370. openai/types/chat/chat_completion_message_tool_call.py +17 -0
  1371. openai/types/chat/chat_completion_message_tool_call_param.py +14 -0
  1372. openai/types/chat/chat_completion_message_tool_call_union_param.py +15 -0
  1373. openai/types/chat/chat_completion_modality.py +7 -0
  1374. openai/types/chat/chat_completion_named_tool_choice_custom_param.py +19 -0
  1375. openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
  1376. openai/types/chat/chat_completion_prediction_content_param.py +25 -0
  1377. openai/types/chat/chat_completion_reasoning_effort.py +7 -0
  1378. openai/types/chat/chat_completion_role.py +7 -0
  1379. openai/types/chat/chat_completion_store_message.py +23 -0
  1380. openai/types/chat/chat_completion_stream_options_param.py +31 -0
  1381. openai/types/chat/chat_completion_system_message_param.py +25 -0
  1382. openai/types/chat/chat_completion_token_logprob.py +57 -0
  1383. openai/types/chat/chat_completion_tool_choice_option_param.py +19 -0
  1384. openai/types/chat/chat_completion_tool_message_param.py +21 -0
  1385. openai/types/chat/chat_completion_tool_param.py +14 -0
  1386. openai/types/chat/chat_completion_tool_union_param.py +13 -0
  1387. openai/types/chat/chat_completion_user_message_param.py +25 -0
  1388. openai/types/chat/completion_create_params.py +450 -0
  1389. openai/types/chat/completion_list_params.py +37 -0
  1390. openai/types/chat/completion_update_params.py +22 -0
  1391. openai/types/chat/completions/__init__.py +5 -0
  1392. openai/types/chat/completions/message_list_params.py +21 -0
  1393. openai/types/chat/parsed_chat_completion.py +40 -0
  1394. openai/types/chat/parsed_function_tool_call.py +29 -0
  1395. openai/types/chat_model.py +7 -0
  1396. openai/types/completion.py +37 -0
  1397. openai/types/completion_choice.py +35 -0
  1398. openai/types/completion_create_params.py +189 -0
  1399. openai/types/completion_usage.py +54 -0
  1400. openai/types/container_create_params.py +30 -0
  1401. openai/types/container_create_response.py +40 -0
  1402. openai/types/container_list_params.py +30 -0
  1403. openai/types/container_list_response.py +40 -0
  1404. openai/types/container_retrieve_response.py +40 -0
  1405. openai/types/containers/__init__.py +9 -0
  1406. openai/types/containers/file_create_params.py +17 -0
  1407. openai/types/containers/file_create_response.py +30 -0
  1408. openai/types/containers/file_list_params.py +30 -0
  1409. openai/types/containers/file_list_response.py +30 -0
  1410. openai/types/containers/file_retrieve_response.py +30 -0
  1411. openai/types/containers/files/__init__.py +3 -0
  1412. openai/types/conversations/__init__.py +27 -0
  1413. openai/types/conversations/computer_screenshot_content.py +22 -0
  1414. openai/types/conversations/conversation.py +30 -0
  1415. openai/types/conversations/conversation_create_params.py +29 -0
  1416. openai/types/conversations/conversation_deleted_resource.py +15 -0
  1417. openai/types/conversations/conversation_item.py +230 -0
  1418. openai/types/conversations/conversation_item_list.py +26 -0
  1419. openai/types/conversations/conversation_update_params.py +22 -0
  1420. openai/types/conversations/input_file_content.py +7 -0
  1421. openai/types/conversations/input_file_content_param.py +7 -0
  1422. openai/types/conversations/input_image_content.py +7 -0
  1423. openai/types/conversations/input_image_content_param.py +7 -0
  1424. openai/types/conversations/input_text_content.py +7 -0
  1425. openai/types/conversations/input_text_content_param.py +7 -0
  1426. openai/types/conversations/item_create_params.py +24 -0
  1427. openai/types/conversations/item_list_params.py +50 -0
  1428. openai/types/conversations/item_retrieve_params.py +22 -0
  1429. openai/types/conversations/message.py +66 -0
  1430. openai/types/conversations/output_text_content.py +7 -0
  1431. openai/types/conversations/output_text_content_param.py +7 -0
  1432. openai/types/conversations/refusal_content.py +7 -0
  1433. openai/types/conversations/refusal_content_param.py +7 -0
  1434. openai/types/conversations/summary_text_content.py +15 -0
  1435. openai/types/conversations/text_content.py +13 -0
  1436. openai/types/create_embedding_response.py +31 -0
  1437. openai/types/embedding.py +23 -0
  1438. openai/types/embedding_create_params.py +55 -0
  1439. openai/types/embedding_model.py +7 -0
  1440. openai/types/eval_create_params.py +202 -0
  1441. openai/types/eval_create_response.py +111 -0
  1442. openai/types/eval_custom_data_source_config.py +21 -0
  1443. openai/types/eval_delete_response.py +13 -0
  1444. openai/types/eval_list_params.py +27 -0
  1445. openai/types/eval_list_response.py +111 -0
  1446. openai/types/eval_retrieve_response.py +111 -0
  1447. openai/types/eval_stored_completions_data_source_config.py +32 -0
  1448. openai/types/eval_update_params.py +25 -0
  1449. openai/types/eval_update_response.py +111 -0
  1450. openai/types/evals/__init__.py +22 -0
  1451. openai/types/evals/create_eval_completions_run_data_source.py +236 -0
  1452. openai/types/evals/create_eval_completions_run_data_source_param.py +232 -0
  1453. openai/types/evals/create_eval_jsonl_run_data_source.py +42 -0
  1454. openai/types/evals/create_eval_jsonl_run_data_source_param.py +47 -0
  1455. openai/types/evals/eval_api_error.py +13 -0
  1456. openai/types/evals/run_cancel_response.py +417 -0
  1457. openai/types/evals/run_create_params.py +340 -0
  1458. openai/types/evals/run_create_response.py +417 -0
  1459. openai/types/evals/run_delete_response.py +15 -0
  1460. openai/types/evals/run_list_params.py +27 -0
  1461. openai/types/evals/run_list_response.py +417 -0
  1462. openai/types/evals/run_retrieve_response.py +417 -0
  1463. openai/types/evals/runs/__init__.py +7 -0
  1464. openai/types/evals/runs/output_item_list_params.py +30 -0
  1465. openai/types/evals/runs/output_item_list_response.py +134 -0
  1466. openai/types/evals/runs/output_item_retrieve_response.py +134 -0
  1467. openai/types/file_chunking_strategy.py +14 -0
  1468. openai/types/file_chunking_strategy_param.py +13 -0
  1469. openai/types/file_content.py +7 -0
  1470. openai/types/file_create_params.py +45 -0
  1471. openai/types/file_deleted.py +15 -0
  1472. openai/types/file_list_params.py +33 -0
  1473. openai/types/file_object.py +58 -0
  1474. openai/types/file_purpose.py +7 -0
  1475. openai/types/fine_tuning/__init__.py +26 -0
  1476. openai/types/fine_tuning/alpha/__init__.py +8 -0
  1477. openai/types/fine_tuning/alpha/grader_run_params.py +40 -0
  1478. openai/types/fine_tuning/alpha/grader_run_response.py +67 -0
  1479. openai/types/fine_tuning/alpha/grader_validate_params.py +24 -0
  1480. openai/types/fine_tuning/alpha/grader_validate_response.py +20 -0
  1481. openai/types/fine_tuning/checkpoints/__init__.py +9 -0
  1482. openai/types/fine_tuning/checkpoints/permission_create_params.py +14 -0
  1483. openai/types/fine_tuning/checkpoints/permission_create_response.py +21 -0
  1484. openai/types/fine_tuning/checkpoints/permission_delete_response.py +18 -0
  1485. openai/types/fine_tuning/checkpoints/permission_retrieve_params.py +21 -0
  1486. openai/types/fine_tuning/checkpoints/permission_retrieve_response.py +34 -0
  1487. openai/types/fine_tuning/dpo_hyperparameters.py +36 -0
  1488. openai/types/fine_tuning/dpo_hyperparameters_param.py +36 -0
  1489. openai/types/fine_tuning/dpo_method.py +13 -0
  1490. openai/types/fine_tuning/dpo_method_param.py +14 -0
  1491. openai/types/fine_tuning/fine_tuning_job.py +161 -0
  1492. openai/types/fine_tuning/fine_tuning_job_event.py +32 -0
  1493. openai/types/fine_tuning/fine_tuning_job_integration.py +5 -0
  1494. openai/types/fine_tuning/fine_tuning_job_wandb_integration.py +33 -0
  1495. openai/types/fine_tuning/fine_tuning_job_wandb_integration_object.py +21 -0
  1496. openai/types/fine_tuning/job_create_params.py +176 -0
  1497. openai/types/fine_tuning/job_list_events_params.py +15 -0
  1498. openai/types/fine_tuning/job_list_params.py +23 -0
  1499. openai/types/fine_tuning/jobs/__init__.py +6 -0
  1500. openai/types/fine_tuning/jobs/checkpoint_list_params.py +15 -0
  1501. openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py +47 -0
  1502. openai/types/fine_tuning/reinforcement_hyperparameters.py +43 -0
  1503. openai/types/fine_tuning/reinforcement_hyperparameters_param.py +43 -0
  1504. openai/types/fine_tuning/reinforcement_method.py +24 -0
  1505. openai/types/fine_tuning/reinforcement_method_param.py +27 -0
  1506. openai/types/fine_tuning/supervised_hyperparameters.py +29 -0
  1507. openai/types/fine_tuning/supervised_hyperparameters_param.py +29 -0
  1508. openai/types/fine_tuning/supervised_method.py +13 -0
  1509. openai/types/fine_tuning/supervised_method_param.py +14 -0
  1510. openai/types/graders/__init__.py +16 -0
  1511. openai/types/graders/label_model_grader.py +70 -0
  1512. openai/types/graders/label_model_grader_param.py +77 -0
  1513. openai/types/graders/multi_grader.py +32 -0
  1514. openai/types/graders/multi_grader_param.py +35 -0
  1515. openai/types/graders/python_grader.py +22 -0
  1516. openai/types/graders/python_grader_param.py +21 -0
  1517. openai/types/graders/score_model_grader.py +109 -0
  1518. openai/types/graders/score_model_grader_param.py +115 -0
  1519. openai/types/graders/string_check_grader.py +24 -0
  1520. openai/types/graders/string_check_grader_param.py +24 -0
  1521. openai/types/graders/text_similarity_grader.py +40 -0
  1522. openai/types/graders/text_similarity_grader_param.py +42 -0
  1523. openai/types/image.py +26 -0
  1524. openai/types/image_create_variation_params.py +48 -0
  1525. openai/types/image_edit_completed_event.py +55 -0
  1526. openai/types/image_edit_params.py +145 -0
  1527. openai/types/image_edit_partial_image_event.py +33 -0
  1528. openai/types/image_edit_stream_event.py +14 -0
  1529. openai/types/image_gen_completed_event.py +55 -0
  1530. openai/types/image_gen_partial_image_event.py +33 -0
  1531. openai/types/image_gen_stream_event.py +14 -0
  1532. openai/types/image_generate_params.py +143 -0
  1533. openai/types/image_model.py +7 -0
  1534. openai/types/images_response.py +60 -0
  1535. openai/types/model.py +21 -0
  1536. openai/types/model_deleted.py +13 -0
  1537. openai/types/moderation.py +186 -0
  1538. openai/types/moderation_create_params.py +30 -0
  1539. openai/types/moderation_create_response.py +19 -0
  1540. openai/types/moderation_image_url_input_param.py +20 -0
  1541. openai/types/moderation_model.py +9 -0
  1542. openai/types/moderation_multi_modal_input_param.py +13 -0
  1543. openai/types/moderation_text_input_param.py +15 -0
  1544. openai/types/other_file_chunking_strategy_object.py +12 -0
  1545. openai/types/realtime/__init__.py +237 -0
  1546. openai/types/realtime/audio_transcription.py +37 -0
  1547. openai/types/realtime/audio_transcription_param.py +34 -0
  1548. openai/types/realtime/call_accept_params.py +122 -0
  1549. openai/types/realtime/call_create_params.py +17 -0
  1550. openai/types/realtime/call_refer_params.py +15 -0
  1551. openai/types/realtime/call_reject_params.py +15 -0
  1552. openai/types/realtime/client_secret_create_params.py +46 -0
  1553. openai/types/realtime/client_secret_create_response.py +26 -0
  1554. openai/types/realtime/conversation_created_event.py +27 -0
  1555. openai/types/realtime/conversation_item.py +32 -0
  1556. openai/types/realtime/conversation_item_added.py +26 -0
  1557. openai/types/realtime/conversation_item_create_event.py +29 -0
  1558. openai/types/realtime/conversation_item_create_event_param.py +29 -0
  1559. openai/types/realtime/conversation_item_created_event.py +27 -0
  1560. openai/types/realtime/conversation_item_delete_event.py +19 -0
  1561. openai/types/realtime/conversation_item_delete_event_param.py +18 -0
  1562. openai/types/realtime/conversation_item_deleted_event.py +18 -0
  1563. openai/types/realtime/conversation_item_done.py +26 -0
  1564. openai/types/realtime/conversation_item_input_audio_transcription_completed_event.py +79 -0
  1565. openai/types/realtime/conversation_item_input_audio_transcription_delta_event.py +36 -0
  1566. openai/types/realtime/conversation_item_input_audio_transcription_failed_event.py +39 -0
  1567. openai/types/realtime/conversation_item_input_audio_transcription_segment.py +36 -0
  1568. openai/types/realtime/conversation_item_param.py +30 -0
  1569. openai/types/realtime/conversation_item_retrieve_event.py +19 -0
  1570. openai/types/realtime/conversation_item_retrieve_event_param.py +18 -0
  1571. openai/types/realtime/conversation_item_truncate_event.py +32 -0
  1572. openai/types/realtime/conversation_item_truncate_event_param.py +31 -0
  1573. openai/types/realtime/conversation_item_truncated_event.py +24 -0
  1574. openai/types/realtime/input_audio_buffer_append_event.py +23 -0
  1575. openai/types/realtime/input_audio_buffer_append_event_param.py +22 -0
  1576. openai/types/realtime/input_audio_buffer_clear_event.py +16 -0
  1577. openai/types/realtime/input_audio_buffer_clear_event_param.py +15 -0
  1578. openai/types/realtime/input_audio_buffer_cleared_event.py +15 -0
  1579. openai/types/realtime/input_audio_buffer_commit_event.py +16 -0
  1580. openai/types/realtime/input_audio_buffer_commit_event_param.py +15 -0
  1581. openai/types/realtime/input_audio_buffer_committed_event.py +25 -0
  1582. openai/types/realtime/input_audio_buffer_speech_started_event.py +26 -0
  1583. openai/types/realtime/input_audio_buffer_speech_stopped_event.py +25 -0
  1584. openai/types/realtime/input_audio_buffer_timeout_triggered.py +30 -0
  1585. openai/types/realtime/log_prob_properties.py +18 -0
  1586. openai/types/realtime/mcp_list_tools_completed.py +18 -0
  1587. openai/types/realtime/mcp_list_tools_failed.py +18 -0
  1588. openai/types/realtime/mcp_list_tools_in_progress.py +18 -0
  1589. openai/types/realtime/noise_reduction_type.py +7 -0
  1590. openai/types/realtime/output_audio_buffer_clear_event.py +16 -0
  1591. openai/types/realtime/output_audio_buffer_clear_event_param.py +15 -0
  1592. openai/types/realtime/rate_limits_updated_event.py +33 -0
  1593. openai/types/realtime/realtime_audio_config.py +15 -0
  1594. openai/types/realtime/realtime_audio_config_input.py +63 -0
  1595. openai/types/realtime/realtime_audio_config_input_param.py +65 -0
  1596. openai/types/realtime/realtime_audio_config_output.py +36 -0
  1597. openai/types/realtime/realtime_audio_config_output_param.py +35 -0
  1598. openai/types/realtime/realtime_audio_config_param.py +16 -0
  1599. openai/types/realtime/realtime_audio_formats.py +30 -0
  1600. openai/types/realtime/realtime_audio_formats_param.py +29 -0
  1601. openai/types/realtime/realtime_audio_input_turn_detection.py +98 -0
  1602. openai/types/realtime/realtime_audio_input_turn_detection_param.py +95 -0
  1603. openai/types/realtime/realtime_client_event.py +36 -0
  1604. openai/types/realtime/realtime_client_event_param.py +34 -0
  1605. openai/types/realtime/realtime_connect_params.py +13 -0
  1606. openai/types/realtime/realtime_conversation_item_assistant_message.py +58 -0
  1607. openai/types/realtime/realtime_conversation_item_assistant_message_param.py +58 -0
  1608. openai/types/realtime/realtime_conversation_item_function_call.py +41 -0
  1609. openai/types/realtime/realtime_conversation_item_function_call_output.py +37 -0
  1610. openai/types/realtime/realtime_conversation_item_function_call_output_param.py +36 -0
  1611. openai/types/realtime/realtime_conversation_item_function_call_param.py +40 -0
  1612. openai/types/realtime/realtime_conversation_item_system_message.py +42 -0
  1613. openai/types/realtime/realtime_conversation_item_system_message_param.py +42 -0
  1614. openai/types/realtime/realtime_conversation_item_user_message.py +69 -0
  1615. openai/types/realtime/realtime_conversation_item_user_message_param.py +69 -0
  1616. openai/types/realtime/realtime_error.py +24 -0
  1617. openai/types/realtime/realtime_error_event.py +19 -0
  1618. openai/types/realtime/realtime_function_tool.py +25 -0
  1619. openai/types/realtime/realtime_function_tool_param.py +24 -0
  1620. openai/types/realtime/realtime_mcp_approval_request.py +24 -0
  1621. openai/types/realtime/realtime_mcp_approval_request_param.py +24 -0
  1622. openai/types/realtime/realtime_mcp_approval_response.py +25 -0
  1623. openai/types/realtime/realtime_mcp_approval_response_param.py +25 -0
  1624. openai/types/realtime/realtime_mcp_list_tools.py +36 -0
  1625. openai/types/realtime/realtime_mcp_list_tools_param.py +36 -0
  1626. openai/types/realtime/realtime_mcp_protocol_error.py +15 -0
  1627. openai/types/realtime/realtime_mcp_protocol_error_param.py +15 -0
  1628. openai/types/realtime/realtime_mcp_tool_call.py +43 -0
  1629. openai/types/realtime/realtime_mcp_tool_call_param.py +40 -0
  1630. openai/types/realtime/realtime_mcp_tool_execution_error.py +13 -0
  1631. openai/types/realtime/realtime_mcp_tool_execution_error_param.py +13 -0
  1632. openai/types/realtime/realtime_mcphttp_error.py +15 -0
  1633. openai/types/realtime/realtime_mcphttp_error_param.py +15 -0
  1634. openai/types/realtime/realtime_response.py +98 -0
  1635. openai/types/realtime/realtime_response_create_audio_output.py +29 -0
  1636. openai/types/realtime/realtime_response_create_audio_output_param.py +28 -0
  1637. openai/types/realtime/realtime_response_create_mcp_tool.py +135 -0
  1638. openai/types/realtime/realtime_response_create_mcp_tool_param.py +135 -0
  1639. openai/types/realtime/realtime_response_create_params.py +98 -0
  1640. openai/types/realtime/realtime_response_create_params_param.py +99 -0
  1641. openai/types/realtime/realtime_response_status.py +39 -0
  1642. openai/types/realtime/realtime_response_usage.py +41 -0
  1643. openai/types/realtime/realtime_response_usage_input_token_details.py +35 -0
  1644. openai/types/realtime/realtime_response_usage_output_token_details.py +15 -0
  1645. openai/types/realtime/realtime_server_event.py +155 -0
  1646. openai/types/realtime/realtime_session_client_secret.py +20 -0
  1647. openai/types/realtime/realtime_session_create_request.py +122 -0
  1648. openai/types/realtime/realtime_session_create_request_param.py +122 -0
  1649. openai/types/realtime/realtime_session_create_response.py +475 -0
  1650. openai/types/realtime/realtime_tool_choice_config.py +12 -0
  1651. openai/types/realtime/realtime_tool_choice_config_param.py +14 -0
  1652. openai/types/realtime/realtime_tools_config.py +10 -0
  1653. openai/types/realtime/realtime_tools_config_param.py +143 -0
  1654. openai/types/realtime/realtime_tools_config_union.py +141 -0
  1655. openai/types/realtime/realtime_tools_config_union_param.py +140 -0
  1656. openai/types/realtime/realtime_tracing_config.py +31 -0
  1657. openai/types/realtime/realtime_tracing_config_param.py +31 -0
  1658. openai/types/realtime/realtime_transcription_session_audio.py +12 -0
  1659. openai/types/realtime/realtime_transcription_session_audio_input.py +65 -0
  1660. openai/types/realtime/realtime_transcription_session_audio_input_param.py +67 -0
  1661. openai/types/realtime/realtime_transcription_session_audio_input_turn_detection.py +98 -0
  1662. openai/types/realtime/realtime_transcription_session_audio_input_turn_detection_param.py +95 -0
  1663. openai/types/realtime/realtime_transcription_session_audio_param.py +13 -0
  1664. openai/types/realtime/realtime_transcription_session_create_request.py +27 -0
  1665. openai/types/realtime/realtime_transcription_session_create_request_param.py +28 -0
  1666. openai/types/realtime/realtime_transcription_session_create_response.py +68 -0
  1667. openai/types/realtime/realtime_transcription_session_turn_detection.py +32 -0
  1668. openai/types/realtime/realtime_truncation.py +10 -0
  1669. openai/types/realtime/realtime_truncation_param.py +12 -0
  1670. openai/types/realtime/realtime_truncation_retention_ratio.py +38 -0
  1671. openai/types/realtime/realtime_truncation_retention_ratio_param.py +37 -0
  1672. openai/types/realtime/response_audio_delta_event.py +30 -0
  1673. openai/types/realtime/response_audio_done_event.py +27 -0
  1674. openai/types/realtime/response_audio_transcript_delta_event.py +30 -0
  1675. openai/types/realtime/response_audio_transcript_done_event.py +30 -0
  1676. openai/types/realtime/response_cancel_event.py +22 -0
  1677. openai/types/realtime/response_cancel_event_param.py +21 -0
  1678. openai/types/realtime/response_content_part_added_event.py +45 -0
  1679. openai/types/realtime/response_content_part_done_event.py +45 -0
  1680. openai/types/realtime/response_create_event.py +20 -0
  1681. openai/types/realtime/response_create_event_param.py +20 -0
  1682. openai/types/realtime/response_created_event.py +19 -0
  1683. openai/types/realtime/response_done_event.py +19 -0
  1684. openai/types/realtime/response_function_call_arguments_delta_event.py +30 -0
  1685. openai/types/realtime/response_function_call_arguments_done_event.py +30 -0
  1686. openai/types/realtime/response_mcp_call_arguments_delta.py +31 -0
  1687. openai/types/realtime/response_mcp_call_arguments_done.py +27 -0
  1688. openai/types/realtime/response_mcp_call_completed.py +21 -0
  1689. openai/types/realtime/response_mcp_call_failed.py +21 -0
  1690. openai/types/realtime/response_mcp_call_in_progress.py +21 -0
  1691. openai/types/realtime/response_output_item_added_event.py +25 -0
  1692. openai/types/realtime/response_output_item_done_event.py +25 -0
  1693. openai/types/realtime/response_text_delta_event.py +30 -0
  1694. openai/types/realtime/response_text_done_event.py +30 -0
  1695. openai/types/realtime/session_created_event.py +23 -0
  1696. openai/types/realtime/session_update_event.py +31 -0
  1697. openai/types/realtime/session_update_event_param.py +32 -0
  1698. openai/types/realtime/session_updated_event.py +23 -0
  1699. openai/types/responses/__init__.py +270 -0
  1700. openai/types/responses/apply_patch_tool.py +12 -0
  1701. openai/types/responses/apply_patch_tool_param.py +12 -0
  1702. openai/types/responses/computer_tool.py +21 -0
  1703. openai/types/responses/computer_tool_param.py +21 -0
  1704. openai/types/responses/custom_tool.py +23 -0
  1705. openai/types/responses/custom_tool_param.py +23 -0
  1706. openai/types/responses/easy_input_message.py +26 -0
  1707. openai/types/responses/easy_input_message_param.py +27 -0
  1708. openai/types/responses/file_search_tool.py +58 -0
  1709. openai/types/responses/file_search_tool_param.py +60 -0
  1710. openai/types/responses/function_shell_tool.py +12 -0
  1711. openai/types/responses/function_shell_tool_param.py +12 -0
  1712. openai/types/responses/function_tool.py +28 -0
  1713. openai/types/responses/function_tool_param.py +28 -0
  1714. openai/types/responses/input_item_list_params.py +34 -0
  1715. openai/types/responses/input_token_count_params.py +142 -0
  1716. openai/types/responses/input_token_count_response.py +13 -0
  1717. openai/types/responses/parsed_response.py +105 -0
  1718. openai/types/responses/response.py +307 -0
  1719. openai/types/responses/response_apply_patch_tool_call.py +76 -0
  1720. openai/types/responses/response_apply_patch_tool_call_output.py +31 -0
  1721. openai/types/responses/response_audio_delta_event.py +18 -0
  1722. openai/types/responses/response_audio_done_event.py +15 -0
  1723. openai/types/responses/response_audio_transcript_delta_event.py +18 -0
  1724. openai/types/responses/response_audio_transcript_done_event.py +15 -0
  1725. openai/types/responses/response_code_interpreter_call_code_delta_event.py +27 -0
  1726. openai/types/responses/response_code_interpreter_call_code_done_event.py +24 -0
  1727. openai/types/responses/response_code_interpreter_call_completed_event.py +24 -0
  1728. openai/types/responses/response_code_interpreter_call_in_progress_event.py +24 -0
  1729. openai/types/responses/response_code_interpreter_call_interpreting_event.py +24 -0
  1730. openai/types/responses/response_code_interpreter_tool_call.py +55 -0
  1731. openai/types/responses/response_code_interpreter_tool_call_param.py +54 -0
  1732. openai/types/responses/response_completed_event.py +19 -0
  1733. openai/types/responses/response_computer_tool_call.py +209 -0
  1734. openai/types/responses/response_computer_tool_call_output_item.py +47 -0
  1735. openai/types/responses/response_computer_tool_call_output_screenshot.py +22 -0
  1736. openai/types/responses/response_computer_tool_call_output_screenshot_param.py +21 -0
  1737. openai/types/responses/response_computer_tool_call_param.py +207 -0
  1738. openai/types/responses/response_content_part_added_event.py +44 -0
  1739. openai/types/responses/response_content_part_done_event.py +44 -0
  1740. openai/types/responses/response_conversation_param.py +12 -0
  1741. openai/types/responses/response_create_params.py +334 -0
  1742. openai/types/responses/response_created_event.py +19 -0
  1743. openai/types/responses/response_custom_tool_call.py +25 -0
  1744. openai/types/responses/response_custom_tool_call_input_delta_event.py +24 -0
  1745. openai/types/responses/response_custom_tool_call_input_done_event.py +24 -0
  1746. openai/types/responses/response_custom_tool_call_output.py +33 -0
  1747. openai/types/responses/response_custom_tool_call_output_param.py +31 -0
  1748. openai/types/responses/response_custom_tool_call_param.py +24 -0
  1749. openai/types/responses/response_error.py +34 -0
  1750. openai/types/responses/response_error_event.py +25 -0
  1751. openai/types/responses/response_failed_event.py +19 -0
  1752. openai/types/responses/response_file_search_call_completed_event.py +21 -0
  1753. openai/types/responses/response_file_search_call_in_progress_event.py +21 -0
  1754. openai/types/responses/response_file_search_call_searching_event.py +21 -0
  1755. openai/types/responses/response_file_search_tool_call.py +51 -0
  1756. openai/types/responses/response_file_search_tool_call_param.py +53 -0
  1757. openai/types/responses/response_format_text_config.py +16 -0
  1758. openai/types/responses/response_format_text_config_param.py +16 -0
  1759. openai/types/responses/response_format_text_json_schema_config.py +43 -0
  1760. openai/types/responses/response_format_text_json_schema_config_param.py +41 -0
  1761. openai/types/responses/response_function_call_arguments_delta_event.py +26 -0
  1762. openai/types/responses/response_function_call_arguments_done_event.py +26 -0
  1763. openai/types/responses/response_function_call_output_item.py +16 -0
  1764. openai/types/responses/response_function_call_output_item_list.py +10 -0
  1765. openai/types/responses/response_function_call_output_item_list_param.py +18 -0
  1766. openai/types/responses/response_function_call_output_item_param.py +16 -0
  1767. openai/types/responses/response_function_shell_call_output_content.py +36 -0
  1768. openai/types/responses/response_function_shell_call_output_content_param.py +35 -0
  1769. openai/types/responses/response_function_shell_tool_call.py +44 -0
  1770. openai/types/responses/response_function_shell_tool_call_output.py +70 -0
  1771. openai/types/responses/response_function_tool_call.py +32 -0
  1772. openai/types/responses/response_function_tool_call_item.py +10 -0
  1773. openai/types/responses/response_function_tool_call_output_item.py +40 -0
  1774. openai/types/responses/response_function_tool_call_param.py +31 -0
  1775. openai/types/responses/response_function_web_search.py +67 -0
  1776. openai/types/responses/response_function_web_search_param.py +73 -0
  1777. openai/types/responses/response_image_gen_call_completed_event.py +21 -0
  1778. openai/types/responses/response_image_gen_call_generating_event.py +21 -0
  1779. openai/types/responses/response_image_gen_call_in_progress_event.py +21 -0
  1780. openai/types/responses/response_image_gen_call_partial_image_event.py +30 -0
  1781. openai/types/responses/response_in_progress_event.py +19 -0
  1782. openai/types/responses/response_includable.py +16 -0
  1783. openai/types/responses/response_incomplete_event.py +19 -0
  1784. openai/types/responses/response_input_audio.py +22 -0
  1785. openai/types/responses/response_input_audio_param.py +22 -0
  1786. openai/types/responses/response_input_content.py +15 -0
  1787. openai/types/responses/response_input_content_param.py +14 -0
  1788. openai/types/responses/response_input_file.py +25 -0
  1789. openai/types/responses/response_input_file_content.py +25 -0
  1790. openai/types/responses/response_input_file_content_param.py +25 -0
  1791. openai/types/responses/response_input_file_param.py +25 -0
  1792. openai/types/responses/response_input_image.py +28 -0
  1793. openai/types/responses/response_input_image_content.py +28 -0
  1794. openai/types/responses/response_input_image_content_param.py +28 -0
  1795. openai/types/responses/response_input_image_param.py +28 -0
  1796. openai/types/responses/response_input_item.py +482 -0
  1797. openai/types/responses/response_input_item_param.py +479 -0
  1798. openai/types/responses/response_input_message_content_list.py +10 -0
  1799. openai/types/responses/response_input_message_content_list_param.py +16 -0
  1800. openai/types/responses/response_input_message_item.py +33 -0
  1801. openai/types/responses/response_input_param.py +482 -0
  1802. openai/types/responses/response_input_text.py +15 -0
  1803. openai/types/responses/response_input_text_content.py +15 -0
  1804. openai/types/responses/response_input_text_content_param.py +15 -0
  1805. openai/types/responses/response_input_text_param.py +15 -0
  1806. openai/types/responses/response_item.py +226 -0
  1807. openai/types/responses/response_item_list.py +26 -0
  1808. openai/types/responses/response_mcp_call_arguments_delta_event.py +27 -0
  1809. openai/types/responses/response_mcp_call_arguments_done_event.py +24 -0
  1810. openai/types/responses/response_mcp_call_completed_event.py +21 -0
  1811. openai/types/responses/response_mcp_call_failed_event.py +21 -0
  1812. openai/types/responses/response_mcp_call_in_progress_event.py +21 -0
  1813. openai/types/responses/response_mcp_list_tools_completed_event.py +21 -0
  1814. openai/types/responses/response_mcp_list_tools_failed_event.py +21 -0
  1815. openai/types/responses/response_mcp_list_tools_in_progress_event.py +21 -0
  1816. openai/types/responses/response_output_item.py +189 -0
  1817. openai/types/responses/response_output_item_added_event.py +22 -0
  1818. openai/types/responses/response_output_item_done_event.py +22 -0
  1819. openai/types/responses/response_output_message.py +34 -0
  1820. openai/types/responses/response_output_message_param.py +34 -0
  1821. openai/types/responses/response_output_refusal.py +15 -0
  1822. openai/types/responses/response_output_refusal_param.py +15 -0
  1823. openai/types/responses/response_output_text.py +117 -0
  1824. openai/types/responses/response_output_text_annotation_added_event.py +30 -0
  1825. openai/types/responses/response_output_text_param.py +115 -0
  1826. openai/types/responses/response_prompt.py +28 -0
  1827. openai/types/responses/response_prompt_param.py +29 -0
  1828. openai/types/responses/response_queued_event.py +19 -0
  1829. openai/types/responses/response_reasoning_item.py +51 -0
  1830. openai/types/responses/response_reasoning_item_param.py +51 -0
  1831. openai/types/responses/response_reasoning_summary_part_added_event.py +35 -0
  1832. openai/types/responses/response_reasoning_summary_part_done_event.py +35 -0
  1833. openai/types/responses/response_reasoning_summary_text_delta_event.py +27 -0
  1834. openai/types/responses/response_reasoning_summary_text_done_event.py +27 -0
  1835. openai/types/responses/response_reasoning_text_delta_event.py +27 -0
  1836. openai/types/responses/response_reasoning_text_done_event.py +27 -0
  1837. openai/types/responses/response_refusal_delta_event.py +27 -0
  1838. openai/types/responses/response_refusal_done_event.py +27 -0
  1839. openai/types/responses/response_retrieve_params.py +59 -0
  1840. openai/types/responses/response_status.py +7 -0
  1841. openai/types/responses/response_stream_event.py +120 -0
  1842. openai/types/responses/response_text_config.py +35 -0
  1843. openai/types/responses/response_text_config_param.py +36 -0
  1844. openai/types/responses/response_text_delta_event.py +50 -0
  1845. openai/types/responses/response_text_done_event.py +50 -0
  1846. openai/types/responses/response_usage.py +35 -0
  1847. openai/types/responses/response_web_search_call_completed_event.py +21 -0
  1848. openai/types/responses/response_web_search_call_in_progress_event.py +21 -0
  1849. openai/types/responses/response_web_search_call_searching_event.py +21 -0
  1850. openai/types/responses/tool.py +271 -0
  1851. openai/types/responses/tool_choice_allowed.py +36 -0
  1852. openai/types/responses/tool_choice_allowed_param.py +36 -0
  1853. openai/types/responses/tool_choice_apply_patch.py +12 -0
  1854. openai/types/responses/tool_choice_apply_patch_param.py +12 -0
  1855. openai/types/responses/tool_choice_custom.py +15 -0
  1856. openai/types/responses/tool_choice_custom_param.py +15 -0
  1857. openai/types/responses/tool_choice_function.py +15 -0
  1858. openai/types/responses/tool_choice_function_param.py +15 -0
  1859. openai/types/responses/tool_choice_mcp.py +19 -0
  1860. openai/types/responses/tool_choice_mcp_param.py +19 -0
  1861. openai/types/responses/tool_choice_options.py +7 -0
  1862. openai/types/responses/tool_choice_shell.py +12 -0
  1863. openai/types/responses/tool_choice_shell_param.py +12 -0
  1864. openai/types/responses/tool_choice_types.py +31 -0
  1865. openai/types/responses/tool_choice_types_param.py +33 -0
  1866. openai/types/responses/tool_param.py +271 -0
  1867. openai/types/responses/web_search_preview_tool.py +49 -0
  1868. openai/types/responses/web_search_preview_tool_param.py +49 -0
  1869. openai/types/responses/web_search_tool.py +63 -0
  1870. openai/types/responses/web_search_tool_param.py +65 -0
  1871. openai/types/shared/__init__.py +19 -0
  1872. openai/types/shared/all_models.py +28 -0
  1873. openai/types/shared/chat_model.py +75 -0
  1874. openai/types/shared/comparison_filter.py +34 -0
  1875. openai/types/shared/compound_filter.py +22 -0
  1876. openai/types/shared/custom_tool_input_format.py +28 -0
  1877. openai/types/shared/error_object.py +17 -0
  1878. openai/types/shared/function_definition.py +43 -0
  1879. openai/types/shared/function_parameters.py +8 -0
  1880. openai/types/shared/metadata.py +8 -0
  1881. openai/types/shared/reasoning.py +44 -0
  1882. openai/types/shared/reasoning_effort.py +8 -0
  1883. openai/types/shared/response_format_json_object.py +12 -0
  1884. openai/types/shared/response_format_json_schema.py +48 -0
  1885. openai/types/shared/response_format_text.py +12 -0
  1886. openai/types/shared/response_format_text_grammar.py +15 -0
  1887. openai/types/shared/response_format_text_python.py +12 -0
  1888. openai/types/shared/responses_model.py +28 -0
  1889. openai/types/shared_params/__init__.py +15 -0
  1890. openai/types/shared_params/chat_model.py +77 -0
  1891. openai/types/shared_params/comparison_filter.py +36 -0
  1892. openai/types/shared_params/compound_filter.py +23 -0
  1893. openai/types/shared_params/custom_tool_input_format.py +27 -0
  1894. openai/types/shared_params/function_definition.py +45 -0
  1895. openai/types/shared_params/function_parameters.py +10 -0
  1896. openai/types/shared_params/metadata.py +10 -0
  1897. openai/types/shared_params/reasoning.py +45 -0
  1898. openai/types/shared_params/reasoning_effort.py +10 -0
  1899. openai/types/shared_params/response_format_json_object.py +12 -0
  1900. openai/types/shared_params/response_format_json_schema.py +46 -0
  1901. openai/types/shared_params/response_format_text.py +12 -0
  1902. openai/types/shared_params/responses_model.py +30 -0
  1903. openai/types/static_file_chunking_strategy.py +20 -0
  1904. openai/types/static_file_chunking_strategy_object.py +15 -0
  1905. openai/types/static_file_chunking_strategy_object_param.py +16 -0
  1906. openai/types/static_file_chunking_strategy_param.py +22 -0
  1907. openai/types/upload.py +42 -0
  1908. openai/types/upload_complete_params.py +20 -0
  1909. openai/types/upload_create_params.py +52 -0
  1910. openai/types/uploads/__init__.py +6 -0
  1911. openai/types/uploads/part_create_params.py +14 -0
  1912. openai/types/uploads/upload_part.py +21 -0
  1913. openai/types/vector_store.py +82 -0
  1914. openai/types/vector_store_create_params.py +61 -0
  1915. openai/types/vector_store_deleted.py +15 -0
  1916. openai/types/vector_store_list_params.py +39 -0
  1917. openai/types/vector_store_search_params.py +42 -0
  1918. openai/types/vector_store_search_response.py +39 -0
  1919. openai/types/vector_store_update_params.py +39 -0
  1920. openai/types/vector_stores/__init__.py +13 -0
  1921. openai/types/vector_stores/file_batch_create_params.py +70 -0
  1922. openai/types/vector_stores/file_batch_list_files_params.py +47 -0
  1923. openai/types/vector_stores/file_content_response.py +15 -0
  1924. openai/types/vector_stores/file_create_params.py +35 -0
  1925. openai/types/vector_stores/file_list_params.py +45 -0
  1926. openai/types/vector_stores/file_update_params.py +21 -0
  1927. openai/types/vector_stores/vector_store_file.py +67 -0
  1928. openai/types/vector_stores/vector_store_file_batch.py +54 -0
  1929. openai/types/vector_stores/vector_store_file_deleted.py +15 -0
  1930. openai/types/video.py +53 -0
  1931. openai/types/video_create_error.py +11 -0
  1932. openai/types/video_create_params.py +29 -0
  1933. openai/types/video_delete_response.py +18 -0
  1934. openai/types/video_download_content_params.py +12 -0
  1935. openai/types/video_list_params.py +21 -0
  1936. openai/types/video_model.py +7 -0
  1937. openai/types/video_remix_params.py +12 -0
  1938. openai/types/video_seconds.py +7 -0
  1939. openai/types/video_size.py +7 -0
  1940. openai/types/webhooks/__init__.py +24 -0
  1941. openai/types/webhooks/batch_cancelled_webhook_event.py +30 -0
  1942. openai/types/webhooks/batch_completed_webhook_event.py +30 -0
  1943. openai/types/webhooks/batch_expired_webhook_event.py +30 -0
  1944. openai/types/webhooks/batch_failed_webhook_event.py +30 -0
  1945. openai/types/webhooks/eval_run_canceled_webhook_event.py +30 -0
  1946. openai/types/webhooks/eval_run_failed_webhook_event.py +30 -0
  1947. openai/types/webhooks/eval_run_succeeded_webhook_event.py +30 -0
  1948. openai/types/webhooks/fine_tuning_job_cancelled_webhook_event.py +30 -0
  1949. openai/types/webhooks/fine_tuning_job_failed_webhook_event.py +30 -0
  1950. openai/types/webhooks/fine_tuning_job_succeeded_webhook_event.py +30 -0
  1951. openai/types/webhooks/realtime_call_incoming_webhook_event.py +41 -0
  1952. openai/types/webhooks/response_cancelled_webhook_event.py +30 -0
  1953. openai/types/webhooks/response_completed_webhook_event.py +30 -0
  1954. openai/types/webhooks/response_failed_webhook_event.py +30 -0
  1955. openai/types/webhooks/response_incomplete_webhook_event.py +30 -0
  1956. openai/types/webhooks/unwrap_webhook_event.py +44 -0
  1957. openai/types/websocket_connection_options.py +36 -0
  1958. openai/version.py +3 -0
@@ -0,0 +1,3130 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, List, Type, Union, Iterable, Optional, cast
6
+ from functools import partial
7
+ from typing_extensions import Literal, overload
8
+
9
+ import httpx
10
+
11
+ from ... import _legacy_response
12
+ from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
13
+ from ..._utils import is_given, maybe_transform, async_maybe_transform
14
+ from ..._compat import cached_property
15
+ from ..._resource import SyncAPIResource, AsyncAPIResource
16
+ from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
17
+ from .input_items import (
18
+ InputItems,
19
+ AsyncInputItems,
20
+ InputItemsWithRawResponse,
21
+ AsyncInputItemsWithRawResponse,
22
+ InputItemsWithStreamingResponse,
23
+ AsyncInputItemsWithStreamingResponse,
24
+ )
25
+ from ..._streaming import Stream, AsyncStream
26
+ from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool
27
+ from .input_tokens import (
28
+ InputTokens,
29
+ AsyncInputTokens,
30
+ InputTokensWithRawResponse,
31
+ AsyncInputTokensWithRawResponse,
32
+ InputTokensWithStreamingResponse,
33
+ AsyncInputTokensWithStreamingResponse,
34
+ )
35
+ from ..._base_client import make_request_options
36
+ from ...types.responses import response_create_params, response_retrieve_params
37
+ from ...lib._parsing._responses import (
38
+ TextFormatT,
39
+ parse_response,
40
+ type_to_text_format_param as _type_to_text_format_param,
41
+ )
42
+ from ...types.responses.response import Response
43
+ from ...types.responses.tool_param import ToolParam, ParseableToolParam
44
+ from ...types.shared_params.metadata import Metadata
45
+ from ...types.shared_params.reasoning import Reasoning
46
+ from ...types.responses.parsed_response import ParsedResponse
47
+ from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
48
+ from ...types.responses.response_includable import ResponseIncludable
49
+ from ...types.shared_params.responses_model import ResponsesModel
50
+ from ...types.responses.response_input_param import ResponseInputParam
51
+ from ...types.responses.response_prompt_param import ResponsePromptParam
52
+ from ...types.responses.response_stream_event import ResponseStreamEvent
53
+ from ...types.responses.response_text_config_param import ResponseTextConfigParam
54
+
55
+ __all__ = ["Responses", "AsyncResponses"]
56
+
57
+
58
+ class Responses(SyncAPIResource):
59
+ @cached_property
60
+ def input_items(self) -> InputItems:
61
+ return InputItems(self._client)
62
+
63
+ @cached_property
64
+ def input_tokens(self) -> InputTokens:
65
+ return InputTokens(self._client)
66
+
67
+ @cached_property
68
+ def with_raw_response(self) -> ResponsesWithRawResponse:
69
+ """
70
+ This property can be used as a prefix for any HTTP method call to return
71
+ the raw response object instead of the parsed content.
72
+
73
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
74
+ """
75
+ return ResponsesWithRawResponse(self)
76
+
77
+ @cached_property
78
+ def with_streaming_response(self) -> ResponsesWithStreamingResponse:
79
+ """
80
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
81
+
82
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
83
+ """
84
+ return ResponsesWithStreamingResponse(self)
85
+
86
    @overload
    # Overload: `stream` is omitted or False — the call blocks until completion and
    # returns a fully parsed `Response` object (no server-sent event stream).
    def create(
        self,
        *,
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response:
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning: **gpt-5 and o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          stream_options: Options for streaming responses. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              We support the following categories of tools:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
330
+
331
    @overload
    # Overload: `stream=True` (required, positional-in-signature first keyword) — the
    # call returns a `Stream[ResponseStreamEvent]` that yields server-sent events as
    # the response is generated, instead of a fully parsed `Response`.
    def create(
        self,
        *,
        stream: Literal[True],
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ResponseStreamEvent]:
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning: **gpt-5 and o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          stream_options: Options for streaming responses. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              We support the following categories of tools:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
575
+
576
+ @overload
577
+ def create(
578
+ self,
579
+ *,
580
+ stream: bool,
581
+ background: Optional[bool] | Omit = omit,
582
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
583
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
584
+ input: Union[str, ResponseInputParam] | Omit = omit,
585
+ instructions: Optional[str] | Omit = omit,
586
+ max_output_tokens: Optional[int] | Omit = omit,
587
+ max_tool_calls: Optional[int] | Omit = omit,
588
+ metadata: Optional[Metadata] | Omit = omit,
589
+ model: ResponsesModel | Omit = omit,
590
+ parallel_tool_calls: Optional[bool] | Omit = omit,
591
+ previous_response_id: Optional[str] | Omit = omit,
592
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
593
+ prompt_cache_key: str | Omit = omit,
594
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
595
+ reasoning: Optional[Reasoning] | Omit = omit,
596
+ safety_identifier: str | Omit = omit,
597
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
598
+ store: Optional[bool] | Omit = omit,
599
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
600
+ temperature: Optional[float] | Omit = omit,
601
+ text: ResponseTextConfigParam | Omit = omit,
602
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
603
+ tools: Iterable[ToolParam] | Omit = omit,
604
+ top_logprobs: Optional[int] | Omit = omit,
605
+ top_p: Optional[float] | Omit = omit,
606
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
607
+ user: str | Omit = omit,
608
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
609
+ # The extra values given here take precedence over values defined on the client or passed to this method.
610
+ extra_headers: Headers | None = None,
611
+ extra_query: Query | None = None,
612
+ extra_body: Body | None = None,
613
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
614
+ ) -> Response | Stream[ResponseStreamEvent]:
615
+ """Creates a model response.
616
+
617
+ Provide
618
+ [text](https://platform.openai.com/docs/guides/text) or
619
+ [image](https://platform.openai.com/docs/guides/images) inputs to generate
620
+ [text](https://platform.openai.com/docs/guides/text) or
621
+ [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
622
+ the model call your own
623
+ [custom code](https://platform.openai.com/docs/guides/function-calling) or use
624
+ built-in [tools](https://platform.openai.com/docs/guides/tools) like
625
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
626
+ [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
627
+ your own data as input for the model's response.
628
+
629
+ Args:
630
+ stream: If set to true, the model response data will be streamed to the client as it is
631
+ generated using
632
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
633
+ See the
634
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
635
+ for more information.
636
+
637
+ background: Whether to run the model response in the background.
638
+ [Learn more](https://platform.openai.com/docs/guides/background).
639
+
640
+ conversation: The conversation that this response belongs to. Items from this conversation are
641
+ prepended to `input_items` for this response request. Input items and output
642
+ items from this response are automatically added to this conversation after this
643
+ response completes.
644
+
645
+ include: Specify additional output data to include in the model response. Currently
646
+ supported values are:
647
+
648
+ - `web_search_call.action.sources`: Include the sources of the web search tool
649
+ call.
650
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
651
+ in code interpreter tool call items.
652
+ - `computer_call_output.output.image_url`: Include image urls from the computer
653
+ call output.
654
+ - `file_search_call.results`: Include the search results of the file search tool
655
+ call.
656
+ - `message.input_image.image_url`: Include image urls from the input message.
657
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
658
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
659
+ tokens in reasoning item outputs. This enables reasoning items to be used in
660
+ multi-turn conversations when using the Responses API statelessly (like when
661
+ the `store` parameter is set to `false`, or when an organization is enrolled
662
+ in the zero data retention program).
663
+
664
+ input: Text, image, or file inputs to the model, used to generate a response.
665
+
666
+ Learn more:
667
+
668
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
669
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
670
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
671
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
672
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
673
+
674
+ instructions: A system (or developer) message inserted into the model's context.
675
+
676
+ When using along with `previous_response_id`, the instructions from a previous
677
+ response will not be carried over to the next response. This makes it simple to
678
+ swap out system (or developer) messages in new responses.
679
+
680
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
681
+ including visible output tokens and
682
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
683
+
684
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
685
+ response. This maximum number applies across all built-in tool calls, not per
686
+ individual tool. Any further attempts to call a tool by the model will be
687
+ ignored.
688
+
689
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
690
+ for storing additional information about the object in a structured format, and
691
+ querying for objects via API or the dashboard.
692
+
693
+ Keys are strings with a maximum length of 64 characters. Values are strings with
694
+ a maximum length of 512 characters.
695
+
696
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
697
+ wide range of models with different capabilities, performance characteristics,
698
+ and price points. Refer to the
699
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
700
+ available models.
701
+
702
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
703
+
704
+ previous_response_id: The unique ID of the previous response to the model. Use this to create
705
+ multi-turn conversations. Learn more about
706
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
707
+ Cannot be used in conjunction with `conversation`.
708
+
709
+ prompt: Reference to a prompt template and its variables.
710
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
711
+
712
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
713
+ hit rates. Replaces the `user` field.
714
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
715
+
716
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
717
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
718
+ of 24 hours.
719
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
720
+
721
+ reasoning: **gpt-5 and o-series models only**
722
+
723
+ Configuration options for
724
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
725
+
726
+ safety_identifier: A stable identifier used to help detect users of your application that may be
727
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
728
+ identifies each user. We recommend hashing their username or email address, in
729
+ order to avoid sending us any identifying information.
730
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
731
+
732
+ service_tier: Specifies the processing type used for serving the request.
733
+
734
+ - If set to 'auto', then the request will be processed with the service tier
735
+ configured in the Project settings. Unless otherwise configured, the Project
736
+ will use 'default'.
737
+ - If set to 'default', then the request will be processed with the standard
738
+ pricing and performance for the selected model.
739
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
740
+ '[priority](https://openai.com/api-priority-processing/)', then the request
741
+ will be processed with the corresponding service tier.
742
+ - When not set, the default behavior is 'auto'.
743
+
744
+ When the `service_tier` parameter is set, the response body will include the
745
+ `service_tier` value based on the processing mode actually used to serve the
746
+ request. This response value may be different from the value set in the
747
+ parameter.
748
+
749
+ store: Whether to store the generated model response for later retrieval via API.
750
+
751
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`.
752
+
753
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
754
+ make the output more random, while lower values like 0.2 will make it more
755
+ focused and deterministic. We generally recommend altering this or `top_p` but
756
+ not both.
757
+
758
+ text: Configuration options for a text response from the model. Can be plain text or
759
+ structured JSON data. Learn more:
760
+
761
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
762
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
763
+
764
+ tool_choice: How the model should select which tool (or tools) to use when generating a
765
+ response. See the `tools` parameter to see how to specify which tools the model
766
+ can call.
767
+
768
+ tools: An array of tools the model may call while generating a response. You can
769
+ specify which tool to use by setting the `tool_choice` parameter.
770
+
771
+ We support the following categories of tools:
772
+
773
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
774
+ capabilities, like
775
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
776
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
777
+ Learn more about
778
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
779
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
780
+ predefined connectors such as Google Drive and SharePoint. Learn more about
781
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
782
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
783
+ the model to call your own code with strongly typed arguments and outputs.
784
+ Learn more about
785
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
786
+ You can also use custom tools to call your own code.
787
+
788
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
789
+ return at each token position, each with an associated log probability.
790
+
791
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
792
+ model considers the results of the tokens with top_p probability mass. So 0.1
793
+ means only the tokens comprising the top 10% probability mass are considered.
794
+
795
+ We generally recommend altering this or `temperature` but not both.
796
+
797
+ truncation: The truncation strategy to use for the model response.
798
+
799
+ - `auto`: If the input to this Response exceeds the model's context window size,
800
+ the model will truncate the response to fit the context window by dropping
801
+ items from the beginning of the conversation.
802
+ - `disabled` (default): If the input size will exceed the context window size
803
+ for a model, the request will fail with a 400 error.
804
+
805
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
806
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
807
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
808
+ similar requests and to help OpenAI detect and prevent abuse.
809
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
810
+
811
+ extra_headers: Send extra headers
812
+
813
+ extra_query: Add additional query parameters to the request
814
+
815
+ extra_body: Add additional JSON properties to the request
816
+
817
+ timeout: Override the client-level default timeout for this request, in seconds
818
+ """
819
+ ...
820
+
821
+ def create(
822
+ self,
823
+ *,
824
+ background: Optional[bool] | Omit = omit,
825
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
826
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
827
+ input: Union[str, ResponseInputParam] | Omit = omit,
828
+ instructions: Optional[str] | Omit = omit,
829
+ max_output_tokens: Optional[int] | Omit = omit,
830
+ max_tool_calls: Optional[int] | Omit = omit,
831
+ metadata: Optional[Metadata] | Omit = omit,
832
+ model: ResponsesModel | Omit = omit,
833
+ parallel_tool_calls: Optional[bool] | Omit = omit,
834
+ previous_response_id: Optional[str] | Omit = omit,
835
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
836
+ prompt_cache_key: str | Omit = omit,
837
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
838
+ reasoning: Optional[Reasoning] | Omit = omit,
839
+ safety_identifier: str | Omit = omit,
840
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
841
+ store: Optional[bool] | Omit = omit,
842
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
843
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
844
+ temperature: Optional[float] | Omit = omit,
845
+ text: ResponseTextConfigParam | Omit = omit,
846
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
847
+ tools: Iterable[ToolParam] | Omit = omit,
848
+ top_logprobs: Optional[int] | Omit = omit,
849
+ top_p: Optional[float] | Omit = omit,
850
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
851
+ user: str | Omit = omit,
852
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
853
+ # The extra values given here take precedence over values defined on the client or passed to this method.
854
+ extra_headers: Headers | None = None,
855
+ extra_query: Query | None = None,
856
+ extra_body: Body | None = None,
857
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
858
+ ) -> Response | Stream[ResponseStreamEvent]:
859
+ return self._post(
860
+ "/responses",
861
+ body=maybe_transform(
862
+ {
863
+ "background": background,
864
+ "conversation": conversation,
865
+ "include": include,
866
+ "input": input,
867
+ "instructions": instructions,
868
+ "max_output_tokens": max_output_tokens,
869
+ "max_tool_calls": max_tool_calls,
870
+ "metadata": metadata,
871
+ "model": model,
872
+ "parallel_tool_calls": parallel_tool_calls,
873
+ "previous_response_id": previous_response_id,
874
+ "prompt": prompt,
875
+ "prompt_cache_key": prompt_cache_key,
876
+ "prompt_cache_retention": prompt_cache_retention,
877
+ "reasoning": reasoning,
878
+ "safety_identifier": safety_identifier,
879
+ "service_tier": service_tier,
880
+ "store": store,
881
+ "stream": stream,
882
+ "stream_options": stream_options,
883
+ "temperature": temperature,
884
+ "text": text,
885
+ "tool_choice": tool_choice,
886
+ "tools": tools,
887
+ "top_logprobs": top_logprobs,
888
+ "top_p": top_p,
889
+ "truncation": truncation,
890
+ "user": user,
891
+ },
892
+ response_create_params.ResponseCreateParamsStreaming
893
+ if stream
894
+ else response_create_params.ResponseCreateParamsNonStreaming,
895
+ ),
896
+ options=make_request_options(
897
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
898
+ ),
899
+ cast_to=Response,
900
+ stream=stream or False,
901
+ stream_cls=Stream[ResponseStreamEvent],
902
+ )
903
+
904
+ @overload
905
+ def stream(
906
+ self,
907
+ *,
908
+ response_id: str,
909
+ text_format: type[TextFormatT] | Omit = omit,
910
+ starting_after: int | Omit = omit,
911
+ tools: Iterable[ParseableToolParam] | Omit = omit,
912
+ # The extra values given here take precedence over values defined on the client or passed to this method.
913
+ extra_headers: Headers | None = None,
914
+ extra_query: Query | None = None,
915
+ extra_body: Body | None = None,
916
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
917
+ ) -> ResponseStreamManager[TextFormatT]: ...
918
+
919
+ @overload
920
+ def stream(
921
+ self,
922
+ *,
923
+ input: Union[str, ResponseInputParam],
924
+ model: ResponsesModel,
925
+ background: Optional[bool] | Omit = omit,
926
+ text_format: type[TextFormatT] | Omit = omit,
927
+ tools: Iterable[ParseableToolParam] | Omit = omit,
928
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
929
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
930
+ instructions: Optional[str] | Omit = omit,
931
+ max_output_tokens: Optional[int] | Omit = omit,
932
+ max_tool_calls: Optional[int] | Omit = omit,
933
+ metadata: Optional[Metadata] | Omit = omit,
934
+ parallel_tool_calls: Optional[bool] | Omit = omit,
935
+ previous_response_id: Optional[str] | Omit = omit,
936
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
937
+ prompt_cache_key: str | Omit = omit,
938
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
939
+ reasoning: Optional[Reasoning] | Omit = omit,
940
+ safety_identifier: str | Omit = omit,
941
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
942
+ store: Optional[bool] | Omit = omit,
943
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
944
+ temperature: Optional[float] | Omit = omit,
945
+ text: ResponseTextConfigParam | Omit = omit,
946
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
947
+ top_logprobs: Optional[int] | Omit = omit,
948
+ top_p: Optional[float] | Omit = omit,
949
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
950
+ user: str | Omit = omit,
951
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
952
+ # The extra values given here take precedence over values defined on the client or passed to this method.
953
+ extra_headers: Headers | None = None,
954
+ extra_query: Query | None = None,
955
+ extra_body: Body | None = None,
956
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
957
+ ) -> ResponseStreamManager[TextFormatT]: ...
958
+
959
+ def stream(
960
+ self,
961
+ *,
962
+ response_id: str | Omit = omit,
963
+ input: Union[str, ResponseInputParam] | Omit = omit,
964
+ model: ResponsesModel | Omit = omit,
965
+ background: Optional[bool] | Omit = omit,
966
+ text_format: type[TextFormatT] | Omit = omit,
967
+ tools: Iterable[ParseableToolParam] | Omit = omit,
968
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
969
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
970
+ instructions: Optional[str] | Omit = omit,
971
+ max_output_tokens: Optional[int] | Omit = omit,
972
+ max_tool_calls: Optional[int] | Omit = omit,
973
+ metadata: Optional[Metadata] | Omit = omit,
974
+ parallel_tool_calls: Optional[bool] | Omit = omit,
975
+ previous_response_id: Optional[str] | Omit = omit,
976
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
977
+ prompt_cache_key: str | Omit = omit,
978
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
979
+ reasoning: Optional[Reasoning] | Omit = omit,
980
+ safety_identifier: str | Omit = omit,
981
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
982
+ store: Optional[bool] | Omit = omit,
983
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
984
+ temperature: Optional[float] | Omit = omit,
985
+ text: ResponseTextConfigParam | Omit = omit,
986
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
987
+ top_logprobs: Optional[int] | Omit = omit,
988
+ top_p: Optional[float] | Omit = omit,
989
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
990
+ user: str | Omit = omit,
991
+ starting_after: int | Omit = omit,
992
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
993
+ # The extra values given here take precedence over values defined on the client or passed to this method.
994
+ extra_headers: Headers | None = None,
995
+ extra_query: Query | None = None,
996
+ extra_body: Body | None = None,
997
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
998
+ ) -> ResponseStreamManager[TextFormatT]:
999
+ new_response_args = {
1000
+ "input": input,
1001
+ "model": model,
1002
+ "conversation": conversation,
1003
+ "include": include,
1004
+ "instructions": instructions,
1005
+ "max_output_tokens": max_output_tokens,
1006
+ "max_tool_calls": max_tool_calls,
1007
+ "metadata": metadata,
1008
+ "parallel_tool_calls": parallel_tool_calls,
1009
+ "previous_response_id": previous_response_id,
1010
+ "prompt": prompt,
1011
+ "prompt_cache_key": prompt_cache_key,
1012
+ "prompt_cache_retention": prompt_cache_retention,
1013
+ "reasoning": reasoning,
1014
+ "safety_identifier": safety_identifier,
1015
+ "service_tier": service_tier,
1016
+ "store": store,
1017
+ "stream_options": stream_options,
1018
+ "temperature": temperature,
1019
+ "text": text,
1020
+ "tool_choice": tool_choice,
1021
+ "top_logprobs": top_logprobs,
1022
+ "top_p": top_p,
1023
+ "truncation": truncation,
1024
+ "user": user,
1025
+ "background": background,
1026
+ }
1027
+ new_response_args_names = [k for k, v in new_response_args.items() if is_given(v)]
1028
+
1029
+ if (is_given(response_id) or is_given(starting_after)) and len(new_response_args_names) > 0:
1030
+ raise ValueError(
1031
+ "Cannot provide both response_id/starting_after can't be provided together with "
1032
+ + ", ".join(new_response_args_names)
1033
+ )
1034
+ tools = _make_tools(tools)
1035
+ if len(new_response_args_names) > 0:
1036
+ if not is_given(input):
1037
+ raise ValueError("input must be provided when creating a new response")
1038
+
1039
+ if not is_given(model):
1040
+ raise ValueError("model must be provided when creating a new response")
1041
+
1042
+ if is_given(text_format):
1043
+ if not text:
1044
+ text = {}
1045
+
1046
+ if "format" in text:
1047
+ raise TypeError("Cannot mix and match text.format with text_format")
1048
+
1049
+ text["format"] = _type_to_text_format_param(text_format)
1050
+
1051
+ api_request: partial[Stream[ResponseStreamEvent]] = partial(
1052
+ self.create,
1053
+ input=input,
1054
+ model=model,
1055
+ tools=tools,
1056
+ conversation=conversation,
1057
+ include=include,
1058
+ instructions=instructions,
1059
+ max_output_tokens=max_output_tokens,
1060
+ max_tool_calls=max_tool_calls,
1061
+ metadata=metadata,
1062
+ parallel_tool_calls=parallel_tool_calls,
1063
+ previous_response_id=previous_response_id,
1064
+ prompt=prompt,
1065
+ prompt_cache_key=prompt_cache_key,
1066
+ prompt_cache_retention=prompt_cache_retention,
1067
+ store=store,
1068
+ stream_options=stream_options,
1069
+ stream=True,
1070
+ temperature=temperature,
1071
+ text=text,
1072
+ tool_choice=tool_choice,
1073
+ reasoning=reasoning,
1074
+ safety_identifier=safety_identifier,
1075
+ service_tier=service_tier,
1076
+ top_logprobs=top_logprobs,
1077
+ top_p=top_p,
1078
+ truncation=truncation,
1079
+ user=user,
1080
+ background=background,
1081
+ extra_headers=extra_headers,
1082
+ extra_query=extra_query,
1083
+ extra_body=extra_body,
1084
+ timeout=timeout,
1085
+ )
1086
+
1087
+ return ResponseStreamManager(api_request, text_format=text_format, input_tools=tools, starting_after=None)
1088
+ else:
1089
+ if not is_given(response_id):
1090
+ raise ValueError("id must be provided when streaming an existing response")
1091
+
1092
+ return ResponseStreamManager(
1093
+ lambda: self.retrieve(
1094
+ response_id=response_id,
1095
+ stream=True,
1096
+ include=include or [],
1097
+ extra_headers=extra_headers,
1098
+ extra_query=extra_query,
1099
+ extra_body=extra_body,
1100
+ starting_after=omit,
1101
+ timeout=timeout,
1102
+ ),
1103
+ text_format=text_format,
1104
+ input_tools=tools,
1105
+ starting_after=starting_after if is_given(starting_after) else None,
1106
+ )
1107
+
1108
+ def parse(
1109
+ self,
1110
+ *,
1111
+ text_format: type[TextFormatT] | Omit = omit,
1112
+ background: Optional[bool] | Omit = omit,
1113
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
1114
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
1115
+ input: Union[str, ResponseInputParam] | Omit = omit,
1116
+ instructions: Optional[str] | Omit = omit,
1117
+ max_output_tokens: Optional[int] | Omit = omit,
1118
+ max_tool_calls: Optional[int] | Omit = omit,
1119
+ metadata: Optional[Metadata] | Omit = omit,
1120
+ model: ResponsesModel | Omit = omit,
1121
+ parallel_tool_calls: Optional[bool] | Omit = omit,
1122
+ previous_response_id: Optional[str] | Omit = omit,
1123
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
1124
+ prompt_cache_key: str | Omit = omit,
1125
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
1126
+ reasoning: Optional[Reasoning] | Omit = omit,
1127
+ safety_identifier: str | Omit = omit,
1128
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
1129
+ store: Optional[bool] | Omit = omit,
1130
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
1131
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
1132
+ temperature: Optional[float] | Omit = omit,
1133
+ text: ResponseTextConfigParam | Omit = omit,
1134
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
1135
+ tools: Iterable[ParseableToolParam] | Omit = omit,
1136
+ top_logprobs: Optional[int] | Omit = omit,
1137
+ top_p: Optional[float] | Omit = omit,
1138
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
1139
+ user: str | Omit = omit,
1140
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
1141
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1142
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1143
+ extra_headers: Headers | None = None,
1144
+ extra_query: Query | None = None,
1145
+ extra_body: Body | None = None,
1146
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1147
+ ) -> ParsedResponse[TextFormatT]:
1148
+ if is_given(text_format):
1149
+ if not text:
1150
+ text = {}
1151
+
1152
+ if "format" in text:
1153
+ raise TypeError("Cannot mix and match text.format with text_format")
1154
+
1155
+ text["format"] = _type_to_text_format_param(text_format)
1156
+
1157
+ tools = _make_tools(tools)
1158
+
1159
+ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
1160
+ return parse_response(
1161
+ input_tools=tools,
1162
+ text_format=text_format,
1163
+ response=raw_response,
1164
+ )
1165
+
1166
+ return self._post(
1167
+ "/responses",
1168
+ body=maybe_transform(
1169
+ {
1170
+ "background": background,
1171
+ "conversation": conversation,
1172
+ "include": include,
1173
+ "input": input,
1174
+ "instructions": instructions,
1175
+ "max_output_tokens": max_output_tokens,
1176
+ "max_tool_calls": max_tool_calls,
1177
+ "metadata": metadata,
1178
+ "model": model,
1179
+ "parallel_tool_calls": parallel_tool_calls,
1180
+ "previous_response_id": previous_response_id,
1181
+ "prompt": prompt,
1182
+ "prompt_cache_key": prompt_cache_key,
1183
+ "prompt_cache_retention": prompt_cache_retention,
1184
+ "reasoning": reasoning,
1185
+ "safety_identifier": safety_identifier,
1186
+ "service_tier": service_tier,
1187
+ "store": store,
1188
+ "stream": stream,
1189
+ "stream_options": stream_options,
1190
+ "temperature": temperature,
1191
+ "text": text,
1192
+ "tool_choice": tool_choice,
1193
+ "tools": tools,
1194
+ "top_logprobs": top_logprobs,
1195
+ "top_p": top_p,
1196
+ "truncation": truncation,
1197
+ "user": user,
1198
+ "verbosity": verbosity,
1199
+ },
1200
+ response_create_params.ResponseCreateParams,
1201
+ ),
1202
+ options=make_request_options(
1203
+ extra_headers=extra_headers,
1204
+ extra_query=extra_query,
1205
+ extra_body=extra_body,
1206
+ timeout=timeout,
1207
+ post_parser=parser,
1208
+ ),
1209
+ # we turn the `Response` instance into a `ParsedResponse`
1210
+ # in the `parser` function above
1211
+ cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
1212
+ )
1213
+
1214
+ @overload
1215
+ def retrieve(
1216
+ self,
1217
+ response_id: str,
1218
+ *,
1219
+ include: List[ResponseIncludable] | Omit = omit,
1220
+ include_obfuscation: bool | Omit = omit,
1221
+ starting_after: int | Omit = omit,
1222
+ stream: Literal[False] | Omit = omit,
1223
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1224
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1225
+ extra_headers: Headers | None = None,
1226
+ extra_query: Query | None = None,
1227
+ extra_body: Body | None = None,
1228
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1229
+ ) -> Response: ...
1230
+
1231
+ @overload
1232
+ def retrieve(
1233
+ self,
1234
+ response_id: str,
1235
+ *,
1236
+ stream: Literal[True],
1237
+ include: List[ResponseIncludable] | Omit = omit,
1238
+ starting_after: int | Omit = omit,
1239
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1240
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1241
+ extra_headers: Headers | None = None,
1242
+ extra_query: Query | None = None,
1243
+ extra_body: Body | None = None,
1244
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1245
+ ) -> Stream[ResponseStreamEvent]: ...
1246
+
1247
+ @overload
1248
+ def retrieve(
1249
+ self,
1250
+ response_id: str,
1251
+ *,
1252
+ stream: bool,
1253
+ include: List[ResponseIncludable] | Omit = omit,
1254
+ starting_after: int | Omit = omit,
1255
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1256
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1257
+ extra_headers: Headers | None = None,
1258
+ extra_query: Query | None = None,
1259
+ extra_body: Body | None = None,
1260
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1261
+ ) -> Response | Stream[ResponseStreamEvent]: ...
1262
+
1263
+ @overload
1264
+ def retrieve(
1265
+ self,
1266
+ response_id: str,
1267
+ *,
1268
+ stream: bool = False,
1269
+ include: List[ResponseIncludable] | Omit = omit,
1270
+ starting_after: int | Omit = omit,
1271
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1272
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1273
+ extra_headers: Headers | None = None,
1274
+ extra_query: Query | None = None,
1275
+ extra_body: Body | None = None,
1276
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1277
+ ) -> Response | Stream[ResponseStreamEvent]:
1278
+ """
1279
+ Retrieves a model response with the given ID.
1280
+
1281
+ Args:
1282
+ include: Additional fields to include in the response. See the `include` parameter for
1283
+ Response creation above for more information.
1284
+
1285
+ include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
1286
+ characters to an `obfuscation` field on streaming delta events to normalize
1287
+ payload sizes as a mitigation to certain side-channel attacks. These obfuscation
1288
+ fields are included by default, but add a small amount of overhead to the data
1289
+ stream. You can set `include_obfuscation` to false to optimize for bandwidth if
1290
+ you trust the network links between your application and the OpenAI API.
1291
+
1292
+ starting_after: The sequence number of the event after which to start streaming.
1293
+
1294
+ stream: If set to true, the model response data will be streamed to the client as it is
1295
+ generated using
1296
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
1297
+ See the
1298
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
1299
+ for more information.
1300
+
1301
+ extra_headers: Send extra headers
1302
+
1303
+ extra_query: Add additional query parameters to the request
1304
+
1305
+ extra_body: Add additional JSON properties to the request
1306
+
1307
+ timeout: Override the client-level default timeout for this request, in seconds
1308
+ """
1309
+ ...
1310
+
1311
    @overload
    def retrieve(
        self,
        response_id: str,
        *,
        stream: Literal[True],
        include: List[ResponseIncludable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[ResponseStreamEvent]:
        """
        Retrieves a model response with the given ID.

        This overload covers the `stream=True` case, where the return value is
        always a `Stream` of server-sent events rather than a parsed `Response`.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
              characters to an `obfuscation` field on streaming delta events to normalize
              payload sizes as a mitigation to certain side-channel attacks. These obfuscation
              fields are included by default, but add a small amount of overhead to the data
              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
              you trust the network links between your application and the OpenAI API.

          starting_after: The sequence number of the event after which to start streaming.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
1359
+
1360
    @overload
    def retrieve(
        self,
        response_id: str,
        *,
        stream: bool,
        include: List[ResponseIncludable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response | Stream[ResponseStreamEvent]:
        """
        Retrieves a model response with the given ID.

        This overload covers a `stream` value only known at runtime (`bool`), so
        the return type is the union of the streaming and non-streaming results.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
              characters to an `obfuscation` field on streaming delta events to normalize
              payload sizes as a mitigation to certain side-channel attacks. These obfuscation
              fields are included by default, but add a small amount of overhead to the data
              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
              you trust the network links between your application and the OpenAI API.

          starting_after: The sequence number of the event after which to start streaming.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
1408
+
1409
+ def retrieve(
1410
+ self,
1411
+ response_id: str,
1412
+ *,
1413
+ include: List[ResponseIncludable] | Omit = omit,
1414
+ include_obfuscation: bool | Omit = omit,
1415
+ starting_after: int | Omit = omit,
1416
+ stream: Literal[False] | Literal[True] | Omit = omit,
1417
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1418
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1419
+ extra_headers: Headers | None = None,
1420
+ extra_query: Query | None = None,
1421
+ extra_body: Body | None = None,
1422
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1423
+ ) -> Response | Stream[ResponseStreamEvent]:
1424
+ if not response_id:
1425
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
1426
+ return self._get(
1427
+ f"/responses/{response_id}",
1428
+ options=make_request_options(
1429
+ extra_headers=extra_headers,
1430
+ extra_query=extra_query,
1431
+ extra_body=extra_body,
1432
+ timeout=timeout,
1433
+ query=maybe_transform(
1434
+ {
1435
+ "include": include,
1436
+ "include_obfuscation": include_obfuscation,
1437
+ "starting_after": starting_after,
1438
+ "stream": stream,
1439
+ },
1440
+ response_retrieve_params.ResponseRetrieveParams,
1441
+ ),
1442
+ ),
1443
+ cast_to=Response,
1444
+ stream=stream or False,
1445
+ stream_cls=Stream[ResponseStreamEvent],
1446
+ )
1447
+
1448
+ def delete(
1449
+ self,
1450
+ response_id: str,
1451
+ *,
1452
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1453
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1454
+ extra_headers: Headers | None = None,
1455
+ extra_query: Query | None = None,
1456
+ extra_body: Body | None = None,
1457
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1458
+ ) -> None:
1459
+ """
1460
+ Deletes a model response with the given ID.
1461
+
1462
+ Args:
1463
+ extra_headers: Send extra headers
1464
+
1465
+ extra_query: Add additional query parameters to the request
1466
+
1467
+ extra_body: Add additional JSON properties to the request
1468
+
1469
+ timeout: Override the client-level default timeout for this request, in seconds
1470
+ """
1471
+ if not response_id:
1472
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
1473
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
1474
+ return self._delete(
1475
+ f"/responses/{response_id}",
1476
+ options=make_request_options(
1477
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1478
+ ),
1479
+ cast_to=NoneType,
1480
+ )
1481
+
1482
+ def cancel(
1483
+ self,
1484
+ response_id: str,
1485
+ *,
1486
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1487
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1488
+ extra_headers: Headers | None = None,
1489
+ extra_query: Query | None = None,
1490
+ extra_body: Body | None = None,
1491
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1492
+ ) -> Response:
1493
+ """Cancels a model response with the given ID.
1494
+
1495
+ Only responses created with the
1496
+ `background` parameter set to `true` can be cancelled.
1497
+ [Learn more](https://platform.openai.com/docs/guides/background).
1498
+
1499
+ Args:
1500
+ extra_headers: Send extra headers
1501
+
1502
+ extra_query: Add additional query parameters to the request
1503
+
1504
+ extra_body: Add additional JSON properties to the request
1505
+
1506
+ timeout: Override the client-level default timeout for this request, in seconds
1507
+ """
1508
+ if not response_id:
1509
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
1510
+ return self._post(
1511
+ f"/responses/{response_id}/cancel",
1512
+ options=make_request_options(
1513
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1514
+ ),
1515
+ cast_to=Response,
1516
+ )
1517
+
1518
+
1519
+ class AsyncResponses(AsyncAPIResource):
1520
    @cached_property
    def input_items(self) -> AsyncInputItems:
        """Sub-resource for the response input-items endpoints, bound to this client."""
        return AsyncInputItems(self._client)
1523
+
1524
    @cached_property
    def input_tokens(self) -> AsyncInputTokens:
        """Sub-resource for the response input-tokens endpoints, bound to this client."""
        return AsyncInputTokens(self._client)
1527
+
1528
    @cached_property
    def with_raw_response(self) -> AsyncResponsesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncResponsesWithRawResponse(self)
1537
+
1538
    @cached_property
    def with_streaming_response(self) -> AsyncResponsesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncResponsesWithStreamingResponse(self)
1546
+
1547
+ @overload
1548
+ async def create(
1549
+ self,
1550
+ *,
1551
+ background: Optional[bool] | Omit = omit,
1552
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
1553
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
1554
+ input: Union[str, ResponseInputParam] | Omit = omit,
1555
+ instructions: Optional[str] | Omit = omit,
1556
+ max_output_tokens: Optional[int] | Omit = omit,
1557
+ max_tool_calls: Optional[int] | Omit = omit,
1558
+ metadata: Optional[Metadata] | Omit = omit,
1559
+ model: ResponsesModel | Omit = omit,
1560
+ parallel_tool_calls: Optional[bool] | Omit = omit,
1561
+ previous_response_id: Optional[str] | Omit = omit,
1562
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
1563
+ prompt_cache_key: str | Omit = omit,
1564
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
1565
+ reasoning: Optional[Reasoning] | Omit = omit,
1566
+ safety_identifier: str | Omit = omit,
1567
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
1568
+ store: Optional[bool] | Omit = omit,
1569
+ stream: Optional[Literal[False]] | Omit = omit,
1570
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
1571
+ temperature: Optional[float] | Omit = omit,
1572
+ text: ResponseTextConfigParam | Omit = omit,
1573
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
1574
+ tools: Iterable[ToolParam] | Omit = omit,
1575
+ top_logprobs: Optional[int] | Omit = omit,
1576
+ top_p: Optional[float] | Omit = omit,
1577
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
1578
+ user: str | Omit = omit,
1579
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1580
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1581
+ extra_headers: Headers | None = None,
1582
+ extra_query: Query | None = None,
1583
+ extra_body: Body | None = None,
1584
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1585
+ ) -> Response:
1586
+ """Creates a model response.
1587
+
1588
+ Provide
1589
+ [text](https://platform.openai.com/docs/guides/text) or
1590
+ [image](https://platform.openai.com/docs/guides/images) inputs to generate
1591
+ [text](https://platform.openai.com/docs/guides/text) or
1592
+ [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
1593
+ the model call your own
1594
+ [custom code](https://platform.openai.com/docs/guides/function-calling) or use
1595
+ built-in [tools](https://platform.openai.com/docs/guides/tools) like
1596
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
1597
+ [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
1598
+ your own data as input for the model's response.
1599
+
1600
+ Args:
1601
+ background: Whether to run the model response in the background.
1602
+ [Learn more](https://platform.openai.com/docs/guides/background).
1603
+
1604
+ conversation: The conversation that this response belongs to. Items from this conversation are
1605
+ prepended to `input_items` for this response request. Input items and output
1606
+ items from this response are automatically added to this conversation after this
1607
+ response completes.
1608
+
1609
+ include: Specify additional output data to include in the model response. Currently
1610
+ supported values are:
1611
+
1612
+ - `web_search_call.action.sources`: Include the sources of the web search tool
1613
+ call.
1614
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
1615
+ in code interpreter tool call items.
1616
+ - `computer_call_output.output.image_url`: Include image urls from the computer
1617
+ call output.
1618
+ - `file_search_call.results`: Include the search results of the file search tool
1619
+ call.
1620
+ - `message.input_image.image_url`: Include image urls from the input message.
1621
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
1622
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
1623
+ tokens in reasoning item outputs. This enables reasoning items to be used in
1624
+ multi-turn conversations when using the Responses API statelessly (like when
1625
+ the `store` parameter is set to `false`, or when an organization is enrolled
1626
+ in the zero data retention program).
1627
+
1628
+ input: Text, image, or file inputs to the model, used to generate a response.
1629
+
1630
+ Learn more:
1631
+
1632
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
1633
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
1634
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
1635
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
1636
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
1637
+
1638
+ instructions: A system (or developer) message inserted into the model's context.
1639
+
1640
+ When using along with `previous_response_id`, the instructions from a previous
1641
+ response will not be carried over to the next response. This makes it simple to
1642
+ swap out system (or developer) messages in new responses.
1643
+
1644
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
1645
+ including visible output tokens and
1646
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
1647
+
1648
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
1649
+ response. This maximum number applies across all built-in tool calls, not per
1650
+ individual tool. Any further attempts to call a tool by the model will be
1651
+ ignored.
1652
+
1653
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
1654
+ for storing additional information about the object in a structured format, and
1655
+ querying for objects via API or the dashboard.
1656
+
1657
+ Keys are strings with a maximum length of 64 characters. Values are strings with
1658
+ a maximum length of 512 characters.
1659
+
1660
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
1661
+ wide range of models with different capabilities, performance characteristics,
1662
+ and price points. Refer to the
1663
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
1664
+ available models.
1665
+
1666
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
1667
+
1668
+ previous_response_id: The unique ID of the previous response to the model. Use this to create
1669
+ multi-turn conversations. Learn more about
1670
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
1671
+ Cannot be used in conjunction with `conversation`.
1672
+
1673
+ prompt: Reference to a prompt template and its variables.
1674
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
1675
+
1676
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
1677
+ hit rates. Replaces the `user` field.
1678
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
1679
+
1680
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
1681
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
1682
+ of 24 hours.
1683
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
1684
+
1685
+ reasoning: **gpt-5 and o-series models only**
1686
+
1687
+ Configuration options for
1688
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
1689
+
1690
+ safety_identifier: A stable identifier used to help detect users of your application that may be
1691
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
1692
+ identifies each user. We recommend hashing their username or email address, in
1693
+ order to avoid sending us any identifying information.
1694
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
1695
+
1696
+ service_tier: Specifies the processing type used for serving the request.
1697
+
1698
+ - If set to 'auto', then the request will be processed with the service tier
1699
+ configured in the Project settings. Unless otherwise configured, the Project
1700
+ will use 'default'.
1701
+ - If set to 'default', then the request will be processed with the standard
1702
+ pricing and performance for the selected model.
1703
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1704
+ '[priority](https://openai.com/api-priority-processing/)', then the request
1705
+ will be processed with the corresponding service tier.
1706
+ - When not set, the default behavior is 'auto'.
1707
+
1708
+ When the `service_tier` parameter is set, the response body will include the
1709
+ `service_tier` value based on the processing mode actually used to serve the
1710
+ request. This response value may be different from the value set in the
1711
+ parameter.
1712
+
1713
+ store: Whether to store the generated model response for later retrieval via API.
1714
+
1715
+ stream: If set to true, the model response data will be streamed to the client as it is
1716
+ generated using
1717
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
1718
+ See the
1719
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
1720
+ for more information.
1721
+
1722
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`.
1723
+
1724
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1725
+ make the output more random, while lower values like 0.2 will make it more
1726
+ focused and deterministic. We generally recommend altering this or `top_p` but
1727
+ not both.
1728
+
1729
+ text: Configuration options for a text response from the model. Can be plain text or
1730
+ structured JSON data. Learn more:
1731
+
1732
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
1733
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
1734
+
1735
+ tool_choice: How the model should select which tool (or tools) to use when generating a
1736
+ response. See the `tools` parameter to see how to specify which tools the model
1737
+ can call.
1738
+
1739
+ tools: An array of tools the model may call while generating a response. You can
1740
+ specify which tool to use by setting the `tool_choice` parameter.
1741
+
1742
+ We support the following categories of tools:
1743
+
1744
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
1745
+ capabilities, like
1746
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
1747
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
1748
+ Learn more about
1749
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
1750
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
1751
+ predefined connectors such as Google Drive and SharePoint. Learn more about
1752
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
1753
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
1754
+ the model to call your own code with strongly typed arguments and outputs.
1755
+ Learn more about
1756
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
1757
+ You can also use custom tools to call your own code.
1758
+
1759
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
1760
+ return at each token position, each with an associated log probability.
1761
+
1762
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
1763
+ model considers the results of the tokens with top_p probability mass. So 0.1
1764
+ means only the tokens comprising the top 10% probability mass are considered.
1765
+
1766
+ We generally recommend altering this or `temperature` but not both.
1767
+
1768
+ truncation: The truncation strategy to use for the model response.
1769
+
1770
+ - `auto`: If the input to this Response exceeds the model's context window size,
1771
+ the model will truncate the response to fit the context window by dropping
1772
+ items from the beginning of the conversation.
1773
+ - `disabled` (default): If the input size will exceed the context window size
1774
+ for a model, the request will fail with a 400 error.
1775
+
1776
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
1777
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
1778
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
1779
+ similar requests and to help OpenAI detect and prevent abuse.
1780
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
1781
+
1782
+ extra_headers: Send extra headers
1783
+
1784
+ extra_query: Add additional query parameters to the request
1785
+
1786
+ extra_body: Add additional JSON properties to the request
1787
+
1788
+ timeout: Override the client-level default timeout for this request, in seconds
1789
+ """
1790
+ ...
1791
+
1792
+ @overload
1793
+ async def create(
1794
+ self,
1795
+ *,
1796
+ stream: Literal[True],
1797
+ background: Optional[bool] | Omit = omit,
1798
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
1799
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
1800
+ input: Union[str, ResponseInputParam] | Omit = omit,
1801
+ instructions: Optional[str] | Omit = omit,
1802
+ max_output_tokens: Optional[int] | Omit = omit,
1803
+ max_tool_calls: Optional[int] | Omit = omit,
1804
+ metadata: Optional[Metadata] | Omit = omit,
1805
+ model: ResponsesModel | Omit = omit,
1806
+ parallel_tool_calls: Optional[bool] | Omit = omit,
1807
+ previous_response_id: Optional[str] | Omit = omit,
1808
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
1809
+ prompt_cache_key: str | Omit = omit,
1810
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
1811
+ reasoning: Optional[Reasoning] | Omit = omit,
1812
+ safety_identifier: str | Omit = omit,
1813
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
1814
+ store: Optional[bool] | Omit = omit,
1815
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
1816
+ temperature: Optional[float] | Omit = omit,
1817
+ text: ResponseTextConfigParam | Omit = omit,
1818
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
1819
+ tools: Iterable[ToolParam] | Omit = omit,
1820
+ top_logprobs: Optional[int] | Omit = omit,
1821
+ top_p: Optional[float] | Omit = omit,
1822
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
1823
+ user: str | Omit = omit,
1824
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
1825
+ # The extra values given here take precedence over values defined on the client or passed to this method.
1826
+ extra_headers: Headers | None = None,
1827
+ extra_query: Query | None = None,
1828
+ extra_body: Body | None = None,
1829
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
1830
+ ) -> AsyncStream[ResponseStreamEvent]:
1831
+ """Creates a model response.
1832
+
1833
+ Provide
1834
+ [text](https://platform.openai.com/docs/guides/text) or
1835
+ [image](https://platform.openai.com/docs/guides/images) inputs to generate
1836
+ [text](https://platform.openai.com/docs/guides/text) or
1837
+ [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
1838
+ the model call your own
1839
+ [custom code](https://platform.openai.com/docs/guides/function-calling) or use
1840
+ built-in [tools](https://platform.openai.com/docs/guides/tools) like
1841
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
1842
+ [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
1843
+ your own data as input for the model's response.
1844
+
1845
+ Args:
1846
+ stream: If set to true, the model response data will be streamed to the client as it is
1847
+ generated using
1848
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
1849
+ See the
1850
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
1851
+ for more information.
1852
+
1853
+ background: Whether to run the model response in the background.
1854
+ [Learn more](https://platform.openai.com/docs/guides/background).
1855
+
1856
+ conversation: The conversation that this response belongs to. Items from this conversation are
1857
+ prepended to `input_items` for this response request. Input items and output
1858
+ items from this response are automatically added to this conversation after this
1859
+ response completes.
1860
+
1861
+ include: Specify additional output data to include in the model response. Currently
1862
+ supported values are:
1863
+
1864
+ - `web_search_call.action.sources`: Include the sources of the web search tool
1865
+ call.
1866
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
1867
+ in code interpreter tool call items.
1868
+ - `computer_call_output.output.image_url`: Include image urls from the computer
1869
+ call output.
1870
+ - `file_search_call.results`: Include the search results of the file search tool
1871
+ call.
1872
+ - `message.input_image.image_url`: Include image urls from the input message.
1873
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
1874
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
1875
+ tokens in reasoning item outputs. This enables reasoning items to be used in
1876
+ multi-turn conversations when using the Responses API statelessly (like when
1877
+ the `store` parameter is set to `false`, or when an organization is enrolled
1878
+ in the zero data retention program).
1879
+
1880
+ input: Text, image, or file inputs to the model, used to generate a response.
1881
+
1882
+ Learn more:
1883
+
1884
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
1885
+ - [Image inputs](https://platform.openai.com/docs/guides/images)
1886
+ - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
1887
+ - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
1888
+ - [Function calling](https://platform.openai.com/docs/guides/function-calling)
1889
+
1890
+ instructions: A system (or developer) message inserted into the model's context.
1891
+
1892
+ When using along with `previous_response_id`, the instructions from a previous
1893
+ response will not be carried over to the next response. This makes it simple to
1894
+ swap out system (or developer) messages in new responses.
1895
+
1896
+ max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
1897
+ including visible output tokens and
1898
+ [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
1899
+
1900
+ max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
1901
+ response. This maximum number applies across all built-in tool calls, not per
1902
+ individual tool. Any further attempts to call a tool by the model will be
1903
+ ignored.
1904
+
1905
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
1906
+ for storing additional information about the object in a structured format, and
1907
+ querying for objects via API or the dashboard.
1908
+
1909
+ Keys are strings with a maximum length of 64 characters. Values are strings with
1910
+ a maximum length of 512 characters.
1911
+
1912
+ model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
1913
+ wide range of models with different capabilities, performance characteristics,
1914
+ and price points. Refer to the
1915
+ [model guide](https://platform.openai.com/docs/models) to browse and compare
1916
+ available models.
1917
+
1918
+ parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
1919
+
1920
+ previous_response_id: The unique ID of the previous response to the model. Use this to create
1921
+ multi-turn conversations. Learn more about
1922
+ [conversation state](https://platform.openai.com/docs/guides/conversation-state).
1923
+ Cannot be used in conjunction with `conversation`.
1924
+
1925
+ prompt: Reference to a prompt template and its variables.
1926
+ [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
1927
+
1928
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
1929
+ hit rates. Replaces the `user` field.
1930
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
1931
+
1932
+ prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
1933
+ prompt caching, which keeps cached prefixes active for longer, up to a maximum
1934
+ of 24 hours.
1935
+ [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
1936
+
1937
+ reasoning: **gpt-5 and o-series models only**
1938
+
1939
+ Configuration options for
1940
+ [reasoning models](https://platform.openai.com/docs/guides/reasoning).
1941
+
1942
+ safety_identifier: A stable identifier used to help detect users of your application that may be
1943
+ violating OpenAI's usage policies. The IDs should be a string that uniquely
1944
+ identifies each user. We recommend hashing their username or email address, in
1945
+ order to avoid sending us any identifying information.
1946
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
1947
+
1948
+ service_tier: Specifies the processing type used for serving the request.
1949
+
1950
+ - If set to 'auto', then the request will be processed with the service tier
1951
+ configured in the Project settings. Unless otherwise configured, the Project
1952
+ will use 'default'.
1953
+ - If set to 'default', then the request will be processed with the standard
1954
+ pricing and performance for the selected model.
1955
+ - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
1956
+ '[priority](https://openai.com/api-priority-processing/)', then the request
1957
+ will be processed with the corresponding service tier.
1958
+ - When not set, the default behavior is 'auto'.
1959
+
1960
+ When the `service_tier` parameter is set, the response body will include the
1961
+ `service_tier` value based on the processing mode actually used to serve the
1962
+ request. This response value may be different from the value set in the
1963
+ parameter.
1964
+
1965
+ store: Whether to store the generated model response for later retrieval via API.
1966
+
1967
+ stream_options: Options for streaming responses. Only set this when you set `stream: true`.
1968
+
1969
+ temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1970
+ make the output more random, while lower values like 0.2 will make it more
1971
+ focused and deterministic. We generally recommend altering this or `top_p` but
1972
+ not both.
1973
+
1974
+ text: Configuration options for a text response from the model. Can be plain text or
1975
+ structured JSON data. Learn more:
1976
+
1977
+ - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
1978
+ - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
1979
+
1980
+ tool_choice: How the model should select which tool (or tools) to use when generating a
1981
+ response. See the `tools` parameter to see how to specify which tools the model
1982
+ can call.
1983
+
1984
+ tools: An array of tools the model may call while generating a response. You can
1985
+ specify which tool to use by setting the `tool_choice` parameter.
1986
+
1987
+ We support the following categories of tools:
1988
+
1989
+ - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
1990
+ capabilities, like
1991
+ [web search](https://platform.openai.com/docs/guides/tools-web-search) or
1992
+ [file search](https://platform.openai.com/docs/guides/tools-file-search).
1993
+ Learn more about
1994
+ [built-in tools](https://platform.openai.com/docs/guides/tools).
1995
+ - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
1996
+ predefined connectors such as Google Drive and SharePoint. Learn more about
1997
+ [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
1998
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling
1999
+ the model to call your own code with strongly typed arguments and outputs.
2000
+ Learn more about
2001
+ [function calling](https://platform.openai.com/docs/guides/function-calling).
2002
+ You can also use custom tools to call your own code.
2003
+
2004
+ top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
2005
+ return at each token position, each with an associated log probability.
2006
+
2007
+ top_p: An alternative to sampling with temperature, called nucleus sampling, where the
2008
+ model considers the results of the tokens with top_p probability mass. So 0.1
2009
+ means only the tokens comprising the top 10% probability mass are considered.
2010
+
2011
+ We generally recommend altering this or `temperature` but not both.
2012
+
2013
+ truncation: The truncation strategy to use for the model response.
2014
+
2015
+ - `auto`: If the input to this Response exceeds the model's context window size,
2016
+ the model will truncate the response to fit the context window by dropping
2017
+ items from the beginning of the conversation.
2018
+ - `disabled` (default): If the input size will exceed the context window size
2019
+ for a model, the request will fail with a 400 error.
2020
+
2021
+ user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
2022
+ `prompt_cache_key` instead to maintain caching optimizations. A stable
2023
+ identifier for your end-users. Used to boost cache hit rates by better bucketing
2024
+ similar requests and to help OpenAI detect and prevent abuse.
2025
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
2026
+
2027
+ extra_headers: Send extra headers
2028
+
2029
+ extra_query: Add additional query parameters to the request
2030
+
2031
+ extra_body: Add additional JSON properties to the request
2032
+
2033
+ timeout: Override the client-level default timeout for this request, in seconds
2034
+ """
2035
+ ...
2036
+
2037
    @overload
    async def create(
        self,
        *,
        stream: bool,
        background: Optional[bool] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        input: Union[str, ResponseInputParam] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        model: ResponsesModel | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        tools: Iterable[ToolParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        # Overload: `stream` given as a plain bool — the return type is the union of
        # a complete `Response` and an event stream, resolved at runtime.
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          conversation: The conversation that this response belongs to. Items from this conversation are
              prepended to `input_items` for this response request. Input items and output
              items from this response are automatically added to this conversation after this
              response completes.

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `web_search_call.action.sources`: Include the sources of the web search tool
                call.
              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).
              Cannot be used in conjunction with `conversation`.

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning: **gpt-5 and o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          stream_options: Options for streaming responses. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              We support the following categories of tools:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **MCP Tools**: Integrations with third-party systems via custom MCP servers or
                predefined connectors such as Google Drive and SharePoint. Learn more about
                [MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code with strongly typed arguments and outputs.
                Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).
                You can also use custom tools to call your own code.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the input to this Response exceeds the model's context window size,
                the model will truncate the response to fit the context window by dropping
                items from the beginning of the conversation.
              - `disabled` (default): If the input size will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
2281
+
2282
+ async def create(
2283
+ self,
2284
+ *,
2285
+ background: Optional[bool] | Omit = omit,
2286
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
2287
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
2288
+ input: Union[str, ResponseInputParam] | Omit = omit,
2289
+ instructions: Optional[str] | Omit = omit,
2290
+ max_output_tokens: Optional[int] | Omit = omit,
2291
+ max_tool_calls: Optional[int] | Omit = omit,
2292
+ metadata: Optional[Metadata] | Omit = omit,
2293
+ model: ResponsesModel | Omit = omit,
2294
+ parallel_tool_calls: Optional[bool] | Omit = omit,
2295
+ previous_response_id: Optional[str] | Omit = omit,
2296
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
2297
+ prompt_cache_key: str | Omit = omit,
2298
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2299
+ reasoning: Optional[Reasoning] | Omit = omit,
2300
+ safety_identifier: str | Omit = omit,
2301
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2302
+ store: Optional[bool] | Omit = omit,
2303
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
2304
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
2305
+ temperature: Optional[float] | Omit = omit,
2306
+ text: ResponseTextConfigParam | Omit = omit,
2307
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
2308
+ tools: Iterable[ToolParam] | Omit = omit,
2309
+ top_logprobs: Optional[int] | Omit = omit,
2310
+ top_p: Optional[float] | Omit = omit,
2311
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
2312
+ user: str | Omit = omit,
2313
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2314
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2315
+ extra_headers: Headers | None = None,
2316
+ extra_query: Query | None = None,
2317
+ extra_body: Body | None = None,
2318
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2319
+ ) -> Response | AsyncStream[ResponseStreamEvent]:
2320
+ return await self._post(
2321
+ "/responses",
2322
+ body=await async_maybe_transform(
2323
+ {
2324
+ "background": background,
2325
+ "conversation": conversation,
2326
+ "include": include,
2327
+ "input": input,
2328
+ "instructions": instructions,
2329
+ "max_output_tokens": max_output_tokens,
2330
+ "max_tool_calls": max_tool_calls,
2331
+ "metadata": metadata,
2332
+ "model": model,
2333
+ "parallel_tool_calls": parallel_tool_calls,
2334
+ "previous_response_id": previous_response_id,
2335
+ "prompt": prompt,
2336
+ "prompt_cache_key": prompt_cache_key,
2337
+ "prompt_cache_retention": prompt_cache_retention,
2338
+ "reasoning": reasoning,
2339
+ "safety_identifier": safety_identifier,
2340
+ "service_tier": service_tier,
2341
+ "store": store,
2342
+ "stream": stream,
2343
+ "stream_options": stream_options,
2344
+ "temperature": temperature,
2345
+ "text": text,
2346
+ "tool_choice": tool_choice,
2347
+ "tools": tools,
2348
+ "top_logprobs": top_logprobs,
2349
+ "top_p": top_p,
2350
+ "truncation": truncation,
2351
+ "user": user,
2352
+ },
2353
+ response_create_params.ResponseCreateParamsStreaming
2354
+ if stream
2355
+ else response_create_params.ResponseCreateParamsNonStreaming,
2356
+ ),
2357
+ options=make_request_options(
2358
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2359
+ ),
2360
+ cast_to=Response,
2361
+ stream=stream or False,
2362
+ stream_cls=AsyncStream[ResponseStreamEvent],
2363
+ )
2364
+
2365
    @overload
    def stream(
        self,
        *,
        response_id: str,
        text_format: type[TextFormatT] | Omit = omit,
        starting_after: int | Omit = omit,
        tools: Iterable[ParseableToolParam] | Omit = omit,
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncResponseStreamManager[TextFormatT]:
        # Overload: resume streaming an existing response identified by `response_id`,
        # optionally skipping events up to `starting_after`.
        ...
2379
+
2380
    @overload
    def stream(
        self,
        *,
        input: Union[str, ResponseInputParam],
        model: ResponsesModel,
        background: Optional[bool] | Omit = omit,
        text_format: type[TextFormatT] | Omit = omit,
        tools: Iterable[ParseableToolParam] | Omit = omit,
        conversation: Optional[response_create_params.Conversation] | Omit = omit,
        include: Optional[List[ResponseIncludable]] | Omit = omit,
        instructions: Optional[str] | Omit = omit,
        max_output_tokens: Optional[int] | Omit = omit,
        max_tool_calls: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        parallel_tool_calls: Optional[bool] | Omit = omit,
        previous_response_id: Optional[str] | Omit = omit,
        prompt: Optional[ResponsePromptParam] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning: Optional[Reasoning] | Omit = omit,
        safety_identifier: str | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        text: ResponseTextConfigParam | Omit = omit,
        tool_choice: response_create_params.ToolChoice | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
        user: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncResponseStreamManager[TextFormatT]:
        # Overload: create a new response (`input` and `model` required) and stream
        # its events as they are generated.
        ...
2419
+
2420
+ def stream(
2421
+ self,
2422
+ *,
2423
+ response_id: str | Omit = omit,
2424
+ input: Union[str, ResponseInputParam] | Omit = omit,
2425
+ model: ResponsesModel | Omit = omit,
2426
+ background: Optional[bool] | Omit = omit,
2427
+ text_format: type[TextFormatT] | Omit = omit,
2428
+ tools: Iterable[ParseableToolParam] | Omit = omit,
2429
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
2430
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
2431
+ instructions: Optional[str] | Omit = omit,
2432
+ max_output_tokens: Optional[int] | Omit = omit,
2433
+ max_tool_calls: Optional[int] | Omit = omit,
2434
+ metadata: Optional[Metadata] | Omit = omit,
2435
+ parallel_tool_calls: Optional[bool] | Omit = omit,
2436
+ previous_response_id: Optional[str] | Omit = omit,
2437
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
2438
+ prompt_cache_key: str | Omit = omit,
2439
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2440
+ reasoning: Optional[Reasoning] | Omit = omit,
2441
+ safety_identifier: str | Omit = omit,
2442
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2443
+ store: Optional[bool] | Omit = omit,
2444
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
2445
+ temperature: Optional[float] | Omit = omit,
2446
+ text: ResponseTextConfigParam | Omit = omit,
2447
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
2448
+ top_logprobs: Optional[int] | Omit = omit,
2449
+ top_p: Optional[float] | Omit = omit,
2450
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
2451
+ user: str | Omit = omit,
2452
+ starting_after: int | Omit = omit,
2453
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2454
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2455
+ extra_headers: Headers | None = None,
2456
+ extra_query: Query | None = None,
2457
+ extra_body: Body | None = None,
2458
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
2459
+ ) -> AsyncResponseStreamManager[TextFormatT]:
2460
+ new_response_args = {
2461
+ "input": input,
2462
+ "model": model,
2463
+ "conversation": conversation,
2464
+ "include": include,
2465
+ "instructions": instructions,
2466
+ "max_output_tokens": max_output_tokens,
2467
+ "max_tool_calls": max_tool_calls,
2468
+ "metadata": metadata,
2469
+ "parallel_tool_calls": parallel_tool_calls,
2470
+ "previous_response_id": previous_response_id,
2471
+ "prompt": prompt,
2472
+ "prompt_cache_key": prompt_cache_key,
2473
+ "prompt_cache_retention": prompt_cache_retention,
2474
+ "reasoning": reasoning,
2475
+ "safety_identifier": safety_identifier,
2476
+ "service_tier": service_tier,
2477
+ "store": store,
2478
+ "stream_options": stream_options,
2479
+ "temperature": temperature,
2480
+ "text": text,
2481
+ "tool_choice": tool_choice,
2482
+ "top_logprobs": top_logprobs,
2483
+ "top_p": top_p,
2484
+ "truncation": truncation,
2485
+ "user": user,
2486
+ "background": background,
2487
+ }
2488
+ new_response_args_names = [k for k, v in new_response_args.items() if is_given(v)]
2489
+
2490
+ if (is_given(response_id) or is_given(starting_after)) and len(new_response_args_names) > 0:
2491
+ raise ValueError(
2492
+ "Cannot provide both response_id/starting_after can't be provided together with "
2493
+ + ", ".join(new_response_args_names)
2494
+ )
2495
+
2496
+ tools = _make_tools(tools)
2497
+ if len(new_response_args_names) > 0:
2498
+ if isinstance(input, NotGiven):
2499
+ raise ValueError("input must be provided when creating a new response")
2500
+
2501
+ if not is_given(model):
2502
+ raise ValueError("model must be provided when creating a new response")
2503
+
2504
+ if is_given(text_format):
2505
+ if not text:
2506
+ text = {}
2507
+
2508
+ if "format" in text:
2509
+ raise TypeError("Cannot mix and match text.format with text_format")
2510
+
2511
+ text["format"] = _type_to_text_format_param(text_format)
2512
+
2513
+ api_request = self.create(
2514
+ input=input,
2515
+ model=model,
2516
+ stream=True,
2517
+ tools=tools,
2518
+ conversation=conversation,
2519
+ include=include,
2520
+ instructions=instructions,
2521
+ max_output_tokens=max_output_tokens,
2522
+ max_tool_calls=max_tool_calls,
2523
+ metadata=metadata,
2524
+ parallel_tool_calls=parallel_tool_calls,
2525
+ previous_response_id=previous_response_id,
2526
+ prompt=prompt,
2527
+ prompt_cache_key=prompt_cache_key,
2528
+ prompt_cache_retention=prompt_cache_retention,
2529
+ store=store,
2530
+ stream_options=stream_options,
2531
+ temperature=temperature,
2532
+ text=text,
2533
+ tool_choice=tool_choice,
2534
+ reasoning=reasoning,
2535
+ safety_identifier=safety_identifier,
2536
+ service_tier=service_tier,
2537
+ top_logprobs=top_logprobs,
2538
+ top_p=top_p,
2539
+ truncation=truncation,
2540
+ user=user,
2541
+ background=background,
2542
+ extra_headers=extra_headers,
2543
+ extra_query=extra_query,
2544
+ extra_body=extra_body,
2545
+ timeout=timeout,
2546
+ )
2547
+
2548
+ return AsyncResponseStreamManager(
2549
+ api_request,
2550
+ text_format=text_format,
2551
+ input_tools=tools,
2552
+ starting_after=None,
2553
+ )
2554
+ else:
2555
+ if isinstance(response_id, Omit):
2556
+ raise ValueError("response_id must be provided when streaming an existing response")
2557
+
2558
+ api_request = self.retrieve(
2559
+ response_id,
2560
+ stream=True,
2561
+ include=include or [],
2562
+ extra_headers=extra_headers,
2563
+ extra_query=extra_query,
2564
+ extra_body=extra_body,
2565
+ timeout=timeout,
2566
+ )
2567
+ return AsyncResponseStreamManager(
2568
+ api_request,
2569
+ text_format=text_format,
2570
+ input_tools=tools,
2571
+ starting_after=starting_after if is_given(starting_after) else None,
2572
+ )
2573
+
2574
+ async def parse(
2575
+ self,
2576
+ *,
2577
+ text_format: type[TextFormatT] | Omit = omit,
2578
+ background: Optional[bool] | Omit = omit,
2579
+ conversation: Optional[response_create_params.Conversation] | Omit = omit,
2580
+ include: Optional[List[ResponseIncludable]] | Omit = omit,
2581
+ input: Union[str, ResponseInputParam] | Omit = omit,
2582
+ instructions: Optional[str] | Omit = omit,
2583
+ max_output_tokens: Optional[int] | Omit = omit,
2584
+ max_tool_calls: Optional[int] | Omit = omit,
2585
+ metadata: Optional[Metadata] | Omit = omit,
2586
+ model: ResponsesModel | Omit = omit,
2587
+ parallel_tool_calls: Optional[bool] | Omit = omit,
2588
+ previous_response_id: Optional[str] | Omit = omit,
2589
+ prompt: Optional[ResponsePromptParam] | Omit = omit,
2590
+ prompt_cache_key: str | Omit = omit,
2591
+ prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
2592
+ reasoning: Optional[Reasoning] | Omit = omit,
2593
+ safety_identifier: str | Omit = omit,
2594
+ service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
2595
+ store: Optional[bool] | Omit = omit,
2596
+ stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
2597
+ stream_options: Optional[response_create_params.StreamOptions] | Omit = omit,
2598
+ temperature: Optional[float] | Omit = omit,
2599
+ text: ResponseTextConfigParam | Omit = omit,
2600
+ tool_choice: response_create_params.ToolChoice | Omit = omit,
2601
+ tools: Iterable[ParseableToolParam] | Omit = omit,
2602
+ top_logprobs: Optional[int] | Omit = omit,
2603
+ top_p: Optional[float] | Omit = omit,
2604
+ truncation: Optional[Literal["auto", "disabled"]] | Omit = omit,
2605
+ user: str | Omit = omit,
2606
+ verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
2607
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2608
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2609
+ extra_headers: Headers | None = None,
2610
+ extra_query: Query | None = None,
2611
+ extra_body: Body | None = None,
2612
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
2613
+ ) -> ParsedResponse[TextFormatT]:
2614
+ if is_given(text_format):
2615
+ if not text:
2616
+ text = {}
2617
+
2618
+ if "format" in text:
2619
+ raise TypeError("Cannot mix and match text.format with text_format")
2620
+
2621
+ text["format"] = _type_to_text_format_param(text_format)
2622
+
2623
+ tools = _make_tools(tools)
2624
+
2625
+ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
2626
+ return parse_response(
2627
+ input_tools=tools,
2628
+ text_format=text_format,
2629
+ response=raw_response,
2630
+ )
2631
+
2632
+ return await self._post(
2633
+ "/responses",
2634
+ body=maybe_transform(
2635
+ {
2636
+ "background": background,
2637
+ "conversation": conversation,
2638
+ "include": include,
2639
+ "input": input,
2640
+ "instructions": instructions,
2641
+ "max_output_tokens": max_output_tokens,
2642
+ "max_tool_calls": max_tool_calls,
2643
+ "metadata": metadata,
2644
+ "model": model,
2645
+ "parallel_tool_calls": parallel_tool_calls,
2646
+ "previous_response_id": previous_response_id,
2647
+ "prompt": prompt,
2648
+ "prompt_cache_key": prompt_cache_key,
2649
+ "prompt_cache_retention": prompt_cache_retention,
2650
+ "reasoning": reasoning,
2651
+ "safety_identifier": safety_identifier,
2652
+ "service_tier": service_tier,
2653
+ "store": store,
2654
+ "stream": stream,
2655
+ "stream_options": stream_options,
2656
+ "temperature": temperature,
2657
+ "text": text,
2658
+ "tool_choice": tool_choice,
2659
+ "tools": tools,
2660
+ "top_logprobs": top_logprobs,
2661
+ "top_p": top_p,
2662
+ "truncation": truncation,
2663
+ "user": user,
2664
+ "verbosity": verbosity,
2665
+ },
2666
+ response_create_params.ResponseCreateParams,
2667
+ ),
2668
+ options=make_request_options(
2669
+ extra_headers=extra_headers,
2670
+ extra_query=extra_query,
2671
+ extra_body=extra_body,
2672
+ timeout=timeout,
2673
+ post_parser=parser,
2674
+ ),
2675
+ # we turn the `Response` instance into a `ParsedResponse`
2676
+ # in the `parser` function above
2677
+ cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
2678
+ )
2679
+
2680
    # Non-streaming overload: with `stream` omitted (or False) the call
    # resolves to a fully materialised `Response`.
    @overload
    async def retrieve(
        self,
        response_id: str,
        *,
        include: List[ResponseIncludable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        stream: Literal[False] | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response: ...
2696
+
2697
+ @overload
2698
+ async def retrieve(
2699
+ self,
2700
+ response_id: str,
2701
+ *,
2702
+ stream: Literal[True],
2703
+ include: List[ResponseIncludable] | Omit = omit,
2704
+ starting_after: int | Omit = omit,
2705
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2706
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2707
+ extra_headers: Headers | None = None,
2708
+ extra_query: Query | None = None,
2709
+ extra_body: Body | None = None,
2710
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
2711
+ ) -> AsyncStream[ResponseStreamEvent]: ...
2712
+
2713
+ @overload
2714
+ async def retrieve(
2715
+ self,
2716
+ response_id: str,
2717
+ *,
2718
+ stream: bool,
2719
+ include: List[ResponseIncludable] | Omit = omit,
2720
+ starting_after: int | Omit = omit,
2721
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2722
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2723
+ extra_headers: Headers | None = None,
2724
+ extra_query: Query | None = None,
2725
+ extra_body: Body | None = None,
2726
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
2727
+ ) -> Response | AsyncStream[ResponseStreamEvent]: ...
2728
+
2729
+ @overload
2730
+ async def retrieve(
2731
+ self,
2732
+ response_id: str,
2733
+ *,
2734
+ stream: bool = False,
2735
+ include: List[ResponseIncludable] | Omit = omit,
2736
+ starting_after: int | Omit = omit,
2737
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2738
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2739
+ extra_headers: Headers | None = None,
2740
+ extra_query: Query | None = None,
2741
+ extra_body: Body | None = None,
2742
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
2743
+ ) -> Response | AsyncStream[ResponseStreamEvent]:
2744
+ """
2745
+ Retrieves a model response with the given ID.
2746
+
2747
+ Args:
2748
+ include: Additional fields to include in the response. See the `include` parameter for
2749
+ Response creation above for more information.
2750
+
2751
+ include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
2752
+ characters to an `obfuscation` field on streaming delta events to normalize
2753
+ payload sizes as a mitigation to certain side-channel attacks. These obfuscation
2754
+ fields are included by default, but add a small amount of overhead to the data
2755
+ stream. You can set `include_obfuscation` to false to optimize for bandwidth if
2756
+ you trust the network links between your application and the OpenAI API.
2757
+
2758
+ starting_after: The sequence number of the event after which to start streaming.
2759
+
2760
+ stream: If set to true, the model response data will be streamed to the client as it is
2761
+ generated using
2762
+ [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
2763
+ See the
2764
+ [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
2765
+ for more information.
2766
+
2767
+ extra_headers: Send extra headers
2768
+
2769
+ extra_query: Add additional query parameters to the request
2770
+
2771
+ extra_body: Add additional JSON properties to the request
2772
+
2773
+ timeout: Override the client-level default timeout for this request, in seconds
2774
+ """
2775
+ ...
2776
+
2777
    # Streaming overload: `stream=True` resolves to an async event stream.
    @overload
    async def retrieve(
        self,
        response_id: str,
        *,
        stream: Literal[True],
        include: List[ResponseIncludable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ResponseStreamEvent]:
        """
        Retrieves a model response with the given ID.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
              characters to an `obfuscation` field on streaming delta events to normalize
              payload sizes as a mitigation to certain side-channel attacks. These obfuscation
              fields are included by default, but add a small amount of overhead to the data
              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
              you trust the network links between your application and the OpenAI API.

          starting_after: The sequence number of the event after which to start streaming.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
2825
+
2826
    # Dynamic overload: a runtime `bool` for `stream` yields either a
    # `Response` or an async event stream.
    @overload
    async def retrieve(
        self,
        response_id: str,
        *,
        stream: bool,
        include: List[ResponseIncludable] | Omit = omit,
        include_obfuscation: bool | Omit = omit,
        starting_after: int | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        """
        Retrieves a model response with the given ID.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random
              characters to an `obfuscation` field on streaming delta events to normalize
              payload sizes as a mitigation to certain side-channel attacks. These obfuscation
              fields are included by default, but add a small amount of overhead to the data
              stream. You can set `include_obfuscation` to false to optimize for bandwidth if
              you trust the network links between your application and the OpenAI API.

          starting_after: The sequence number of the event after which to start streaming.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
2874
+
2875
+ async def retrieve(
2876
+ self,
2877
+ response_id: str,
2878
+ *,
2879
+ include: List[ResponseIncludable] | Omit = omit,
2880
+ include_obfuscation: bool | Omit = omit,
2881
+ starting_after: int | Omit = omit,
2882
+ stream: Literal[False] | Literal[True] | Omit = omit,
2883
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2884
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2885
+ extra_headers: Headers | None = None,
2886
+ extra_query: Query | None = None,
2887
+ extra_body: Body | None = None,
2888
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2889
+ ) -> Response | AsyncStream[ResponseStreamEvent]:
2890
+ if not response_id:
2891
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
2892
+ return await self._get(
2893
+ f"/responses/{response_id}",
2894
+ options=make_request_options(
2895
+ extra_headers=extra_headers,
2896
+ extra_query=extra_query,
2897
+ extra_body=extra_body,
2898
+ timeout=timeout,
2899
+ query=await async_maybe_transform(
2900
+ {
2901
+ "include": include,
2902
+ "include_obfuscation": include_obfuscation,
2903
+ "starting_after": starting_after,
2904
+ "stream": stream,
2905
+ },
2906
+ response_retrieve_params.ResponseRetrieveParams,
2907
+ ),
2908
+ ),
2909
+ cast_to=Response,
2910
+ stream=stream or False,
2911
+ stream_cls=AsyncStream[ResponseStreamEvent],
2912
+ )
2913
+
2914
+ async def delete(
2915
+ self,
2916
+ response_id: str,
2917
+ *,
2918
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2919
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2920
+ extra_headers: Headers | None = None,
2921
+ extra_query: Query | None = None,
2922
+ extra_body: Body | None = None,
2923
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2924
+ ) -> None:
2925
+ """
2926
+ Deletes a model response with the given ID.
2927
+
2928
+ Args:
2929
+ extra_headers: Send extra headers
2930
+
2931
+ extra_query: Add additional query parameters to the request
2932
+
2933
+ extra_body: Add additional JSON properties to the request
2934
+
2935
+ timeout: Override the client-level default timeout for this request, in seconds
2936
+ """
2937
+ if not response_id:
2938
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
2939
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
2940
+ return await self._delete(
2941
+ f"/responses/{response_id}",
2942
+ options=make_request_options(
2943
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2944
+ ),
2945
+ cast_to=NoneType,
2946
+ )
2947
+
2948
+ async def cancel(
2949
+ self,
2950
+ response_id: str,
2951
+ *,
2952
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
2953
+ # The extra values given here take precedence over values defined on the client or passed to this method.
2954
+ extra_headers: Headers | None = None,
2955
+ extra_query: Query | None = None,
2956
+ extra_body: Body | None = None,
2957
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
2958
+ ) -> Response:
2959
+ """Cancels a model response with the given ID.
2960
+
2961
+ Only responses created with the
2962
+ `background` parameter set to `true` can be cancelled.
2963
+ [Learn more](https://platform.openai.com/docs/guides/background).
2964
+
2965
+ Args:
2966
+ extra_headers: Send extra headers
2967
+
2968
+ extra_query: Add additional query parameters to the request
2969
+
2970
+ extra_body: Add additional JSON properties to the request
2971
+
2972
+ timeout: Override the client-level default timeout for this request, in seconds
2973
+ """
2974
+ if not response_id:
2975
+ raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
2976
+ return await self._post(
2977
+ f"/responses/{response_id}/cancel",
2978
+ options=make_request_options(
2979
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
2980
+ ),
2981
+ cast_to=Response,
2982
+ )
2983
+
2984
+
2985
class ResponsesWithRawResponse:
    """View over :class:`Responses` whose methods return the raw HTTP
    response instead of the parsed model object."""

    def __init__(self, responses: Responses) -> None:
        self._responses = responses

        # Wrap every endpoint method with the raw-response adapter.
        wrap = _legacy_response.to_raw_response_wrapper
        self.create = wrap(responses.create)
        self.retrieve = wrap(responses.retrieve)
        self.delete = wrap(responses.delete)
        self.cancel = wrap(responses.cancel)
        self.parse = wrap(responses.parse)

    @cached_property
    def input_items(self) -> InputItemsWithRawResponse:
        # Lazily build the wrapped sub-resource on first access.
        return InputItemsWithRawResponse(self._responses.input_items)

    @cached_property
    def input_tokens(self) -> InputTokensWithRawResponse:
        # Lazily build the wrapped sub-resource on first access.
        return InputTokensWithRawResponse(self._responses.input_tokens)
3012
+
3013
+
3014
class AsyncResponsesWithRawResponse:
    """View over :class:`AsyncResponses` whose methods return the raw HTTP
    response instead of the parsed model object."""

    def __init__(self, responses: AsyncResponses) -> None:
        self._responses = responses

        # Wrap every endpoint method with the async raw-response adapter.
        wrap = _legacy_response.async_to_raw_response_wrapper
        self.create = wrap(responses.create)
        self.retrieve = wrap(responses.retrieve)
        self.delete = wrap(responses.delete)
        self.cancel = wrap(responses.cancel)
        self.parse = wrap(responses.parse)

    @cached_property
    def input_items(self) -> AsyncInputItemsWithRawResponse:
        # Lazily build the wrapped sub-resource on first access.
        return AsyncInputItemsWithRawResponse(self._responses.input_items)

    @cached_property
    def input_tokens(self) -> AsyncInputTokensWithRawResponse:
        # Lazily build the wrapped sub-resource on first access.
        return AsyncInputTokensWithRawResponse(self._responses.input_tokens)
3041
+
3042
+
3043
class ResponsesWithStreamingResponse:
    """View over :class:`Responses` whose methods return streamed HTTP
    responses.

    NOTE(review): unlike the raw-response wrappers, `parse` is not wrapped
    here -- confirm that omission is intentional.
    """

    def __init__(self, responses: Responses) -> None:
        self._responses = responses

        # Wrap each endpoint method with the streamed-response adapter.
        wrap = to_streamed_response_wrapper
        self.create = wrap(responses.create)
        self.retrieve = wrap(responses.retrieve)
        self.delete = wrap(responses.delete)
        self.cancel = wrap(responses.cancel)

    @cached_property
    def input_items(self) -> InputItemsWithStreamingResponse:
        # Lazily build the wrapped sub-resource on first access.
        return InputItemsWithStreamingResponse(self._responses.input_items)

    @cached_property
    def input_tokens(self) -> InputTokensWithStreamingResponse:
        # Lazily build the wrapped sub-resource on first access.
        return InputTokensWithStreamingResponse(self._responses.input_tokens)
3067
+
3068
+
3069
class AsyncResponsesWithStreamingResponse:
    """View over :class:`AsyncResponses` whose methods return streamed HTTP
    responses.

    NOTE(review): unlike the raw-response wrappers, `parse` is not wrapped
    here -- confirm that omission is intentional.
    """

    def __init__(self, responses: AsyncResponses) -> None:
        self._responses = responses

        # Wrap each endpoint method with the async streamed-response adapter.
        wrap = async_to_streamed_response_wrapper
        self.create = wrap(responses.create)
        self.retrieve = wrap(responses.retrieve)
        self.delete = wrap(responses.delete)
        self.cancel = wrap(responses.cancel)

    @cached_property
    def input_items(self) -> AsyncInputItemsWithStreamingResponse:
        # Lazily build the wrapped sub-resource on first access.
        return AsyncInputItemsWithStreamingResponse(self._responses.input_items)

    @cached_property
    def input_tokens(self) -> AsyncInputTokensWithStreamingResponse:
        # Lazily build the wrapped sub-resource on first access.
        return AsyncInputTokensWithStreamingResponse(self._responses.input_tokens)
3092
+ return AsyncInputTokensWithStreamingResponse(self._responses.input_tokens)
3093
+
3094
+
3095
def _make_tools(tools: Iterable[ParseableToolParam] | Omit) -> List[ToolParam] | Omit:
    """Normalise a mixed tool list into Responses API `ToolParam` shapes.

    Tools already in the Responses shape are passed through untouched; Chat
    Completions style function tools (a `function` key holding a
    `PydanticFunctionTool`) are converted to `ResponsesPydanticFunctionTool`.
    Returns `omit` unchanged when no tools were given.
    """
    if not is_given(tools):
        return omit

    normalised: List[ToolParam] = []
    for tool in tools:
        # Anything that is not a Chat Completions style function tool is
        # already in the standard Responses API shape.
        is_chat_function_tool = tool["type"] == "function" and "function" in tool
        if not is_chat_function_tool:
            normalised.append(tool)
            continue

        fn = cast(Any, tool)["function"]  # pyright: ignore[reportUnnecessaryCast]
        if not isinstance(fn, PydanticFunctionTool):
            raise Exception(
                "Expected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`"
            )

        assert "parameters" in fn
        responses_tool = ResponsesPydanticFunctionTool(
            {
                "type": "function",
                "name": fn["name"],
                "description": fn.get("description"),
                "parameters": fn["parameters"],
                "strict": fn.get("strict") or False,
            },
            fn.model,
        )
        normalised.append(responses_tool.cast())

    return normalised